/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
	long			max_hpages;
	long			min_hpages;
	struct hstate		*hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy	policy;
	struct inode		vfs_inode;
};
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
int sysctl_hugetlb_shm_group;
enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};
static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
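
/*
 * Illustrative mount invocation exercising these options (the mount
 * point and all values are examples, not defaults):
 *
 *	mount -t hugetlbfs -o size=512M,min_size=256M,pagesize=2M,uid=1000,gid=1000,mode=0700,nr_inodes=64 none /mnt/huge
 */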
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
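
/*
 * Worked example for the vm_pgoff check above, assuming 4 KB base pages
 * and 2 MB huge pages: ~huge_page_mask(h) >> PAGE_SHIFT == 0x1ff, so
 * vm_pgoff must be a multiple of 512 base pages, i.e. the file offset
 * passed to mmap() must be a multiple of 2 MB.
 */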
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif
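
/*
 * Worked example for the alignment mask above, assuming 4 KB base pages
 * and 2 MB huge pages: PAGE_MASK & ~huge_page_mask(h) is
 * ~0xfffUL & 0x1fffffUL == 0x1ff000, so vm_unmapped_area() skips any
 * candidate with a nonzero bit in positions 12..20 and only returns
 * 2 MB aligned addresses.
 */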
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(); we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			page_cache_release(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
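
/*
 * Worked example, assuming 2 MB huge pages: a read at ki_pos == 5 MB
 * starts at index == 2 with offset == 1 MB; hugetlbfs_read_actor() then
 * copies the tail of that huge page in 4 KB chunks.  This per-subpage
 * walk is exactly the PAGE_CACHE_SIZE assumption that makes
 * do_generic_mapping_read() unusable here.
 */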
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  Region/reserv maps for ranges without associated pages
 *	are not modified.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
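/*
 * Example, assuming 2 MB huge pages: truncation to 4 MB arrives as
 * lstart == 4 MB, lend == LLONG_MAX; every page from index 2 upwards is
 * released and a single hugetlb_unreserve_pages() call trims the
 * region/reserv map.  Punching the range 4 MB..8 MB arrives as
 * lstart == 4 MB, lend == 8 MB; only indices 2 and 3 that actually have
 * pages get their region/reserv map entries removed.
 */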
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;
	long lookup_nr = PAGEVEC_SIZE;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * Make sure to never grab more pages than we
		 * might possibly need.
		 */
		if (end - next < lookup_nr)
			lookup_nr = end - next;

		/*
		 * This pagevec_lookup() may return pages past 'end',
		 * so we must check for page->index > end.
		 */
		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, next, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			lock_page(page);
			if (page->index >= end) {
				unlock_page(page);
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				next = end;	/* we are done */
				break;
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped.  Do nothing in this race case.  In the
			 * normal case page is not mapped.
			 */
			if (!page_mapped(page)) {
				bool rsv_on_error = !PagePrivate(page);
				/*
				 * We must free the huge page and remove
				 * from page cache (remove_huge_page) BEFORE
				 * removing the region/reserve map
				 * (hugetlb_unreserve_pages).  In rare out
				 * of memory conditions, removal of the
				 * region/reserve map could fail.  Before
				 * freeing the page, note PagePrivate which
				 * is used in case of error.
				 */
				remove_huge_page(page);
				freed++;
				if (!truncate_op) {
					if (unlikely(hugetlb_unreserve_pages(
							inode, next,
							next + 1, 1)))
						hugetlb_fix_reserve_counts(
							inode, rsv_on_error);
				}
			}

			if (page->index > next)
				next = page->index;

			++next;
			unlock_page(page);

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static inline void
hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (end) {
			end = ((end - start) << PAGE_SHIFT) +
				vma->vm_start + v_offset;
			if (end > vma->vm_end)
				end = vma->vm_end;
		} else
			end = vma->vm_end;

		unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
	}
}
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
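
	/*
	 * Example, assuming 2 MB huge pages: offset == 3 MB, len == 6 MB
	 * yields hole_start == 4 MB and hole_end == 8 MB, so only the two
	 * huge pages fully inside the request are removed; the partial
	 * pages at either end are left intact.
	 */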
	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(hugetlb_falloc_waitq);
		/*
		 * Page faults on the area to be hole punched must be stopped
		 * during the operation.  Initialize struct and have
		 * inode->i_private point to it.
		 */
		struct hugetlb_falloc hugetlb_falloc = {
			.waitq = &hugetlb_falloc_waitq,
			.start = hole_start >> hpage_shift,
			.end = hole_end >> hpage_shift
		};

		mutex_lock(&inode->i_mutex);

		/*
		 * inode->i_private will be checked in the page fault path.
		 * The locking assures that all writes to the structure are
		 * complete before assigning to i_private.  A fault on another
		 * CPU will see the fully initialized structure.
		 */
		spin_lock(&inode->i_lock);
		inode->i_private = &hugetlb_falloc;
		spin_unlock(&inode->i_lock);

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&hugetlb_falloc_waitq);
		spin_unlock(&inode->i_lock);

		mutex_unlock(&inode->i_mutex);
	}

	return 0;
}
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
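
	/*
	 * Example, assuming 2 MB huge pages: offset == 3 MB, len == 6 MB
	 * gives start == 1 and end == 5, so the loop below allocates
	 * indices 1 through 4; unlike hole punch, preallocation widens
	 * the range to whole huge pages.
	 */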
	mutex_lock(&inode->i_mutex);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * put_page due to reference from alloc_huge_page()
		 * unlock_page because locked by add_to_page_cache()
		 */
		put_page(page);
		unlock_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}
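
/*
 * Illustrative userspace use of the preallocate path above, assuming a
 * hugetlbfs mount at /mnt/huge with 2 MB pages (error handling elided):
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	fallocate(fd, 0, 0, 16 * 1024 * 1024);
 *
 * This backs page indices 0-7 with zeroed huge pages without faulting
 * them in through a mapping first.
 */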
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		/* The new size must land on a huge page boundary. */
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy(), it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}
/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}
/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/*
		 * If no limits are set, just report 0 for max/free/used
		 * blocks, like simple_statfs().
		 */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage    = hugetlbfs_migrate_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode    = hugetlbfs_alloc_inode,
	.destroy_inode  = hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};
enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
								int val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
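
/*
 * Example, assuming 2 MB huge pages and max_huge_pages == 1024:
 * "size=512M" arrives as (512M, SIZE_STD) and converts to
 * 512M >> 21 == 256 pages; "size=50%" arrives as (50, SIZE_PERCENT)
 * and converts to ((50 << 21) * 1024 / 100) >> 21 == 512 pages.
 */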
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	int max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller.
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size cannot be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that the caller should align size to the proper hugepage size;
 * otherwise hugetlb_reserve_pages reserves one fewer hugepage than intended.
 */
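/*
 * Example, assuming 2 MB huge pages: size == 5 MB reserves only
 * 5 MB >> 21 == 2 huge pages and leaves the final megabyte unreserved;
 * the caller should round size up to 6 MB first.
 */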
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK\n",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");