#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that is
 * needed to support multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc. It allows for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
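/*
 * Example (illustrative sketch, not part of the API): iterating over all
 * registered huge page sizes with for_each_hstate().  The pr_info() line is
 * just an assumed way of consuming the per-hstate fields.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free huge pages\n",
 *			h->name, h->free_huge_pages);
 */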
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
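/*
 * Example (illustrative sketch, assuming default_hstate is set up): create a
 * subpool capped at 16 huge pages with no reserved minimum, then drop the
 * reference.  Error handling is reduced to the bare minimum.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 16, 0);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */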
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
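/*
 * Example (illustrative sketch): the usual way the fault mutex table is used
 * to serialise faults on the same (mapping, index) pair.  This mirrors the
 * pattern in mm/hugetlb.c but is not a verbatim excerpt.
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault / page cache insertion ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */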
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
				pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
			unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif
#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}
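/*
 * Example (illustrative sketch of how a MAP_HUGETLB mmap() caller creates
 * its backing file; loosely based on mm/mmap.c and simplified):
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */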
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
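/*
 * Worked example: for a 2 MB hstate on a 4 KB PAGE_SIZE kernel, h->order is
 * 9, so huge_page_shift() is 21, huge_page_size() is 1UL << 21 (2 MB),
 * pages_per_huge_page() is 512 and blocks_per_huge_page() is 4096 512-byte
 * blocks.
 */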
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
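/*
 * Example (illustrative): callers normally bail out early when the platform
 * has no huge page support, e.g.
 *
 *	if (!hugepages_supported())
 *		return;
 */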
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}
static inline int hstate_index(struct hstate *h)
{
	return 0;
}
static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}
static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
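/*
 * Example (illustrative sketch): typical lookup-and-lock pattern for a huge
 * PTE.  The mmap_sem/locking context and error handling are omitted.
 *
 *	spinlock_t *ptl;
 *	pte_t *ptep;
 *
 *	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 *	if (!ptep)
 *		return;
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... read or update the huge PTE ...
 *	spin_unlock(ptl);
 */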
#endif /* _LINUX_HUGETLB_H */