#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

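/*
 * A subpool provides per-mount huge page accounting on top of the global
 * pools: 'lock' protects the structure, 'count' is a reference count on
 * the subpool itself, 'max_hpages' is the mount's page limit (-1 for no
 * limit) and 'used_hpages' is what has been charged against it.
 */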
struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages, used_hpages;
};

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
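/* Iterate over every registered huge page size, from hstates[0] up. */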
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);
int PageHeadHuge(struct page *page_head);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
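/*
 * The _final variant is for a vma that is being torn down for good; it
 * additionally clears VM_MAYSHARE so that shared page tables are not
 * grabbed from a dying mapping.
 */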
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
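/*
 * Share a PMD page table page between mappings of the same file range:
 * if another mapping already has a populated PMD covering this
 * PUD_SIZE-aligned range, reuse it instead of allocating a new one.
 */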
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

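/*
 * huge_pte_alloc() returns the page table slot for 'addr', allocating
 * intermediate levels as needed; huge_pte_offset() only looks a slot up
 * and returns NULL if it is not populated.
 */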
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

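/*
 * With CONFIG_HUGETLB_PAGE disabled, callers still compile: paths that
 * can legitimately be reached return harmless defaults, while paths
 * that should be unreachable without hugetlb support BUG().
 */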
static inline int PageHuge(struct page *page)
{
        return 0;
}

static inline int PageHeadHuge(struct page *page_head)
{
        return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)   ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)   NULL
#define follow_huge_pud(mm, addr, pud, write)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
#define huge_pte_offset(mm, address)    0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

#define isolate_huge_page(p, l) false
#define putback_active_hugepage(p)      do {} while (0)
#define is_hugepage_active(x)   false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
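/* Per-superblock bookkeeping for a hugetlbfs mount. */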
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
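/*
 * Create an unlinked file on the internal hugetlbfs mount; used to back
 * mmap(MAP_HUGETLB) and SysV SHM_HUGETLB segments.  page_size_log picks
 * the hstate (0 means the default huge page size).
 */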
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return 1;
        if (is_file_shm_hugepages(file))
                return 1;

        return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;  /* round-robin NUMA node for next alloc */
        int next_nid_to_free;   /* round-robin NUMA node for next free */
        unsigned int order;     /* huge page size, in base-page orders */
        unsigned long mask;     /* mask of the bits above the page offset */
        unsigned long max_huge_pages;           /* configured pool size */
        unsigned long nr_huge_pages;            /* current pool size */
        unsigned long free_huge_pages;          /* pages not handed out yet */
        unsigned long resv_huge_pages;          /* pages held by reservations */
        unsigned long surplus_huge_pages;       /* overcommitted pages */
        unsigned long nr_overcommit_huge_pages; /* overcommit limit */
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];     /* e.g. "hugepages-2048kB" */
};

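/*
 * Tracks a huge page handed out by the bootmem allocator before the
 * buddy allocator is up (needed for sizes too big for the buddy lists);
 * entries sit on huge_boot_pages until they are turned into real hstate
 * pages during hugetlb initialization.
 */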
struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
        struct hugetlbfs_sb_info *hsb;
        hsb = HUGETLBFS_SB(i->i_sb);
        return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

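/*
 * Map the page-size log encoded in mmap()/shmget() flags (MAP_HUGE_SHIFT
 * et al.) to an hstate: 0 selects the default size; anything else must
 * match a registered size exactly or NULL is returned.
 */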
static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;
        /* shift as unsigned long: logs >= 31 (e.g. 16 GB pages) overflow int */
        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
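/*
 * Worked example (x86-64, 4 KiB base pages): the 2 MiB hstate has
 * order 9, so huge_page_size() = 4096 << 9 = 2 MiB, huge_page_shift()
 * = 9 + 12 = 21, pages_per_huge_page() = 512 and blocks_per_huge_page()
 * = 2 MiB / 512-byte sectors = 4096.
 */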

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Currently hugepage migration is enabled only for PMD-based huge pages.
 * This function will be updated when hugepage migration is more widely
 * supported.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
        return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}

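/*
 * With split page table locks, PMD-sized huge pages use the per-page
 * lock of the PMD page table page; everything else falls back to the
 * single mm->page_table_lock.
 */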
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#else   /* CONFIG_HUGETLB_PAGE */
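/* No hstates exist, so the helpers collapse to base-page values. */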
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)  do {} while (0)
#define pmd_huge_support()      0
#define hugepage_migration_support(h)   0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}
#endif  /* CONFIG_HUGETLB_PAGE */

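/*
 * Convenience wrapper: look up the right lock for this huge page size
 * and take it.  Callers pair it with spin_unlock() on the returned lock.
 */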
static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

#endif /* _LINUX_HUGETLB_H */