mm/hugetlb.c (karo-tx-linux.git, blob at commit "hugetlbfs: truncate_hugepages() takes a range of pages")
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37
38 int hugepages_treat_as_movable;
39
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43 /*
44  * Minimum page order among possible hugepage sizes, set to a proper value
45  * at boot time.
46  */
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
48
49 __initdata LIST_HEAD(huge_boot_pages);
50
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations based on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
120
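
As a usage sketch (hypothetical, not part of this file): a filesystem wanting per-mount limits would create a subpool at mount time and drop its reference at unmount, much as hugetlbfs keeps one in its superblock. The example_* names below are invented for illustration.

/* Hypothetical sketch: managing a subpool's lifetime.  A max_hpages or
 * min_hpages value of -1 means "no limit", matching the checks above. */
static int example_mount_subpool(struct hstate *h,
                                 struct hugepage_subpool **spoolp)
{
        /* cap this mount at 16 huge pages, with no minimum reservation */
        *spoolp = hugepage_new_subpool(h, 16, -1);
        return *spoolp ? 0 : -ENOMEM;
}

static void example_unmount_subpool(struct hugepage_subpool *spool)
{
        /* drops the initial reference; frees the subpool once unused */
        hugepage_put_subpool(spool);
}
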
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only differ from the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         if (spool->min_hpages != -1) {          /* minimum size accounting */
149                 if (delta > spool->rsv_hpages) {
150                         /*
151                          * Asking for more reserves than those already taken on
152                          * behalf of subpool.  Return difference.
153                          */
154                         ret = delta - spool->rsv_hpages;
155                         spool->rsv_hpages = 0;
156                 } else {
157                         ret = 0;        /* reserves already accounted for */
158                         spool->rsv_hpages -= delta;
159                 }
160         }
161
162 unlock_ret:
163         spin_unlock(&spool->lock);
164         return ret;
165 }
166
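
A worked example of the minimum-size accounting above, with hypothetical values:

/*
 * Worked example (hypothetical values):
 *   spool->max_hpages = -1, spool->min_hpages = 4, spool->rsv_hpages = 4
 *
 *   hugepage_subpool_get_pages(spool, 6)
 *     delta (6) > rsv_hpages (4), so ret = 6 - 4 = 2 and rsv_hpages = 0.
 *     The caller charges only 2 pages against the global pool; the other
 *     4 were already reserved on the subpool's behalf.
 *
 *   hugepage_subpool_get_pages(spool, 2)   (from the original state)
 *     delta (2) <= rsv_hpages (4), so ret = 0 and rsv_hpages = 2.
 *     The request is fully covered by the minimum-size reserve.
 */
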
167 /*
168  * Subpool accounting for freeing and unreserving pages.
169  * Return the number of global page reservations that must be dropped.
170  * The return value may only differ from the passed value (delta)
171  * in the case where a subpool minimum size must be maintained.
172  */
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
174                                        long delta)
175 {
176         long ret = delta;
177
178         if (!spool)
179                 return delta;
180
181         spin_lock(&spool->lock);
182
183         if (spool->max_hpages != -1)            /* maximum size accounting */
184                 spool->used_hpages -= delta;
185
186         if (spool->min_hpages != -1) {          /* minimum size accounting */
187                 if (spool->rsv_hpages + delta <= spool->min_hpages)
188                         ret = 0;
189                 else
190                         ret = spool->rsv_hpages + delta - spool->min_hpages;
191
192                 spool->rsv_hpages += delta;
193                 if (spool->rsv_hpages > spool->min_hpages)
194                         spool->rsv_hpages = spool->min_hpages;
195         }
196
197         /*
198          * If hugetlbfs_put_super couldn't free spool due to an outstanding
199          * quota reference, free it now.
200          */
201         unlock_or_release_subpool(spool);
202
203         return ret;
204 }
205
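
Continuing the hypothetical example above, freeing pages back refills the minimum-size reserve first:

/*
 * Worked example, continued (hypothetical values):
 *   spool->min_hpages = 4, spool->rsv_hpages = 2
 *
 *   hugepage_subpool_put_pages(spool, 3)
 *     rsv_hpages + delta = 5 > min_hpages (4), so ret = 5 - 4 = 1 and
 *     rsv_hpages is clamped to 4.  One global reservation is dropped;
 *     the other two pages refill the subpool's minimum-size reserve.
 */
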
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
207 {
208         return HUGETLBFS_SB(inode->i_sb)->spool;
209 }
210
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
212 {
213         return subpool_inode(file_inode(vma->vm_file));
214 }
215
216 /*
217  * Region tracking -- allows tracking of reservations and instantiated pages
218  *                    across the pages in a mapping.
219  *
220  * The region data structures are embedded into a resv_map and protected
221  * by a resv_map's lock.  The set of regions within the resv_map represent
222  * reservations for huge pages, or huge pages that have already been
223  * instantiated within the map.  The from and to elements are huge page
224  * indices into the associated mapping.  from indicates the starting index
225  * of the region.  to represents the first index past the end of the region.
226  *
227  * For example, a file region structure with from == 0 and to == 4 represents
228  * four huge pages in a mapping.  It is important to note that the to element
229  * represents the first element past the end of the region. This is used in
230  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
231  *
232  * Interval notation of the form [from, to) will be used to indicate that
233  * the endpoint from is inclusive and to is exclusive.
234  */
235 struct file_region {
236         struct list_head link;
237         long from;
238         long to;
239 };
240
241 /*
242  * Add the huge page range represented by [f, t) to the reserve
243  * map.  In the normal case, existing regions will be expanded
244  * to accommodate the specified range.  Sufficient regions should
245  * exist for expansion due to the previous call to region_chg
246  * with the same range.  However, it is possible that region_del
247  * could have been called after region_chg and modified the map
248  * in such a way that no region exists to be expanded.  In this
249  * case, pull a region descriptor from the cache associated with
250  * the map and use that for the new range.
251  *
252  * Return the number of new huge pages added to the map.  This
253  * number is greater than or equal to zero.
254  */
255 static long region_add(struct resv_map *resv, long f, long t)
256 {
257         struct list_head *head = &resv->regions;
258         struct file_region *rg, *nrg, *trg;
259         long add = 0;
260
261         spin_lock(&resv->lock);
262         /* Locate the region we are either in or before. */
263         list_for_each_entry(rg, head, link)
264                 if (f <= rg->to)
265                         break;
266
267         /*
268          * If no region exists which can be expanded to include the
269          * specified range, the list must have been modified by an
270  * interleaving call to region_del().  Pull a region descriptor
271          * from the cache and use it for this range.
272          */
273         if (&rg->link == head || t < rg->from) {
274                 VM_BUG_ON(resv->region_cache_count <= 0);
275
276                 resv->region_cache_count--;
277                 nrg = list_first_entry(&resv->region_cache, struct file_region,
278                                         link);
279                 list_del(&nrg->link);
280
281                 nrg->from = f;
282                 nrg->to = t;
283                 list_add(&nrg->link, rg->link.prev);
284
285                 add += t - f;
286                 goto out_locked;
287         }
288
289         /* Round our left edge to the current segment if it encloses us. */
290         if (f > rg->from)
291                 f = rg->from;
292
293         /* Check for and consume any regions we now overlap with. */
294         nrg = rg;
295         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
296                 if (&rg->link == head)
297                         break;
298                 if (rg->from > t)
299                         break;
300
301                 /* If this area reaches higher, extend our area to
302                  * include it completely.  If this is not the first area
303                  * which we intend to reuse, free it. */
304                 if (rg->to > t)
305                         t = rg->to;
306                 if (rg != nrg) {
307                         /* Decrement return value by the deleted range.
308                          * Another range will span this area so that by the
309                          * end of the routine, add will be >= zero
310                          */
311                         add -= (rg->to - rg->from);
312                         list_del(&rg->link);
313                         kfree(rg);
314                 }
315         }
316
317         add += (nrg->from - f);         /* Added to beginning of region */
318         nrg->from = f;
319         add += t - nrg->to;             /* Added to end of region */
320         nrg->to = t;
321
322 out_locked:
323         resv->adds_in_progress--;
324         spin_unlock(&resv->lock);
325         VM_BUG_ON(add < 0);
326         return add;
327 }
328
329 /*
330  * Examine the existing reserve map and determine how many
331  * huge pages in the specified range [f, t) are NOT currently
332  * represented.  This routine is called before a subsequent
333  * call to region_add that will actually modify the reserve
334  * map to add the specified range [f, t).  region_chg does
335  * not change the number of huge pages represented by the
336  * map.  However, if the existing regions in the map cannot
337  * be expanded to represent the new range, a new file_region
338  * structure is added to the map as a placeholder.  This is
339  * so that the subsequent region_add call will have all the
340  * regions it needs and will not fail.
341  *
342  * Upon entry, region_chg will also examine the cache of region descriptors
343  * associated with the map.  If there are not enough descriptors cached, one
344  * will be allocated for the in progress add operation.
345  *
346  * Returns the number of huge pages that need to be added to the existing
347  * reservation map for the range [f, t).  This number is greater than or
348  * equal to zero.  -ENOMEM is returned if a new file_region structure or
349  * cache entry is needed and cannot be allocated.
350  */
351 static long region_chg(struct resv_map *resv, long f, long t)
352 {
353         struct list_head *head = &resv->regions;
354         struct file_region *rg, *nrg = NULL;
355         long chg = 0;
356
357 retry:
358         spin_lock(&resv->lock);
359 retry_locked:
360         resv->adds_in_progress++;
361
362         /*
363          * Check for sufficient descriptors in the cache to accommodate
364          * the number of in progress add operations.
365          */
366         if (resv->adds_in_progress > resv->region_cache_count) {
367                 struct file_region *trg;
368
369                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
370                 /* Must drop lock to allocate a new descriptor. */
371                 resv->adds_in_progress--;
372                 spin_unlock(&resv->lock);
373
374                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
375                 if (!trg)
376                         return -ENOMEM;
377
378                 spin_lock(&resv->lock);
379                 list_add(&trg->link, &resv->region_cache);
380                 resv->region_cache_count++;
381                 goto retry_locked;
382         }
383
384         /* Locate the region we are before or in. */
385         list_for_each_entry(rg, head, link)
386                 if (f <= rg->to)
387                         break;
388
389         /* If we are below the current region then a new region is required.
390          * Subtle: allocate a new region at the position but make it zero
391          * size such that we are guaranteed to record the reservation. */
392         if (&rg->link == head || t < rg->from) {
393                 if (!nrg) {
394                         resv->adds_in_progress--;
395                         spin_unlock(&resv->lock);
396                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
397                         if (!nrg)
398                                 return -ENOMEM;
399
400                         nrg->from = f;
401                         nrg->to   = f;
402                         INIT_LIST_HEAD(&nrg->link);
403                         goto retry;
404                 }
405
406                 list_add(&nrg->link, rg->link.prev);
407                 chg = t - f;
408                 goto out_nrg;
409         }
410
411         /* Round our left edge to the current segment if it encloses us. */
412         if (f > rg->from)
413                 f = rg->from;
414         chg = t - f;
415
416         /* Check for and consume any regions we now overlap with. */
417         list_for_each_entry(rg, rg->link.prev, link) {
418                 if (&rg->link == head)
419                         break;
420                 if (rg->from > t)
421                         goto out;
422
423                 /* We overlap with this area; if it extends further than
424                  * us then we must extend ourselves.  Account for its
425                  * existing reservation. */
426                 if (rg->to > t) {
427                         chg += rg->to - t;
428                         t = rg->to;
429                 }
430                 chg -= rg->to - rg->from;
431         }
432
433 out:
434         spin_unlock(&resv->lock);
435         /*  We already know we raced and no longer need the new region */
436         kfree(nrg);
437         return chg;
438 out_nrg:
439         spin_unlock(&resv->lock);
440         return chg;
441 }
442
443 /*
444  * Abort the in progress add operation.  The adds_in_progress field
445  * of the resv_map keeps track of the operations in progress between
446  * calls to region_chg and region_add.  Operations are sometimes
447  * aborted after the call to region_chg.  In such cases, region_abort
448  * is called to decrement the adds_in_progress counter.
449  *
450  * NOTE: The range arguments [f, t) are not needed or used in this
451  * routine.  They are kept to make reading the calling code easier as
452  * arguments will match the associated region_chg call.
453  */
454 static void region_abort(struct resv_map *resv, long f, long t)
455 {
456         spin_lock(&resv->lock);
457         VM_BUG_ON(!resv->region_cache_count);
458         resv->adds_in_progress--;
459         spin_unlock(&resv->lock);
460 }
461
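
Taken together, region_chg(), region_add() and region_abort() form a two-phase protocol. A hypothetical caller sketch follows; example_charge_global() is an invented placeholder for the global accounting step that sits between the two phases.

static long example_reserve_range(struct resv_map *resv, long from, long to)
{
        long chg;

        chg = region_chg(resv, from, to);  /* phase 1: may allocate/sleep */
        if (chg < 0)
                return chg;                /* -ENOMEM */

        if (example_charge_global(chg)) {      /* hypothetical accounting */
                region_abort(resv, from, to);  /* undo adds_in_progress */
                return -ENOMEM;
        }

        return region_add(resv, from, to); /* phase 2: cannot fail */
}
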
462 /*
463  * Delete the specified range [f, t) from the reserve map.  If the
464  * t parameter is LONG_MAX, this indicates that ALL regions after f
465  * should be deleted.  Locate the regions which intersect [f, t)
466  * and either trim, delete or split the existing regions.
467  *
468  * Returns the number of huge pages deleted from the reserve map.
469  * In the normal case, the return value is zero or more.  In the
470  * case where a region must be split, a new region descriptor must
471  * be allocated.  If the allocation fails, -ENOMEM will be returned.
472  * NOTE: If the parameter t == LONG_MAX, then we will never split
473  * a region, and therefore never return -ENOMEM.  Callers specifying
474  * t == LONG_MAX do not need to check for an -ENOMEM error.
475  */
476 static long region_del(struct resv_map *resv, long f, long t)
477 {
478         struct list_head *head = &resv->regions;
479         struct file_region *rg, *trg;
480         struct file_region *nrg = NULL;
481         long del = 0;
482
483 retry:
484         spin_lock(&resv->lock);
485         list_for_each_entry_safe(rg, trg, head, link) {
486                 if (rg->to <= f)
487                         continue;
488                 if (rg->from >= t)
489                         break;
490
491                 if (f > rg->from && t < rg->to) { /* Must split region */
492                         /*
493                          * Check for an entry in the cache before dropping
494                          * lock and attempting allocation.
495                          */
496                         if (!nrg &&
497                             resv->region_cache_count > resv->adds_in_progress) {
498                                 nrg = list_first_entry(&resv->region_cache,
499                                                         struct file_region,
500                                                         link);
501                                 list_del(&nrg->link);
502                                 resv->region_cache_count--;
503                         }
504
505                         if (!nrg) {
506                                 spin_unlock(&resv->lock);
507                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
508                                 if (!nrg)
509                                         return -ENOMEM;
510                                 goto retry;
511                         }
512
513                         del += t - f;
514
515                         /* New entry for end of split region */
516                         nrg->from = t;
517                         nrg->to = rg->to;
518                         INIT_LIST_HEAD(&nrg->link);
519
520                         /* Original entry is trimmed */
521                         rg->to = f;
522
523                         list_add(&nrg->link, &rg->link);
524                         nrg = NULL;
525                         break;
526                 }
527
528                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
529                         del += rg->to - rg->from;
530                         list_del(&rg->link);
531                         kfree(rg);
532                         continue;
533                 }
534
535                 if (f <= rg->from) {    /* Trim beginning of region */
536                         del += t - rg->from;
537                         rg->from = t;
538                 } else {                /* Trim end of region */
539                         del += rg->to - f;
540                         rg->to = f;
541                 }
542         }
543
544         spin_unlock(&resv->lock);
545         kfree(nrg);
546         return del;
547 }
548
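
A worked example of the trimming cases above, against a hypothetical map:

/*
 * Worked example (hypothetical map holding one region [0, 10)):
 *
 *   region_del(resv, 3, 7)  - must split: the original entry is trimmed
 *                             to [0, 3), a new descriptor covers [7, 10),
 *                             and del = 7 - 3 = 4 pages are reported.
 *   region_del(resv, 0, 5)  - trims the beginning: the region becomes
 *                             [5, 10), and del = 5.
 *   region_del(resv, 0, LONG_MAX) - removes every region outright; the
 *                             split path can never be taken, so -ENOMEM
 *                             is impossible, as noted above.
 */
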
549 /*
550  * A rare out-of-memory error was encountered which prevented removal of
551  * the reserve map region for a page.  The huge page itself was freed
552  * and removed from the page cache.  This routine will adjust the subpool
553  * usage count, and the global reserve count if needed.  By incrementing
554  * these counts, the reserve map entry which could not be deleted will
555  * appear as a "reserved" entry instead of simply dangling with incorrect
556  * counts.
557  */
558 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
559 {
560         struct hugepage_subpool *spool = subpool_inode(inode);
561         long rsv_adjust;
562
563         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
564         if (restore_reserve && rsv_adjust) {
565                 struct hstate *h = hstate_inode(inode);
566
567                 hugetlb_acct_memory(h, 1);
568         }
569 }
570
571 /*
572  * Count and return the number of huge pages in the reserve map
573  * that intersect with the range [f, t).
574  */
575 static long region_count(struct resv_map *resv, long f, long t)
576 {
577         struct list_head *head = &resv->regions;
578         struct file_region *rg;
579         long chg = 0;
580
581         spin_lock(&resv->lock);
582         /* Locate each segment we overlap with, and count that overlap. */
583         list_for_each_entry(rg, head, link) {
584                 long seg_from;
585                 long seg_to;
586
587                 if (rg->to <= f)
588                         continue;
589                 if (rg->from >= t)
590                         break;
591
592                 seg_from = max(rg->from, f);
593                 seg_to = min(rg->to, t);
594
595                 chg += seg_to - seg_from;
596         }
597         spin_unlock(&resv->lock);
598
599         return chg;
600 }
601
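
For example (hypothetical map contents):

/*
 * Worked example (hypothetical): with regions [0, 4) and [8, 12),
 * region_count(resv, 2, 10) sums the overlaps
 *   [2, 4)  -> 2 pages, and
 *   [8, 10) -> 2 pages,
 * returning 4.
 */
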
602 /*
603  * Convert the address within this vma to the page offset within
604  * the mapping, in pagecache page units; huge pages here.
605  */
606 static pgoff_t vma_hugecache_offset(struct hstate *h,
607                         struct vm_area_struct *vma, unsigned long address)
608 {
609         return ((address - vma->vm_start) >> huge_page_shift(h)) +
610                         (vma->vm_pgoff >> huge_page_order(h));
611 }
612
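
A quick arithmetic check of the formula above, assuming 2 MB huge pages (huge_page_shift(h) == 21, huge_page_order(h) == 9) and vm_pgoff == 0:

/*
 * Worked example (hypothetical): address = vma->vm_start + 6 MB gives
 *   (6 MB >> 21) + (0 >> 9) = 3,
 * i.e. the address lands in the fourth huge page of the mapping.
 */
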
613 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
614                                      unsigned long address)
615 {
616         return vma_hugecache_offset(hstate_vma(vma), vma, address);
617 }
618
619 /*
620  * Return the size of the pages allocated when backing a VMA. In the majority
621  * of cases this will be the same size as used by the page table entries.
622  */
623 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
624 {
625         struct hstate *hstate;
626
627         if (!is_vm_hugetlb_page(vma))
628                 return PAGE_SIZE;
629
630         hstate = hstate_vma(vma);
631
632         return 1UL << huge_page_shift(hstate);
633 }
634 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
635
636 /*
637  * Return the page size being used by the MMU to back a VMA. In the majority
638  * of cases, the page size used by the kernel matches the MMU size. On
639  * architectures where it differs, an architecture-specific version of this
640  * function is required.
641  */
642 #ifndef vma_mmu_pagesize
643 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
644 {
645         return vma_kernel_pagesize(vma);
646 }
647 #endif
648
649 /*
650  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
651  * bits of the reservation map pointer, which are always clear due to
652  * alignment.
653  */
654 #define HPAGE_RESV_OWNER    (1UL << 0)
655 #define HPAGE_RESV_UNMAPPED (1UL << 1)
656 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
657
658 /*
659  * These helpers are used to track how many pages are reserved for
660  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
661  * is guaranteed to have its future faults succeed.
662  *
663  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
664  * the reserve counters are updated with the hugetlb_lock held. It is safe
665  * to reset the VMA at fork() time as it is not in use yet and there is no
666  * chance of the global counters getting corrupted as a result of the values.
667  *
668  * The private mapping reservation is represented in a subtly different
669  * manner from a shared mapping.  A shared mapping has a region map
670  * associated with the underlying file; this region map represents the
671  * backing file pages which have ever had a reservation assigned, and it
672  * persists even after a page is instantiated.  A private mapping has a
673  * region map associated with the original mmap which is attached to all
674  * VMAs that reference it; this region map represents those offsets which
675  * have consumed a reservation, i.e. where pages have been instantiated.
676  */
677 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
678 {
679         return (unsigned long)vma->vm_private_data;
680 }
681
682 static void set_vma_private_data(struct vm_area_struct *vma,
683                                                         unsigned long value)
684 {
685         vma->vm_private_data = (void *)value;
686 }
687
688 struct resv_map *resv_map_alloc(void)
689 {
690         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
691         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
692
693         if (!resv_map || !rg) {
694                 kfree(resv_map);
695                 kfree(rg);
696                 return NULL;
697         }
698
699         kref_init(&resv_map->refs);
700         spin_lock_init(&resv_map->lock);
701         INIT_LIST_HEAD(&resv_map->regions);
702
703         resv_map->adds_in_progress = 0;
704
705         INIT_LIST_HEAD(&resv_map->region_cache);
706         list_add(&rg->link, &resv_map->region_cache);
707         resv_map->region_cache_count = 1;
708
709         return resv_map;
710 }
711
712 void resv_map_release(struct kref *ref)
713 {
714         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
715         struct list_head *head = &resv_map->region_cache;
716         struct file_region *rg, *trg;
717
718         /* Clear out any active regions before we release the map. */
719         region_del(resv_map, 0, LONG_MAX);
720
721         /* ... and any entries left in the cache */
722         list_for_each_entry_safe(rg, trg, head, link) {
723                 list_del(&rg->link);
724                 kfree(rg);
725         }
726
727         VM_BUG_ON(resv_map->adds_in_progress);
728
729         kfree(resv_map);
730 }
731
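
A lifetime sketch (hypothetical caller): the map is reference-counted, and the final kref_put() runs resv_map_release() above.

static void example_resv_map_lifetime(void)
{
        struct resv_map *map = resv_map_alloc();

        if (!map)
                return;
        /* ... use the map via region_chg()/region_add()/region_del() ... */
        kref_put(&map->refs, resv_map_release);  /* last ref frees the map */
}
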
732 static inline struct resv_map *inode_resv_map(struct inode *inode)
733 {
734         return inode->i_mapping->private_data;
735 }
736
737 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
738 {
739         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
740         if (vma->vm_flags & VM_MAYSHARE) {
741                 struct address_space *mapping = vma->vm_file->f_mapping;
742                 struct inode *inode = mapping->host;
743
744                 return inode_resv_map(inode);
745
746         } else {
747                 return (struct resv_map *)(get_vma_private_data(vma) &
748                                                         ~HPAGE_RESV_MASK);
749         }
750 }
751
752 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
753 {
754         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
755         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
756
757         set_vma_private_data(vma, (get_vma_private_data(vma) &
758                                 HPAGE_RESV_MASK) | (unsigned long)map);
759 }
760
761 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
762 {
763         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765
766         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
767 }
768
769 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
770 {
771         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
772
773         return (get_vma_private_data(vma) & flag) != 0;
774 }
775
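
A round-trip sketch of the pointer/flag packing (hypothetical, and valid only for a MAP_PRIVATE vma, since the setters BUG on VM_MAYSHARE): kmalloc()ed resv_map pointers are aligned, so the low HPAGE_RESV_* bits are free to carry flags.

static void example_pack_private_resv(struct vm_area_struct *vma,
                                      struct resv_map *map)
{
        set_vma_resv_map(vma, map);
        set_vma_resv_flags(vma, HPAGE_RESV_OWNER);

        /* both the pointer and the flag survive the packing */
        VM_BUG_ON(vma_resv_map(vma) != map);
        VM_BUG_ON(!is_vma_resv_set(vma, HPAGE_RESV_OWNER));
}
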
776 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
777 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
778 {
779         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
780         if (!(vma->vm_flags & VM_MAYSHARE))
781                 vma->vm_private_data = (void *)0;
782 }
783
784 /* Returns true if the VMA has associated reserve pages */
785 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
786 {
787         if (vma->vm_flags & VM_NORESERVE) {
788                 /*
789                  * This address is already reserved by another process (chg == 0),
790                  * so we should decrement the reserved count. Without decrementing,
791                  * the reserve count remains after releasing the inode, because the
792                  * allocated page will go into the page cache and is regarded as
793                  * coming from the reserved pool in the releasing step.  Currently,
794                  * we don't have any other solution to deal with this situation
795                  * properly, so add a work-around here.
796                  */
797                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
798                         return true;
799                 else
800                         return false;
801         }
802
803         /* Shared mappings always use reserves */
804         if (vma->vm_flags & VM_MAYSHARE)
805                 return true;
806
807         /*
808          * Only the process that called mmap() has reserves for
809          * private mappings.
810          */
811         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
812                 return true;
813
814         return false;
815 }
816
817 static void enqueue_huge_page(struct hstate *h, struct page *page)
818 {
819         int nid = page_to_nid(page);
820         list_move(&page->lru, &h->hugepage_freelists[nid]);
821         h->free_huge_pages++;
822         h->free_huge_pages_node[nid]++;
823 }
824
825 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
826 {
827         struct page *page;
828
829         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
830                 if (!is_migrate_isolate_page(page))
831                         break;
832         /*
833          * if a non-isolated free hugepage is not found on the list,
834          * the allocation fails.
835          */
836         if (&h->hugepage_freelists[nid] == &page->lru)
837                 return NULL;
838         list_move(&page->lru, &h->hugepage_activelist);
839         set_page_refcounted(page);
840         h->free_huge_pages--;
841         h->free_huge_pages_node[nid]--;
842         return page;
843 }
844
845 /* Movability of hugepages depends on migration support. */
846 static inline gfp_t htlb_alloc_mask(struct hstate *h)
847 {
848         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
849                 return GFP_HIGHUSER_MOVABLE;
850         else
851                 return GFP_HIGHUSER;
852 }
853
854 static struct page *dequeue_huge_page_vma(struct hstate *h,
855                                 struct vm_area_struct *vma,
856                                 unsigned long address, int avoid_reserve,
857                                 long chg)
858 {
859         struct page *page = NULL;
860         struct mempolicy *mpol;
861         nodemask_t *nodemask;
862         struct zonelist *zonelist;
863         struct zone *zone;
864         struct zoneref *z;
865         unsigned int cpuset_mems_cookie;
866
867         /*
868          * A child process with MAP_PRIVATE mappings created by its parent
869          * has no page reserves. This check ensures that reservations are
870          * not "stolen". The child may still get SIGKILLed.
871          */
872         if (!vma_has_reserves(vma, chg) &&
873                         h->free_huge_pages - h->resv_huge_pages == 0)
874                 goto err;
875
876         /* If reserves cannot be used, ensure enough pages are in the pool */
877         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
878                 goto err;
879
880 retry_cpuset:
881         cpuset_mems_cookie = read_mems_allowed_begin();
882         zonelist = huge_zonelist(vma, address,
883                                         htlb_alloc_mask(h), &mpol, &nodemask);
884
885         for_each_zone_zonelist_nodemask(zone, z, zonelist,
886                                                 MAX_NR_ZONES - 1, nodemask) {
887                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
888                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
889                         if (page) {
890                                 if (avoid_reserve)
891                                         break;
892                                 if (!vma_has_reserves(vma, chg))
893                                         break;
894
895                                 SetPagePrivate(page);
896                                 h->resv_huge_pages--;
897                                 break;
898                         }
899                 }
900         }
901
902         mpol_cond_put(mpol);
903         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
904                 goto retry_cpuset;
905         return page;
906
907 err:
908         return NULL;
909 }
910
911 /*
912  * common helper functions for hstate_next_node_to_{alloc|free}.
913  * We may have allocated or freed a huge page based on a different
914  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
915  * be outside of *nodes_allowed.  Ensure that we use an allowed
916  * node for alloc or free.
917  */
918 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
919 {
920         nid = next_node(nid, *nodes_allowed);
921         if (nid == MAX_NUMNODES)
922                 nid = first_node(*nodes_allowed);
923         VM_BUG_ON(nid >= MAX_NUMNODES);
924
925         return nid;
926 }
927
928 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
929 {
930         if (!node_isset(nid, *nodes_allowed))
931                 nid = next_node_allowed(nid, nodes_allowed);
932         return nid;
933 }
934
935 /*
936  * returns the previously saved node ["this node"] from which to
937  * allocate a persistent huge page for the pool and advance the
938  * next node from which to allocate, handling wrap at end of node
939  * mask.
940  */
941 static int hstate_next_node_to_alloc(struct hstate *h,
942                                         nodemask_t *nodes_allowed)
943 {
944         int nid;
945
946         VM_BUG_ON(!nodes_allowed);
947
948         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
949         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
950
951         return nid;
952 }
953
954 /*
955  * helper for free_pool_huge_page() - return the previously saved
956  * node ["this node"] from which to free a huge page.  Advance the
957  * next node id whether or not we find a free huge page to free so
958  * that the next attempt to free addresses the next node.
959  */
960 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
961 {
962         int nid;
963
964         VM_BUG_ON(!nodes_allowed);
965
966         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
967         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
968
969         return nid;
970 }
971
972 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
973         for (nr_nodes = nodes_weight(*mask);                            \
974                 nr_nodes > 0 &&                                         \
975                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
976                 nr_nodes--)
977
978 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
979         for (nr_nodes = nodes_weight(*mask);                            \
980                 nr_nodes > 0 &&                                         \
981                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
982                 nr_nodes--)
983
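
A minimal sketch of how these macros are meant to be used; example_alloc_on_node() is an invented placeholder (alloc_fresh_huge_page() later in this file follows the same shape). Each iteration yields the next allowed node, and nr_nodes bounds the walk to one full pass over the mask.

static int example_alloc_round_robin(struct hstate *h,
                                     nodemask_t *nodes_allowed)
{
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                if (example_alloc_on_node(h, node))  /* hypothetical */
                        return 1;   /* success; position already advanced */
        }
        return 0;                   /* every allowed node was tried once */
}
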
984 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
985 static void destroy_compound_gigantic_page(struct page *page,
986                                         unsigned long order)
987 {
988         int i;
989         int nr_pages = 1 << order;
990         struct page *p = page + 1;
991
992         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
993                 __ClearPageTail(p);
994                 set_page_refcounted(p);
995                 p->first_page = NULL;
996         }
997
998         set_compound_order(page, 0);
999         __ClearPageHead(page);
1000 }
1001
1002 static void free_gigantic_page(struct page *page, unsigned order)
1003 {
1004         free_contig_range(page_to_pfn(page), 1 << order);
1005 }
1006
1007 static int __alloc_gigantic_page(unsigned long start_pfn,
1008                                 unsigned long nr_pages)
1009 {
1010         unsigned long end_pfn = start_pfn + nr_pages;
1011         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1012 }
1013
1014 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1015                                 unsigned long nr_pages)
1016 {
1017         unsigned long i, end_pfn = start_pfn + nr_pages;
1018         struct page *page;
1019
1020         for (i = start_pfn; i < end_pfn; i++) {
1021                 if (!pfn_valid(i))
1022                         return false;
1023
1024                 page = pfn_to_page(i);
1025
1026                 if (PageReserved(page))
1027                         return false;
1028
1029                 if (page_count(page) > 0)
1030                         return false;
1031
1032                 if (PageHuge(page))
1033                         return false;
1034         }
1035
1036         return true;
1037 }
1038
1039 static bool zone_spans_last_pfn(const struct zone *zone,
1040                         unsigned long start_pfn, unsigned long nr_pages)
1041 {
1042         unsigned long last_pfn = start_pfn + nr_pages - 1;
1043         return zone_spans_pfn(zone, last_pfn);
1044 }
1045
1046 static struct page *alloc_gigantic_page(int nid, unsigned order)
1047 {
1048         unsigned long nr_pages = 1 << order;
1049         unsigned long ret, pfn, flags;
1050         struct zone *z;
1051
1052         z = NODE_DATA(nid)->node_zones;
1053         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1054                 spin_lock_irqsave(&z->lock, flags);
1055
1056                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1057                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1058                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1059                                 /*
1060                                  * We release the zone lock here because
1061                                  * alloc_contig_range() will also lock the zone
1062                                  * at some point. If there's an allocation
1063                                  * spinning on this lock, it may win the race
1064                                  * and cause alloc_contig_range() to fail...
1065                                  */
1066                                 spin_unlock_irqrestore(&z->lock, flags);
1067                                 ret = __alloc_gigantic_page(pfn, nr_pages);
1068                                 if (!ret)
1069                                         return pfn_to_page(pfn);
1070                                 spin_lock_irqsave(&z->lock, flags);
1071                         }
1072                         pfn += nr_pages;
1073                 }
1074
1075                 spin_unlock_irqrestore(&z->lock, flags);
1076         }
1077
1078         return NULL;
1079 }
1080
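
A sizing check for the scan above, under the hypothetical assumption of 1 GB gigantic pages with a 4 KB base page:

/*
 * Worked example (hypothetical): a 1 GB gigantic page has order = 18,
 * so nr_pages = 1 << 18 = 262144 base pages.  The scan therefore
 * starts at pfn = ALIGN(zone_start_pfn, 262144) and advances in
 * 1 GB-aligned strides (pfn += 262144) until the candidate range no
 * longer fits in the zone.
 */
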
1081 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1082 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
1083
1084 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1085 {
1086         struct page *page;
1087
1088         page = alloc_gigantic_page(nid, huge_page_order(h));
1089         if (page) {
1090                 prep_compound_gigantic_page(page, huge_page_order(h));
1091                 prep_new_huge_page(h, page, nid);
1092         }
1093
1094         return page;
1095 }
1096
1097 static int alloc_fresh_gigantic_page(struct hstate *h,
1098                                 nodemask_t *nodes_allowed)
1099 {
1100         struct page *page = NULL;
1101         int nr_nodes, node;
1102
1103         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1104                 page = alloc_fresh_gigantic_page_node(h, node);
1105                 if (page)
1106                         return 1;
1107         }
1108
1109         return 0;
1110 }
1111
1112 static inline bool gigantic_page_supported(void) { return true; }
1113 #else
1114 static inline bool gigantic_page_supported(void) { return false; }
1115 static inline void free_gigantic_page(struct page *page, unsigned order) { }
1116 static inline void destroy_compound_gigantic_page(struct page *page,
1117                                                 unsigned long order) { }
1118 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1119                                         nodemask_t *nodes_allowed) { return 0; }
1120 #endif
1121
1122 static void update_and_free_page(struct hstate *h, struct page *page)
1123 {
1124         int i;
1125
1126         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1127                 return;
1128
1129         h->nr_huge_pages--;
1130         h->nr_huge_pages_node[page_to_nid(page)]--;
1131         for (i = 0; i < pages_per_huge_page(h); i++) {
1132                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1133                                 1 << PG_referenced | 1 << PG_dirty |
1134                                 1 << PG_active | 1 << PG_private |
1135                                 1 << PG_writeback);
1136         }
1137         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1138         set_compound_page_dtor(page, NULL);
1139         set_page_refcounted(page);
1140         if (hstate_is_gigantic(h)) {
1141                 destroy_compound_gigantic_page(page, huge_page_order(h));
1142                 free_gigantic_page(page, huge_page_order(h));
1143         } else {
1144                 __free_pages(page, huge_page_order(h));
1145         }
1146 }
1147
1148 struct hstate *size_to_hstate(unsigned long size)
1149 {
1150         struct hstate *h;
1151
1152         for_each_hstate(h) {
1153                 if (huge_page_size(h) == size)
1154                         return h;
1155         }
1156         return NULL;
1157 }
1158
1159 /*
1160  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1161  * to hstate->hugepage_activelist).
1162  *
1163  * This function can be called for tail pages, but never returns true for them.
1164  */
1165 bool page_huge_active(struct page *page)
1166 {
1167         VM_BUG_ON_PAGE(!PageHuge(page), page);
1168         return PageHead(page) && PagePrivate(&page[1]);
1169 }
1170
1171 /* never called for tail page */
1172 static void set_page_huge_active(struct page *page)
1173 {
1174         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1175         SetPagePrivate(&page[1]);
1176 }
1177
1178 static void clear_page_huge_active(struct page *page)
1179 {
1180         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1181         ClearPagePrivate(&page[1]);
1182 }
1183
1184 void free_huge_page(struct page *page)
1185 {
1186         /*
1187          * Can't pass hstate in here because it is called from the
1188          * compound page destructor.
1189          */
1190         struct hstate *h = page_hstate(page);
1191         int nid = page_to_nid(page);
1192         struct hugepage_subpool *spool =
1193                 (struct hugepage_subpool *)page_private(page);
1194         bool restore_reserve;
1195
1196         set_page_private(page, 0);
1197         page->mapping = NULL;
1198         BUG_ON(page_count(page));
1199         BUG_ON(page_mapcount(page));
1200         restore_reserve = PagePrivate(page);
1201         ClearPagePrivate(page);
1202
1203         /*
1204          * A return code of zero implies that the subpool will be under its
1205          * minimum size if the reservation is not restored after the page is freed.
1206          * Therefore, force restore_reserve operation.
1207          */
1208         if (hugepage_subpool_put_pages(spool, 1) == 0)
1209                 restore_reserve = true;
1210
1211         spin_lock(&hugetlb_lock);
1212         clear_page_huge_active(page);
1213         hugetlb_cgroup_uncharge_page(hstate_index(h),
1214                                      pages_per_huge_page(h), page);
1215         if (restore_reserve)
1216                 h->resv_huge_pages++;
1217
1218         if (h->surplus_huge_pages_node[nid]) {
1219                 /* remove the page from active list */
1220                 list_del(&page->lru);
1221                 update_and_free_page(h, page);
1222                 h->surplus_huge_pages--;
1223                 h->surplus_huge_pages_node[nid]--;
1224         } else {
1225                 arch_clear_hugepage_flags(page);
1226                 enqueue_huge_page(h, page);
1227         }
1228         spin_unlock(&hugetlb_lock);
1229 }
1230
1231 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1232 {
1233         INIT_LIST_HEAD(&page->lru);
1234         set_compound_page_dtor(page, free_huge_page);
1235         spin_lock(&hugetlb_lock);
1236         set_hugetlb_cgroup(page, NULL);
1237         h->nr_huge_pages++;
1238         h->nr_huge_pages_node[nid]++;
1239         spin_unlock(&hugetlb_lock);
1240         put_page(page); /* free it into the hugepage allocator */
1241 }
1242
1243 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
1244 {
1245         int i;
1246         int nr_pages = 1 << order;
1247         struct page *p = page + 1;
1248
1249         /* we rely on prep_new_huge_page to set the destructor */
1250         set_compound_order(page, order);
1251         __SetPageHead(page);
1252         __ClearPageReserved(page);
1253         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1254                 /*
1255                  * For gigantic hugepages allocated through bootmem at
1256                  * boot, it's safer to be consistent with the not-gigantic
1257                  * hugepages and clear the PG_reserved bit from all tail pages
1258          * too.  Otherwise drivers using get_user_pages() to access tail
1259                  * pages may get the reference counting wrong if they see
1260                  * PG_reserved set on a tail page (despite the head page not
1261                  * having PG_reserved set).  Enforcing this consistency between
1262                  * head and tail pages allows drivers to optimize away a check
1263          * on the head page when they need to know if put_page() is needed
1264                  * after get_user_pages().
1265                  */
1266                 __ClearPageReserved(p);
1267                 set_page_count(p, 0);
1268                 p->first_page = page;
1269                 /* Make sure p->first_page is always valid for PageTail() */
1270                 smp_wmb();
1271                 __SetPageTail(p);
1272         }
1273 }
1274
1275 /*
1276  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1277  * transparent huge pages.  See the PageTransHuge() documentation for more
1278  * details.
1279  */
1280 int PageHuge(struct page *page)
1281 {
1282         if (!PageCompound(page))
1283                 return 0;
1284
1285         page = compound_head(page);
1286         return get_compound_page_dtor(page) == free_huge_page;
1287 }
1288 EXPORT_SYMBOL_GPL(PageHuge);
1289
1290 /*
1291  * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1292  * normal or transparent huge pages.
1293  */
1294 int PageHeadHuge(struct page *page_head)
1295 {
1296         if (!PageHead(page_head))
1297                 return 0;
1298
1299         return get_compound_page_dtor(page_head) == free_huge_page;
1300 }
1301
1302 pgoff_t __basepage_index(struct page *page)
1303 {
1304         struct page *page_head = compound_head(page);
1305         pgoff_t index = page_index(page_head);
1306         unsigned long compound_idx;
1307
1308         if (!PageHuge(page_head))
1309                 return page_index(page);
1310
1311         if (compound_order(page_head) >= MAX_ORDER)
1312                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1313         else
1314                 compound_idx = page - page_head;
1315
1316         return (index << compound_order(page_head)) + compound_idx;
1317 }
1318
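
A worked example of the index arithmetic above, assuming a 2 MB huge page (compound order 9) below MAX_ORDER:

/*
 * Worked example (hypothetical): for a huge page at index 5 in its
 * mapping, the 4th base page (compound_idx = 3) has base-page index
 *   (5 << 9) + 3 = 2563.
 */
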
1319 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1320 {
1321         struct page *page;
1322
1323         page = alloc_pages_exact_node(nid,
1324                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1325                                                 __GFP_REPEAT|__GFP_NOWARN,
1326                 huge_page_order(h));
1327         if (page) {
1328                 prep_new_huge_page(h, page, nid);
1329         }
1330
1331         return page;
1332 }
1333
1334 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1335 {
1336         struct page *page;
1337         int nr_nodes, node;
1338         int ret = 0;
1339
1340         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1341                 page = alloc_fresh_huge_page_node(h, node);
1342                 if (page) {
1343                         ret = 1;
1344                         break;
1345                 }
1346         }
1347
1348         if (ret)
1349                 count_vm_event(HTLB_BUDDY_PGALLOC);
1350         else
1351                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1352
1353         return ret;
1354 }
1355
1356 /*
1357  * Free huge page from pool from next node to free.
1358  * Attempt to keep persistent huge pages more or less
1359  * balanced over allowed nodes.
1360  * Called with hugetlb_lock locked.
1361  */
1362 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1363                                                          bool acct_surplus)
1364 {
1365         int nr_nodes, node;
1366         int ret = 0;
1367
1368         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1369                 /*
1370                  * If we're returning unused surplus pages, only examine
1371                  * nodes with surplus pages.
1372                  */
1373                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1374                     !list_empty(&h->hugepage_freelists[node])) {
1375                         struct page *page =
1376                                 list_entry(h->hugepage_freelists[node].next,
1377                                           struct page, lru);
1378                         list_del(&page->lru);
1379                         h->free_huge_pages--;
1380                         h->free_huge_pages_node[node]--;
1381                         if (acct_surplus) {
1382                                 h->surplus_huge_pages--;
1383                                 h->surplus_huge_pages_node[node]--;
1384                         }
1385                         update_and_free_page(h, page);
1386                         ret = 1;
1387                         break;
1388                 }
1389         }
1390
1391         return ret;
1392 }
1393
1394 /*
1395  * Dissolve a given free hugepage into free buddy pages. This function does
1396  * nothing for in-use (including surplus) hugepages.
1397  */
1398 static void dissolve_free_huge_page(struct page *page)
1399 {
1400         spin_lock(&hugetlb_lock);
1401         if (PageHuge(page) && !page_count(page)) {
1402                 struct hstate *h = page_hstate(page);
1403                 int nid = page_to_nid(page);
1404                 list_del(&page->lru);
1405                 h->free_huge_pages--;
1406                 h->free_huge_pages_node[nid]--;
1407                 update_and_free_page(h, page);
1408         }
1409         spin_unlock(&hugetlb_lock);
1410 }
1411
1412 /*
1413  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1414  * make specified memory blocks removable from the system.
1415  * Note that start_pfn should be aligned to the (minimum) hugepage size.
1416  */
1417 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1418 {
1419         unsigned long pfn;
1420
1421         if (!hugepages_supported())
1422                 return;
1423
1424         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1425         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1426                 dissolve_free_huge_page(pfn_to_page(pfn));
1427 }
1428
1429 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1430 {
1431         struct page *page;
1432         unsigned int r_nid;
1433
1434         if (hstate_is_gigantic(h))
1435                 return NULL;
1436
1437         /*
1438          * Assume we will successfully allocate the surplus page to
1439          * prevent racing processes from causing the surplus to exceed
1440          * overcommit.
1441          *
1442          * This however introduces a different race, where a process B
1443          * tries to grow the static hugepage pool while alloc_pages() is
1444          * called by process A. B will only examine the per-node
1445          * counters in determining if surplus huge pages can be
1446          * converted to normal huge pages in adjust_pool_surplus(). A
1447          * won't be able to increment the per-node counter, until the
1448          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1449          * no more huge pages can be converted from surplus to normal
1450          * state (and doesn't try to convert again). Thus, we have a
1451          * case where a surplus huge page exists, the pool is grown, and
1452          * the surplus huge page still exists after, even though it
1453          * should just have been converted to a normal huge page. This
1454          * does not leak memory, though, as the hugepage will be freed
1455          * once it is out of use. It also does not allow the counters to
1456          * go out of whack in adjust_pool_surplus() as we don't modify
1457          * the node values until we've gotten the hugepage and only the
1458          * per-node value is checked there.
1459          */
1460         spin_lock(&hugetlb_lock);
1461         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1462                 spin_unlock(&hugetlb_lock);
1463                 return NULL;
1464         } else {
1465                 h->nr_huge_pages++;
1466                 h->surplus_huge_pages++;
1467         }
1468         spin_unlock(&hugetlb_lock);
1469
1470         if (nid == NUMA_NO_NODE)
1471                 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1472                                    __GFP_REPEAT|__GFP_NOWARN,
1473                                    huge_page_order(h));
1474         else
1475                 page = alloc_pages_exact_node(nid,
1476                         htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1477                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1478
1479         spin_lock(&hugetlb_lock);
1480         if (page) {
1481                 INIT_LIST_HEAD(&page->lru);
1482                 r_nid = page_to_nid(page);
1483                 set_compound_page_dtor(page, free_huge_page);
1484                 set_hugetlb_cgroup(page, NULL);
1485                 /*
1486                  * We incremented the global counters already
1487                  */
1488                 h->nr_huge_pages_node[r_nid]++;
1489                 h->surplus_huge_pages_node[r_nid]++;
1490                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1491         } else {
1492                 h->nr_huge_pages--;
1493                 h->surplus_huge_pages--;
1494                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1495         }
1496         spin_unlock(&hugetlb_lock);
1497
1498         return page;
1499 }
1500
1501 /*
1502  * This allocation function is useful in contexts where the vma is
1503  * irrelevant. E.g. soft-offlining uses this function because it only
1504  * cares about the physical address of the error page.
1505  */
1506 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1507 {
1508         struct page *page = NULL;
1509
1510         spin_lock(&hugetlb_lock);
1511         if (h->free_huge_pages - h->resv_huge_pages > 0)
1512                 page = dequeue_huge_page_node(h, nid);
1513         spin_unlock(&hugetlb_lock);
1514
1515         if (!page)
1516                 page = alloc_buddy_huge_page(h, nid);
1517
1518         return page;
1519 }
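/*
 * Usage sketch (illustrative, assuming a soft-offline style caller): to get
 * a replacement hugepage on the same node as a poisoned page "old":
 *
 *	struct page *new = alloc_huge_page_node(page_hstate(old),
 *						page_to_nid(old));
 *
 * A free pooled page is preferred; failing that, a surplus page is taken
 * from the buddy allocator via alloc_buddy_huge_page().
 */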
1520
1521 /*
1522  * Increase the hugetlb pool such that it can accommodate a reservation
1523  * of size 'delta'.
1524  */
1525 static int gather_surplus_pages(struct hstate *h, int delta)
1526 {
1527         struct list_head surplus_list;
1528         struct page *page, *tmp;
1529         int ret, i;
1530         int needed, allocated;
1531         bool alloc_ok = true;
1532
1533         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1534         if (needed <= 0) {
1535                 h->resv_huge_pages += delta;
1536                 return 0;
1537         }
1538
1539         allocated = 0;
1540         INIT_LIST_HEAD(&surplus_list);
1541
1542         ret = -ENOMEM;
1543 retry:
1544         spin_unlock(&hugetlb_lock);
1545         for (i = 0; i < needed; i++) {
1546                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1547                 if (!page) {
1548                         alloc_ok = false;
1549                         break;
1550                 }
1551                 list_add(&page->lru, &surplus_list);
1552         }
1553         allocated += i;
1554
1555         /*
1556          * After retaking hugetlb_lock, we need to recalculate 'needed'
1557          * because either resv_huge_pages or free_huge_pages may have changed.
1558          */
1559         spin_lock(&hugetlb_lock);
1560         needed = (h->resv_huge_pages + delta) -
1561                         (h->free_huge_pages + allocated);
1562         if (needed > 0) {
1563                 if (alloc_ok)
1564                         goto retry;
1565                 /*
1566                  * We were not able to allocate enough pages to
1567                  * satisfy the entire reservation so we free what
1568                  * we've allocated so far.
1569                  */
1570                 goto free;
1571         }
1572         /*
1573          * The surplus_list now contains _at_least_ the number of extra pages
1574          * needed to accommodate the reservation.  Add the appropriate number
1575          * of pages to the hugetlb pool and free the extras back to the buddy
1576          * allocator.  Commit the entire reservation here to prevent another
1577          * process from stealing the pages as they are added to the pool but
1578          * before they are reserved.
1579          */
1580         needed += allocated;
1581         h->resv_huge_pages += delta;
1582         ret = 0;
1583
1584         /* Free the needed pages to the hugetlb pool */
1585         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1586                 if ((--needed) < 0)
1587                         break;
1588                 /*
1589                  * This page is now managed by the hugetlb allocator and has
1590                  * no users -- drop the buddy allocator's reference.
1591                  */
1592                 put_page_testzero(page);
1593                 VM_BUG_ON_PAGE(page_count(page), page);
1594                 enqueue_huge_page(h, page);
1595         }
1596 free:
1597         spin_unlock(&hugetlb_lock);
1598
1599         /* Free unnecessary surplus pages to the buddy allocator */
1600         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1601                 put_page(page);
1602         spin_lock(&hugetlb_lock);
1603
1604         return ret;
1605 }
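/*
 * Worked example with hypothetical counts: resv_huge_pages = 10,
 * free_huge_pages = 12 and delta = 5 gives needed = (10 + 5) - 12 = 3, so
 * three surplus pages are allocated with the lock dropped. If two free
 * pages were consumed meanwhile (free_huge_pages = 10), the recheck sees
 * needed = 15 - (10 + 3) = 2 and retries. Once needed <= 0, exactly
 * needed + allocated pages are enqueued to the pool and any remainder is
 * handed back to the buddy allocator.
 */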
1606
1607 /*
1608  * When releasing a hugetlb pool reservation, any surplus pages that were
1609  * allocated to satisfy the reservation must be explicitly freed if they were
1610  * never used.
1611  * Called with hugetlb_lock held.
1612  */
1613 static void return_unused_surplus_pages(struct hstate *h,
1614                                         unsigned long unused_resv_pages)
1615 {
1616         unsigned long nr_pages;
1617
1618         /* Uncommit the reservation */
1619         h->resv_huge_pages -= unused_resv_pages;
1620
1621         /* Cannot return gigantic pages currently */
1622         if (hstate_is_gigantic(h))
1623                 return;
1624
1625         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1626
1627         /*
1628          * We want to release as many surplus pages as possible, spread
1629          * evenly across all nodes with memory. Iterate across these nodes
1630          * until we can no longer free unreserved surplus pages. This occurs
1631          * when the nodes with surplus pages have no free pages.
1632          * free_pool_huge_page() will balance the freed pages across the
1633          * on-line nodes with memory and will handle the hstate accounting.
1634          */
1635         while (nr_pages--) {
1636                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1637                         break;
1638                 cond_resched_lock(&hugetlb_lock);
1639         }
1640 }
1641
1642
1643 /*
1644  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1645  * are used by the huge page allocation routines to manage reservations.
1646  *
1647  * vma_needs_reservation is called to determine if the huge page at addr
1648  * within the vma has an associated reservation.  If a reservation is
1649  * needed, the value 1 is returned.  The caller is then responsible for
1650  * managing the global reservation and subpool usage counts.  After
1651  * the huge page has been allocated, vma_commit_reservation is called
1652  * to add the page to the reservation map.  If the page allocation fails,
1653  * the reservation must be ended instead of committed.  vma_end_reservation
1654  * is called in such cases.
1655  *
1656  * In the normal case, vma_commit_reservation returns the same value
1657  * as the preceding vma_needs_reservation call.  The only time this
1658  * is not the case is if a reserve map was changed between calls.  It
1659  * is the responsibility of the caller to notice the difference and
1660  * take appropriate action.
1661  */
1662 enum vma_resv_mode {
1663         VMA_NEEDS_RESV,
1664         VMA_COMMIT_RESV,
1665         VMA_END_RESV,
1666 };
1667 static long __vma_reservation_common(struct hstate *h,
1668                                 struct vm_area_struct *vma, unsigned long addr,
1669                                 enum vma_resv_mode mode)
1670 {
1671         struct resv_map *resv;
1672         pgoff_t idx;
1673         long ret;
1674
1675         resv = vma_resv_map(vma);
1676         if (!resv)
1677                 return 1;
1678
1679         idx = vma_hugecache_offset(h, vma, addr);
1680         switch (mode) {
1681         case VMA_NEEDS_RESV:
1682                 ret = region_chg(resv, idx, idx + 1);
1683                 break;
1684         case VMA_COMMIT_RESV:
1685                 ret = region_add(resv, idx, idx + 1);
1686                 break;
1687         case VMA_END_RESV:
1688                 region_abort(resv, idx, idx + 1);
1689                 ret = 0;
1690                 break;
1691         default:
1692                 BUG();
1693         }
1694
1695         if (vma->vm_flags & VM_MAYSHARE)
1696                 return ret;
1697         else
1698                 return ret < 0 ? ret : 0;
1699 }
1700
1701 static long vma_needs_reservation(struct hstate *h,
1702                         struct vm_area_struct *vma, unsigned long addr)
1703 {
1704         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1705 }
1706
1707 static long vma_commit_reservation(struct hstate *h,
1708                         struct vm_area_struct *vma, unsigned long addr)
1709 {
1710         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1711 }
1712
1713 static void vma_end_reservation(struct hstate *h,
1714                         struct vm_area_struct *vma, unsigned long addr)
1715 {
1716         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1717 }
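/*
 * Sketch of the calling protocol described above (illustrative, not a
 * verbatim caller):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	page = <allocate huge page>;
 *	if (page)
 *		commit = vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 *
 * alloc_huge_page() below follows this pattern, including the check for
 * chg > commit that indicates a racing reserve map update.
 */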
1718
1719 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1720                                     unsigned long addr, int avoid_reserve)
1721 {
1722         struct hugepage_subpool *spool = subpool_vma(vma);
1723         struct hstate *h = hstate_vma(vma);
1724         struct page *page;
1725         long chg, commit;
1726         int ret, idx;
1727         struct hugetlb_cgroup *h_cg;
1728
1729         idx = hstate_index(h);
1730         /*
1731          * Processes that did not create the mapping will have no
1732          * reserves and will not have accounted against the subpool
1733          * limit. Check that the subpool limit can be made before
1734          * satisfying the allocation. MAP_NORESERVE mappings may also
1735          * need pages and the subpool limit allocated if no reserve
1736          * mapping overlaps.
1737          */
1738         chg = vma_needs_reservation(h, vma, addr);
1739         if (chg < 0)
1740                 return ERR_PTR(-ENOMEM);
1741         if (chg || avoid_reserve)
1742                 if (hugepage_subpool_get_pages(spool, 1) < 0) {
1743                         vma_end_reservation(h, vma, addr);
1744                         return ERR_PTR(-ENOSPC);
1745                 }
1746
1747         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1748         if (ret)
1749                 goto out_subpool_put;
1750
1751         spin_lock(&hugetlb_lock);
1752         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1753         if (!page) {
1754                 spin_unlock(&hugetlb_lock);
1755                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1756                 if (!page)
1757                         goto out_uncharge_cgroup;
1758
1759                 spin_lock(&hugetlb_lock);
1760                 list_move(&page->lru, &h->hugepage_activelist);
1761                 /* Fall through */
1762         }
1763         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1764         spin_unlock(&hugetlb_lock);
1765
1766         set_page_private(page, (unsigned long)spool);
1767
1768         commit = vma_commit_reservation(h, vma, addr);
1769         if (unlikely(chg > commit)) {
1770                 /*
1771                  * The page was added to the reservation map between
1772                  * vma_needs_reservation and vma_commit_reservation.
1773                  * This indicates a race with hugetlb_reserve_pages.
1774                  * Adjust for the subpool count incremented above AND
1775                  * in hugetlb_reserve_pages for the same page.  Also,
1776                  * the reservation count added in hugetlb_reserve_pages
1777                  * no longer applies.
1778                  */
1779                 long rsv_adjust;
1780
1781                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1782                 hugetlb_acct_memory(h, -rsv_adjust);
1783         }
1784         return page;
1785
1786 out_uncharge_cgroup:
1787         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1788 out_subpool_put:
1789         if (chg || avoid_reserve)
1790                 hugepage_subpool_put_pages(spool, 1);
1791         vma_end_reservation(h, vma, addr);
1792         return ERR_PTR(-ENOSPC);
1793 }
1794
1795 /*
1796  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1797  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1798  * where no ERR_VALUE is expected to be returned.
1799  */
1800 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1801                                 unsigned long addr, int avoid_reserve)
1802 {
1803         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1804         if (IS_ERR(page))
1805                 page = NULL;
1806         return page;
1807 }
1808
1809 int __weak alloc_bootmem_huge_page(struct hstate *h)
1810 {
1811         struct huge_bootmem_page *m;
1812         int nr_nodes, node;
1813
1814         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1815                 void *addr;
1816
1817                 addr = memblock_virt_alloc_try_nid_nopanic(
1818                                 huge_page_size(h), huge_page_size(h),
1819                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1820                 if (addr) {
1821                         /*
1822                          * Use the beginning of the huge page to store the
1823                          * huge_bootmem_page struct (until gather_bootmem
1824                          * puts them into the mem_map).
1825                          */
1826                         m = addr;
1827                         goto found;
1828                 }
1829         }
1830         return 0;
1831
1832 found:
1833         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1834         /* Put them into a private list first because mem_map is not up yet */
1835         list_add(&m->list, &huge_boot_pages);
1836         m->hstate = h;
1837         return 1;
1838 }
1839
1840 static void __init prep_compound_huge_page(struct page *page, int order)
1841 {
1842         if (unlikely(order > (MAX_ORDER - 1)))
1843                 prep_compound_gigantic_page(page, order);
1844         else
1845                 prep_compound_page(page, order);
1846 }
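/*
 * Example (x86_64 with 4KB base pages and MAX_ORDER = 11): a 2MB hugepage
 * (order 9) goes through prep_compound_page(), while a 1GB gigantic page
 * (order 18) must take prep_compound_gigantic_page() since it exceeds what
 * the buddy allocator manages.
 */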
1847
1848 /* Put bootmem huge pages into the standard lists after mem_map is up */
1849 static void __init gather_bootmem_prealloc(void)
1850 {
1851         struct huge_bootmem_page *m;
1852
1853         list_for_each_entry(m, &huge_boot_pages, list) {
1854                 struct hstate *h = m->hstate;
1855                 struct page *page;
1856
1857 #ifdef CONFIG_HIGHMEM
1858                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1859                 memblock_free_late(__pa(m),
1860                                    sizeof(struct huge_bootmem_page));
1861 #else
1862                 page = virt_to_page(m);
1863 #endif
1864                 WARN_ON(page_count(page) != 1);
1865                 prep_compound_huge_page(page, h->order);
1866                 WARN_ON(PageReserved(page));
1867                 prep_new_huge_page(h, page, page_to_nid(page));
1868                 /*
1869                  * If we had gigantic hugepages allocated at boot time, we need
1870                  * to restore the 'stolen' pages to totalram_pages in order to
1871                  * fix confusing memory reports from free(1) and other
1872                  * side effects, like CommitLimit going negative.
1873                  */
1874                 if (hstate_is_gigantic(h))
1875                         adjust_managed_page_count(page, 1 << h->order);
1876         }
1877 }
1878
1879 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1880 {
1881         unsigned long i;
1882
1883         for (i = 0; i < h->max_huge_pages; ++i) {
1884                 if (hstate_is_gigantic(h)) {
1885                         if (!alloc_bootmem_huge_page(h))
1886                                 break;
1887                 } else if (!alloc_fresh_huge_page(h,
1888                                          &node_states[N_MEMORY]))
1889                         break;
1890         }
1891         h->max_huge_pages = i;
1892 }
1893
1894 static void __init hugetlb_init_hstates(void)
1895 {
1896         struct hstate *h;
1897
1898         for_each_hstate(h) {
1899                 if (minimum_order > huge_page_order(h))
1900                         minimum_order = huge_page_order(h);
1901
1902                 /* oversize hugepages were init'ed in early boot */
1903                 if (!hstate_is_gigantic(h))
1904                         hugetlb_hstate_alloc_pages(h);
1905         }
1906         VM_BUG_ON(minimum_order == UINT_MAX);
1907 }
1908
1909 static char * __init memfmt(char *buf, unsigned long n)
1910 {
1911         if (n >= (1UL << 30))
1912                 sprintf(buf, "%lu GB", n >> 30);
1913         else if (n >= (1UL << 20))
1914                 sprintf(buf, "%lu MB", n >> 20);
1915         else
1916                 sprintf(buf, "%lu KB", n >> 10);
1917         return buf;
1918 }
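/*
 * Examples: memfmt(buf, 1UL << 31) produces "2 GB", memfmt(buf, 2UL << 20)
 * produces "2 MB", and memfmt(buf, 64UL << 10) produces "64 KB".
 */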
1919
1920 static void __init report_hugepages(void)
1921 {
1922         struct hstate *h;
1923
1924         for_each_hstate(h) {
1925                 char buf[32];
1926                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1927                         memfmt(buf, huge_page_size(h)),
1928                         h->free_huge_pages);
1929         }
1930 }
1931
1932 #ifdef CONFIG_HIGHMEM
1933 static void try_to_free_low(struct hstate *h, unsigned long count,
1934                                                 nodemask_t *nodes_allowed)
1935 {
1936         int i;
1937
1938         if (hstate_is_gigantic(h))
1939                 return;
1940
1941         for_each_node_mask(i, *nodes_allowed) {
1942                 struct page *page, *next;
1943                 struct list_head *freel = &h->hugepage_freelists[i];
1944                 list_for_each_entry_safe(page, next, freel, lru) {
1945                         if (count >= h->nr_huge_pages)
1946                                 return;
1947                         if (PageHighMem(page))
1948                                 continue;
1949                         list_del(&page->lru);
1950                         update_and_free_page(h, page);
1951                         h->free_huge_pages--;
1952                         h->free_huge_pages_node[page_to_nid(page)]--;
1953                 }
1954         }
1955 }
1956 #else
1957 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1958                                                 nodemask_t *nodes_allowed)
1959 {
1960 }
1961 #endif
1962
1963 /*
1964  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1965  * balanced by operating on them in a round-robin fashion.
1966  * Returns 1 if an adjustment was made.
1967  */
1968 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1969                                 int delta)
1970 {
1971         int nr_nodes, node;
1972
1973         VM_BUG_ON(delta != -1 && delta != 1);
1974
1975         if (delta < 0) {
1976                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1977                         if (h->surplus_huge_pages_node[node])
1978                                 goto found;
1979                 }
1980         } else {
1981                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1982                         if (h->surplus_huge_pages_node[node] <
1983                                         h->nr_huge_pages_node[node])
1984                                 goto found;
1985                 }
1986         }
1987         return 0;
1988
1989 found:
1990         h->surplus_huge_pages += delta;
1991         h->surplus_huge_pages_node[node] += delta;
1992         return 1;
1993 }
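/*
 * Example with hypothetical counts: with nodes_allowed spanning nodes 0-1
 * and surplus_huge_pages_node = {2, 0}, a delta of -1 (pool growth)
 * converts one surplus page on node 0 to persistent state, leaving {1, 0}.
 * A delta of +1 (pool shrink) marks one page surplus on the next
 * round-robin node whose surplus count is still below its total count.
 */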
1994
1995 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1996 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1997                                                 nodemask_t *nodes_allowed)
1998 {
1999         unsigned long min_count, ret;
2000
2001         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2002                 return h->max_huge_pages;
2003
2004         /*
2005          * Increase the pool size
2006          * First take pages out of surplus state.  Then make up the
2007          * remaining difference by allocating fresh huge pages.
2008          *
2009          * We might race with alloc_buddy_huge_page() here and be unable
2010          * to convert a surplus huge page to a normal huge page. That is
2011          * not critical, though, it just means the overall size of the
2012          * pool might be one hugepage larger than it needs to be, but
2013          * within all the constraints specified by the sysctls.
2014          */
2015         spin_lock(&hugetlb_lock);
2016         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2017                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2018                         break;
2019         }
2020
2021         while (count > persistent_huge_pages(h)) {
2022                 /*
2023                  * If this allocation races such that we no longer need the
2024                  * page, free_huge_page will handle it by freeing the page
2025                  * and reducing the surplus.
2026                  */
2027                 spin_unlock(&hugetlb_lock);
2028                 if (hstate_is_gigantic(h))
2029                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2030                 else
2031                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2032                 spin_lock(&hugetlb_lock);
2033                 if (!ret)
2034                         goto out;
2035
2036                 /* Bail for signals. Probably ctrl-c from user */
2037                 if (signal_pending(current))
2038                         goto out;
2039         }
2040
2041         /*
2042          * Decrease the pool size
2043          * First return free pages to the buddy allocator (being careful
2044          * to keep enough around to satisfy reservations).  Then place
2045          * pages into surplus state as needed so the pool will shrink
2046          * to the desired size as pages become free.
2047          *
2048          * By placing pages into the surplus state independent of the
2049          * overcommit value, we are allowing the surplus pool size to
2050          * exceed overcommit. There are few sane options here. Since
2051          * alloc_buddy_huge_page() is checking the global counter,
2052          * though, we'll note that we're not allowed to exceed surplus
2053          * and won't grow the pool anywhere else. Not until one of the
2054          * sysctls is changed, or the surplus pages go out of use.
2055          */
2056         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2057         min_count = max(count, min_count);
2058         try_to_free_low(h, min_count, nodes_allowed);
2059         while (min_count < persistent_huge_pages(h)) {
2060                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2061                         break;
2062                 cond_resched_lock(&hugetlb_lock);
2063         }
2064         while (count < persistent_huge_pages(h)) {
2065                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2066                         break;
2067         }
2068 out:
2069         ret = persistent_huge_pages(h);
2070         spin_unlock(&hugetlb_lock);
2071         return ret;
2072 }
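/*
 * Userspace sketch (paths assume a 2MB hstate): both the global and the
 * per-node sysfs knobs funnel into set_max_huge_pages():
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	echo 16 > \
 *	  /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */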
2073
2074 #define HSTATE_ATTR_RO(_name) \
2075         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2076
2077 #define HSTATE_ATTR(_name) \
2078         static struct kobj_attribute _name##_attr = \
2079                 __ATTR(_name, 0644, _name##_show, _name##_store)
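/*
 * For example, HSTATE_ATTR(nr_hugepages) below expands to:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */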
2080
2081 static struct kobject *hugepages_kobj;
2082 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2083
2084 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2085
2086 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2087 {
2088         int i;
2089
2090         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2091                 if (hstate_kobjs[i] == kobj) {
2092                         if (nidp)
2093                                 *nidp = NUMA_NO_NODE;
2094                         return &hstates[i];
2095                 }
2096
2097         return kobj_to_node_hstate(kobj, nidp);
2098 }
2099
2100 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2101                                         struct kobj_attribute *attr, char *buf)
2102 {
2103         struct hstate *h;
2104         unsigned long nr_huge_pages;
2105         int nid;
2106
2107         h = kobj_to_hstate(kobj, &nid);
2108         if (nid == NUMA_NO_NODE)
2109                 nr_huge_pages = h->nr_huge_pages;
2110         else
2111                 nr_huge_pages = h->nr_huge_pages_node[nid];
2112
2113         return sprintf(buf, "%lu\n", nr_huge_pages);
2114 }
2115
2116 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2117                                            struct hstate *h, int nid,
2118                                            unsigned long count, size_t len)
2119 {
2120         int err;
2121         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2122
2123         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2124                 err = -EINVAL;
2125                 goto out;
2126         }
2127
2128         if (nid == NUMA_NO_NODE) {
2129                 /*
2130                  * global hstate attribute
2131                  */
2132                 if (!(obey_mempolicy &&
2133                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2134                         NODEMASK_FREE(nodes_allowed);
2135                         nodes_allowed = &node_states[N_MEMORY];
2136                 }
2137         } else if (nodes_allowed) {
2138                 /*
2139                  * per node hstate attribute: adjust count to global,
2140                  * but restrict alloc/free to the specified node.
2141                  */
2142                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2143                 init_nodemask_of_node(nodes_allowed, nid);
2144         } else
2145                 nodes_allowed = &node_states[N_MEMORY];
2146
2147         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2148
2149         if (nodes_allowed != &node_states[N_MEMORY])
2150                 NODEMASK_FREE(nodes_allowed);
2151
2152         return len;
2153 out:
2154         NODEMASK_FREE(nodes_allowed);
2155         return err;
2156 }
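/*
 * Example of the per-node adjustment above (hypothetical counts): with a
 * 100-page pool of which 30 pages sit on node 2, writing 50 to node 2's
 * nr_hugepages yields count = 50 + (100 - 30) = 120, a global target
 * that, with allocation restricted to node 2, grows that node to 50 pages
 * while leaving the other nodes untouched.
 */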
2157
2158 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2159                                          struct kobject *kobj, const char *buf,
2160                                          size_t len)
2161 {
2162         struct hstate *h;
2163         unsigned long count;
2164         int nid;
2165         int err;
2166
2167         err = kstrtoul(buf, 10, &count);
2168         if (err)
2169                 return err;
2170
2171         h = kobj_to_hstate(kobj, &nid);
2172         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2173 }
2174
2175 static ssize_t nr_hugepages_show(struct kobject *kobj,
2176                                        struct kobj_attribute *attr, char *buf)
2177 {
2178         return nr_hugepages_show_common(kobj, attr, buf);
2179 }
2180
2181 static ssize_t nr_hugepages_store(struct kobject *kobj,
2182                struct kobj_attribute *attr, const char *buf, size_t len)
2183 {
2184         return nr_hugepages_store_common(false, kobj, buf, len);
2185 }
2186 HSTATE_ATTR(nr_hugepages);
2187
2188 #ifdef CONFIG_NUMA
2189
2190 /*
2191  * hstate attribute for optionally mempolicy-based constraint on persistent
2192  * huge page alloc/free.
2193  */
2194 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2195                                        struct kobj_attribute *attr, char *buf)
2196 {
2197         return nr_hugepages_show_common(kobj, attr, buf);
2198 }
2199
2200 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2201                struct kobj_attribute *attr, const char *buf, size_t len)
2202 {
2203         return nr_hugepages_store_common(true, kobj, buf, len);
2204 }
2205 HSTATE_ATTR(nr_hugepages_mempolicy);
2206 #endif
2207
2208
2209 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2210                                         struct kobj_attribute *attr, char *buf)
2211 {
2212         struct hstate *h = kobj_to_hstate(kobj, NULL);
2213         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2214 }
2215
2216 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2217                 struct kobj_attribute *attr, const char *buf, size_t count)
2218 {
2219         int err;
2220         unsigned long input;
2221         struct hstate *h = kobj_to_hstate(kobj, NULL);
2222
2223         if (hstate_is_gigantic(h))
2224                 return -EINVAL;
2225
2226         err = kstrtoul(buf, 10, &input);
2227         if (err)
2228                 return err;
2229
2230         spin_lock(&hugetlb_lock);
2231         h->nr_overcommit_huge_pages = input;
2232         spin_unlock(&hugetlb_lock);
2233
2234         return count;
2235 }
2236 HSTATE_ATTR(nr_overcommit_hugepages);
2237
2238 static ssize_t free_hugepages_show(struct kobject *kobj,
2239                                         struct kobj_attribute *attr, char *buf)
2240 {
2241         struct hstate *h;
2242         unsigned long free_huge_pages;
2243         int nid;
2244
2245         h = kobj_to_hstate(kobj, &nid);
2246         if (nid == NUMA_NO_NODE)
2247                 free_huge_pages = h->free_huge_pages;
2248         else
2249                 free_huge_pages = h->free_huge_pages_node[nid];
2250
2251         return sprintf(buf, "%lu\n", free_huge_pages);
2252 }
2253 HSTATE_ATTR_RO(free_hugepages);
2254
2255 static ssize_t resv_hugepages_show(struct kobject *kobj,
2256                                         struct kobj_attribute *attr, char *buf)
2257 {
2258         struct hstate *h = kobj_to_hstate(kobj, NULL);
2259         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2260 }
2261 HSTATE_ATTR_RO(resv_hugepages);
2262
2263 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2264                                         struct kobj_attribute *attr, char *buf)
2265 {
2266         struct hstate *h;
2267         unsigned long surplus_huge_pages;
2268         int nid;
2269
2270         h = kobj_to_hstate(kobj, &nid);
2271         if (nid == NUMA_NO_NODE)
2272                 surplus_huge_pages = h->surplus_huge_pages;
2273         else
2274                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2275
2276         return sprintf(buf, "%lu\n", surplus_huge_pages);
2277 }
2278 HSTATE_ATTR_RO(surplus_hugepages);
2279
2280 static struct attribute *hstate_attrs[] = {
2281         &nr_hugepages_attr.attr,
2282         &nr_overcommit_hugepages_attr.attr,
2283         &free_hugepages_attr.attr,
2284         &resv_hugepages_attr.attr,
2285         &surplus_hugepages_attr.attr,
2286 #ifdef CONFIG_NUMA
2287         &nr_hugepages_mempolicy_attr.attr,
2288 #endif
2289         NULL,
2290 };
2291
2292 static struct attribute_group hstate_attr_group = {
2293         .attrs = hstate_attrs,
2294 };
2295
2296 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2297                                     struct kobject **hstate_kobjs,
2298                                     struct attribute_group *hstate_attr_group)
2299 {
2300         int retval;
2301         int hi = hstate_index(h);
2302
2303         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2304         if (!hstate_kobjs[hi])
2305                 return -ENOMEM;
2306
2307         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2308         if (retval)
2309                 kobject_put(hstate_kobjs[hi]);
2310
2311         return retval;
2312 }
2313
2314 static void __init hugetlb_sysfs_init(void)
2315 {
2316         struct hstate *h;
2317         int err;
2318
2319         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2320         if (!hugepages_kobj)
2321                 return;
2322
2323         for_each_hstate(h) {
2324                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2325                                          hstate_kobjs, &hstate_attr_group);
2326                 if (err)
2327                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2328         }
2329 }
2330
2331 #ifdef CONFIG_NUMA
2332
2333 /*
2334  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2335  * with node devices in node_devices[] using a parallel array.  The array
2336  * index of a node device or _hstate == node id.
2337  * This is here to avoid any static dependency of the node device driver, in
2338  * the base kernel, on the hugetlb module.
2339  */
2340 struct node_hstate {
2341         struct kobject          *hugepages_kobj;
2342         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2343 };
2344 struct node_hstate node_hstates[MAX_NUMNODES];
2345
2346 /*
2347  * A subset of global hstate attributes for node devices
2348  */
2349 static struct attribute *per_node_hstate_attrs[] = {
2350         &nr_hugepages_attr.attr,
2351         &free_hugepages_attr.attr,
2352         &surplus_hugepages_attr.attr,
2353         NULL,
2354 };
2355
2356 static struct attribute_group per_node_hstate_attr_group = {
2357         .attrs = per_node_hstate_attrs,
2358 };
2359
2360 /*
2361  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2362  * Returns node id via non-NULL nidp.
2363  */
2364 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2365 {
2366         int nid;
2367
2368         for (nid = 0; nid < nr_node_ids; nid++) {
2369                 struct node_hstate *nhs = &node_hstates[nid];
2370                 int i;
2371                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2372                         if (nhs->hstate_kobjs[i] == kobj) {
2373                                 if (nidp)
2374                                         *nidp = nid;
2375                                 return &hstates[i];
2376                         }
2377         }
2378
2379         BUG();
2380         return NULL;
2381 }
2382
2383 /*
2384  * Unregister hstate attributes from a single node device.
2385  * No-op if no hstate attributes attached.
2386  */
2387 static void hugetlb_unregister_node(struct node *node)
2388 {
2389         struct hstate *h;
2390         struct node_hstate *nhs = &node_hstates[node->dev.id];
2391
2392         if (!nhs->hugepages_kobj)
2393                 return;         /* no hstate attributes */
2394
2395         for_each_hstate(h) {
2396                 int idx = hstate_index(h);
2397                 if (nhs->hstate_kobjs[idx]) {
2398                         kobject_put(nhs->hstate_kobjs[idx]);
2399                         nhs->hstate_kobjs[idx] = NULL;
2400                 }
2401         }
2402
2403         kobject_put(nhs->hugepages_kobj);
2404         nhs->hugepages_kobj = NULL;
2405 }
2406
2407 /*
2408  * hugetlb module exit:  unregister hstate attributes from node devices
2409  * that have them.
2410  */
2411 static void hugetlb_unregister_all_nodes(void)
2412 {
2413         int nid;
2414
2415         /*
2416          * disable node device registrations.
2417          */
2418         register_hugetlbfs_with_node(NULL, NULL);
2419
2420         /*
2421          * remove hstate attributes from any nodes that have them.
2422          */
2423         for (nid = 0; nid < nr_node_ids; nid++)
2424                 hugetlb_unregister_node(node_devices[nid]);
2425 }
2426
2427 /*
2428  * Register hstate attributes for a single node device.
2429  * No-op if attributes already registered.
2430  */
2431 static void hugetlb_register_node(struct node *node)
2432 {
2433         struct hstate *h;
2434         struct node_hstate *nhs = &node_hstates[node->dev.id];
2435         int err;
2436
2437         if (nhs->hugepages_kobj)
2438                 return;         /* already allocated */
2439
2440         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2441                                                         &node->dev.kobj);
2442         if (!nhs->hugepages_kobj)
2443                 return;
2444
2445         for_each_hstate(h) {
2446                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2447                                                 nhs->hstate_kobjs,
2448                                                 &per_node_hstate_attr_group);
2449                 if (err) {
2450                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2451                                 h->name, node->dev.id);
2452                         hugetlb_unregister_node(node);
2453                         break;
2454                 }
2455         }
2456 }
2457
2458 /*
2459  * hugetlb init time:  register hstate attributes for all registered node
2460  * devices of nodes that have memory.  All on-line nodes should have
2461  * registered their associated device by this time.
2462  */
2463 static void __init hugetlb_register_all_nodes(void)
2464 {
2465         int nid;
2466
2467         for_each_node_state(nid, N_MEMORY) {
2468                 struct node *node = node_devices[nid];
2469                 if (node->dev.id == nid)
2470                         hugetlb_register_node(node);
2471         }
2472
2473         /*
2474          * Let the node device driver know we're here so it can
2475          * [un]register hstate attributes on node hotplug.
2476          */
2477         register_hugetlbfs_with_node(hugetlb_register_node,
2478                                      hugetlb_unregister_node);
2479 }
2480 #else   /* !CONFIG_NUMA */
2481
2482 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2483 {
2484         BUG();
2485         if (nidp)
2486                 *nidp = -1;
2487         return NULL;
2488 }
2489
2490 static void hugetlb_unregister_all_nodes(void) { }
2491
2492 static void hugetlb_register_all_nodes(void) { }
2493
2494 #endif
2495
2496 static void __exit hugetlb_exit(void)
2497 {
2498         struct hstate *h;
2499
2500         hugetlb_unregister_all_nodes();
2501
2502         for_each_hstate(h) {
2503                 kobject_put(hstate_kobjs[hstate_index(h)]);
2504         }
2505
2506         kobject_put(hugepages_kobj);
2507         kfree(hugetlb_fault_mutex_table);
2508 }
2509 module_exit(hugetlb_exit);
2510
2511 static int __init hugetlb_init(void)
2512 {
2513         int i;
2514
2515         if (!hugepages_supported())
2516                 return 0;
2517
2518         if (!size_to_hstate(default_hstate_size)) {
2519                 default_hstate_size = HPAGE_SIZE;
2520                 if (!size_to_hstate(default_hstate_size))
2521                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2522         }
2523         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2524         if (default_hstate_max_huge_pages)
2525                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2526
2527         hugetlb_init_hstates();
2528         gather_bootmem_prealloc();
2529         report_hugepages();
2530
2531         hugetlb_sysfs_init();
2532         hugetlb_register_all_nodes();
2533         hugetlb_cgroup_file_init();
2534
2535 #ifdef CONFIG_SMP
2536         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2537 #else
2538         num_fault_mutexes = 1;
2539 #endif
2540         hugetlb_fault_mutex_table =
2541                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2542         BUG_ON(!hugetlb_fault_mutex_table);
2543
2544         for (i = 0; i < num_fault_mutexes; i++)
2545                 mutex_init(&hugetlb_fault_mutex_table[i]);
2546         return 0;
2547 }
2548 module_init(hugetlb_init);
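/*
 * Sizing example for the fault mutex table set up above: on an SMP kernel
 * with 6 possible CPUs, roundup_pow_of_two(8 * 6) = 64 mutexes are
 * allocated; faults on the same logical page hash to the same mutex.
 */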
2549
2550 /* Should be called on processing a hugepagesz=... option */
2551 void __init hugetlb_add_hstate(unsigned order)
2552 {
2553         struct hstate *h;
2554         unsigned long i;
2555
2556         if (size_to_hstate(PAGE_SIZE << order)) {
2557                 pr_warning("hugepagesz= specified twice, ignoring\n");
2558                 return;
2559         }
2560         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2561         BUG_ON(order == 0);
2562         h = &hstates[hugetlb_max_hstate++];
2563         h->order = order;
2564         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2565         h->nr_huge_pages = 0;
2566         h->free_huge_pages = 0;
2567         for (i = 0; i < MAX_NUMNODES; ++i)
2568                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2569         INIT_LIST_HEAD(&h->hugepage_activelist);
2570         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2571         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2572         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2573                                         huge_page_size(h)/1024);
2574
2575         parsed_hstate = h;
2576 }
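/*
 * Example (x86_64, 4KB base pages): "hugepagesz=2M" reaches here with
 * order = 9, so huge_page_size(h) = 2MB, h->mask = ~((1ULL << 21) - 1)
 * and the sysfs name becomes "hugepages-2048kB".
 */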
2577
2578 static int __init hugetlb_nrpages_setup(char *s)
2579 {
2580         unsigned long *mhp;
2581         static unsigned long *last_mhp;
2582
2583         /*
2584          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2585          * so this hugepages= parameter goes to the "default hstate".
2586          */
2587         if (!hugetlb_max_hstate)
2588                 mhp = &default_hstate_max_huge_pages;
2589         else
2590                 mhp = &parsed_hstate->max_huge_pages;
2591
2592         if (mhp == last_mhp) {
2593                 pr_warning("hugepages= specified twice without "
2594                            "interleaving hugepagesz=, ignoring\n");
2595                 return 1;
2596         }
2597
2598         if (sscanf(s, "%lu", mhp) <= 0)
2599                 *mhp = 0;
2600
2601         /*
2602          * Global state is always initialized later in hugetlb_init.
2603          * But we need to allocate >= MAX_ORDER hstates here early to still
2604          * use the bootmem allocator.
2605          */
2606         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2607                 hugetlb_hstate_alloc_pages(parsed_hstate);
2608
2609         last_mhp = mhp;
2610
2611         return 1;
2612 }
2613 __setup("hugepages=", hugetlb_nrpages_setup);
2614
2615 static int __init hugetlb_default_setup(char *s)
2616 {
2617         default_hstate_size = memparse(s, &s);
2618         return 1;
2619 }
2620 __setup("default_hugepagesz=", hugetlb_default_setup);
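/*
 * Boot command line example (illustrative; 1GB pages assume architecture
 * support):
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each "hugepages=" applies to the most recent "hugepagesz=" (parsed_hstate),
 * or to the default hstate if no size has been specified yet.
 */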
2621
2622 static unsigned int cpuset_mems_nr(unsigned int *array)
2623 {
2624         int node;
2625         unsigned int nr = 0;
2626
2627         for_each_node_mask(node, cpuset_current_mems_allowed)
2628                 nr += array[node];
2629
2630         return nr;
2631 }
2632
2633 #ifdef CONFIG_SYSCTL
2634 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2635                          struct ctl_table *table, int write,
2636                          void __user *buffer, size_t *length, loff_t *ppos)
2637 {
2638         struct hstate *h = &default_hstate;
2639         unsigned long tmp = h->max_huge_pages;
2640         int ret;
2641
2642         if (!hugepages_supported())
2643                 return -ENOTSUPP;
2644
2645         table->data = &tmp;
2646         table->maxlen = sizeof(unsigned long);
2647         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2648         if (ret)
2649                 goto out;
2650
2651         if (write)
2652                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2653                                                   NUMA_NO_NODE, tmp, *length);
2654 out:
2655         return ret;
2656 }
2657
2658 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2659                           void __user *buffer, size_t *length, loff_t *ppos)
2660 {
2661
2662         return hugetlb_sysctl_handler_common(false, table, write,
2663                                                         buffer, length, ppos);
2664 }
2665
2666 #ifdef CONFIG_NUMA
2667 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2668                           void __user *buffer, size_t *length, loff_t *ppos)
2669 {
2670         return hugetlb_sysctl_handler_common(true, table, write,
2671                                                         buffer, length, ppos);
2672 }
2673 #endif /* CONFIG_NUMA */
2674
2675 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2676                         void __user *buffer,
2677                         size_t *length, loff_t *ppos)
2678 {
2679         struct hstate *h = &default_hstate;
2680         unsigned long tmp;
2681         int ret;
2682
2683         if (!hugepages_supported())
2684                 return -ENOTSUPP;
2685
2686         tmp = h->nr_overcommit_huge_pages;
2687
2688         if (write && hstate_is_gigantic(h))
2689                 return -EINVAL;
2690
2691         table->data = &tmp;
2692         table->maxlen = sizeof(unsigned long);
2693         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2694         if (ret)
2695                 goto out;
2696
2697         if (write) {
2698                 spin_lock(&hugetlb_lock);
2699                 h->nr_overcommit_huge_pages = tmp;
2700                 spin_unlock(&hugetlb_lock);
2701         }
2702 out:
2703         return ret;
2704 }
2705
2706 #endif /* CONFIG_SYSCTL */
2707
2708 void hugetlb_report_meminfo(struct seq_file *m)
2709 {
2710         struct hstate *h = &default_hstate;
2711         if (!hugepages_supported())
2712                 return;
2713         seq_printf(m,
2714                         "HugePages_Total:   %5lu\n"
2715                         "HugePages_Free:    %5lu\n"
2716                         "HugePages_Rsvd:    %5lu\n"
2717                         "HugePages_Surp:    %5lu\n"
2718                         "Hugepagesize:   %8lu kB\n",
2719                         h->nr_huge_pages,
2720                         h->free_huge_pages,
2721                         h->resv_huge_pages,
2722                         h->surplus_huge_pages,
2723                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2724 }
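/*
 * Sample /proc/meminfo fragment produced above (illustrative values for a
 * 2MB default hstate):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */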
2725
2726 int hugetlb_report_node_meminfo(int nid, char *buf)
2727 {
2728         struct hstate *h = &default_hstate;
2729         if (!hugepages_supported())
2730                 return 0;
2731         return sprintf(buf,
2732                 "Node %d HugePages_Total: %5u\n"
2733                 "Node %d HugePages_Free:  %5u\n"
2734                 "Node %d HugePages_Surp:  %5u\n",
2735                 nid, h->nr_huge_pages_node[nid],
2736                 nid, h->free_huge_pages_node[nid],
2737                 nid, h->surplus_huge_pages_node[nid]);
2738 }
2739
2740 void hugetlb_show_meminfo(void)
2741 {
2742         struct hstate *h;
2743         int nid;
2744
2745         if (!hugepages_supported())
2746                 return;
2747
2748         for_each_node_state(nid, N_MEMORY)
2749                 for_each_hstate(h)
2750                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2751                                 nid,
2752                                 h->nr_huge_pages_node[nid],
2753                                 h->free_huge_pages_node[nid],
2754                                 h->surplus_huge_pages_node[nid],
2755                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2756 }
2757
2758 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2759 unsigned long hugetlb_total_pages(void)
2760 {
2761         struct hstate *h;
2762         unsigned long nr_total_pages = 0;
2763
2764         for_each_hstate(h)
2765                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2766         return nr_total_pages;
2767 }
2768
2769 static int hugetlb_acct_memory(struct hstate *h, long delta)
2770 {
2771         int ret = -ENOMEM;
2772
2773         spin_lock(&hugetlb_lock);
2774         /*
2775          * When cpuset is configured, it breaks the strict hugetlb page
2776          * reservation as the accounting is done on a global variable. Such
2777          * reservation is completely rubbish in the presence of cpuset because
2778          * current cpuset. An application can still potentially be OOM'ed by
2779          * the kernel for lack of free hugetlb pages in the cpuset the task
2780          * is in. Attempting to enforce strict accounting with cpusets is
2781          * almost impossible (or too ugly) because cpusets are so fluid that
2782          * tasks or memory nodes can be dynamically moved between cpusets.
2783          * task or memory node can be dynamically moved between cpusets.
2784          *
2785          * The change of semantics for shared hugetlb mapping with cpuset is
2786          * undesirable. However, in order to preserve some of the semantics,
2787          * we fall back to check against current free page availability as
2788          * a best attempt and hopefully to minimize the impact of changing
2789          * semantics that cpuset has.
2790          */
2791         if (delta > 0) {
2792                 if (gather_surplus_pages(h, delta) < 0)
2793                         goto out;
2794
2795                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2796                         return_unused_surplus_pages(h, delta);
2797                         goto out;
2798                 }
2799         }
2800
2801         ret = 0;
2802         if (delta < 0)
2803                 return_unused_surplus_pages(h, (unsigned long) -delta);
2804
2805 out:
2806         spin_unlock(&hugetlb_lock);
2807         return ret;
2808 }
2809
2810 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2811 {
2812         struct resv_map *resv = vma_resv_map(vma);
2813
2814         /*
2815          * This new VMA should share its sibling's reservation map if present.
2816          * The VMA will only ever have a valid reservation map pointer where
2817          * it is being copied for another still existing VMA.  As that VMA
2818          * has a reference to the reservation map it cannot disappear until
2819          * after this open call completes.  It is therefore safe to take a
2820          * new reference here without additional locking.
2821          */
2822         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2823                 kref_get(&resv->refs);
2824 }
2825
2826 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2827 {
2828         struct hstate *h = hstate_vma(vma);
2829         struct resv_map *resv = vma_resv_map(vma);
2830         struct hugepage_subpool *spool = subpool_vma(vma);
2831         unsigned long reserve, start, end;
2832         long gbl_reserve;
2833
2834         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2835                 return;
2836
2837         start = vma_hugecache_offset(h, vma, vma->vm_start);
2838         end = vma_hugecache_offset(h, vma, vma->vm_end);
2839
2840         reserve = (end - start) - region_count(resv, start, end);
2841
2842         kref_put(&resv->refs, resv_map_release);
2843
2844         if (reserve) {
2845                 /*
2846                  * Decrement reserve counts.  The global reserve count may be
2847                  * adjusted if the subpool has a minimum size.
2848                  */
2849                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2850                 hugetlb_acct_memory(h, -gbl_reserve);
2851         }
2852 }
2853
2854 /*
2855  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2856  * handle_mm_fault() to try to instantiate regular-sized pages in the
2857  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2858  * this far.
2859  */
2860 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2861 {
2862         BUG();
2863         return 0;
2864 }
2865
2866 const struct vm_operations_struct hugetlb_vm_ops = {
2867         .fault = hugetlb_vm_op_fault,
2868         .open = hugetlb_vm_op_open,
2869         .close = hugetlb_vm_op_close,
2870 };
2871
2872 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2873                                 int writable)
2874 {
2875         pte_t entry;
2876
2877         if (writable) {
2878                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2879                                          vma->vm_page_prot)));
2880         } else {
2881                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2882                                            vma->vm_page_prot));
2883         }
2884         entry = pte_mkyoung(entry);
2885         entry = pte_mkhuge(entry);
2886         entry = arch_make_huge_pte(entry, vma, page, writable);
2887
2888         return entry;
2889 }
2890
2891 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2892                                    unsigned long address, pte_t *ptep)
2893 {
2894         pte_t entry;
2895
2896         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2897         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2898                 update_mmu_cache(vma, address, ptep);
2899 }
2900
2901 static int is_hugetlb_entry_migration(pte_t pte)
2902 {
2903         swp_entry_t swp;
2904
2905         if (huge_pte_none(pte) || pte_present(pte))
2906                 return 0;
2907         swp = pte_to_swp_entry(pte);
2908         if (non_swap_entry(swp) && is_migration_entry(swp))
2909                 return 1;
2910         else
2911                 return 0;
2912 }
2913
2914 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2915 {
2916         swp_entry_t swp;
2917
2918         if (huge_pte_none(pte) || pte_present(pte))
2919                 return 0;
2920         swp = pte_to_swp_entry(pte);
2921         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2922                 return 1;
2923         else
2924                 return 0;
2925 }
2926
2927 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2928                             struct vm_area_struct *vma)
2929 {
2930         pte_t *src_pte, *dst_pte, entry;
2931         struct page *ptepage;
2932         unsigned long addr;
2933         int cow;
2934         struct hstate *h = hstate_vma(vma);
2935         unsigned long sz = huge_page_size(h);
2936         unsigned long mmun_start;       /* For mmu_notifiers */
2937         unsigned long mmun_end;         /* For mmu_notifiers */
2938         int ret = 0;
2939
2940         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2941
2942         mmun_start = vma->vm_start;
2943         mmun_end = vma->vm_end;
2944         if (cow)
2945                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2946
2947         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2948                 spinlock_t *src_ptl, *dst_ptl;
2949                 src_pte = huge_pte_offset(src, addr);
2950                 if (!src_pte)
2951                         continue;
2952                 dst_pte = huge_pte_alloc(dst, addr, sz);
2953                 if (!dst_pte) {
2954                         ret = -ENOMEM;
2955                         break;
2956                 }
2957
2958                 /* If the pagetables are shared don't copy or take references */
2959                 if (dst_pte == src_pte)
2960                         continue;
2961
2962                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2963                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2964                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2965                 entry = huge_ptep_get(src_pte);
2966                 if (huge_pte_none(entry)) { /* skip none entry */
2967                         ;
2968                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2969                                     is_hugetlb_entry_hwpoisoned(entry))) {
2970                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2971
2972                         if (is_write_migration_entry(swp_entry) && cow) {
2973                                 /*
2974                                  * COW mappings require pages in both
2975                                  * parent and child to be set read-only.
2976                                  */
2977                                 make_migration_entry_read(&swp_entry);
2978                                 entry = swp_entry_to_pte(swp_entry);
2979                                 set_huge_pte_at(src, addr, src_pte, entry);
2980                         }
2981                         set_huge_pte_at(dst, addr, dst_pte, entry);
2982                 } else {
2983                         if (cow) {
2984                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2985                                 mmu_notifier_invalidate_range(src, mmun_start,
2986                                                                    mmun_end);
2987                         }
2988                         entry = huge_ptep_get(src_pte);
2989                         ptepage = pte_page(entry);
2990                         get_page(ptepage);
2991                         page_dup_rmap(ptepage);
2992                         set_huge_pte_at(dst, addr, dst_pte, entry);
2993                 }
2994                 spin_unlock(src_ptl);
2995                 spin_unlock(dst_ptl);
2996         }
2997
2998         if (cow)
2999                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3000
3001         return ret;
3002 }
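/*
 * For illustration: after fork() of a MAP_PRIVATE writable hugetlb
 * mapping, the loop above leaves both parent and child PTEs
 * write-protected, so the first write from either side takes a hugetlb
 * fault that is resolved via hugetlb_cow().
 */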
3003
3004 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3005                             unsigned long start, unsigned long end,
3006                             struct page *ref_page)
3007 {
3008         int force_flush = 0;
3009         struct mm_struct *mm = vma->vm_mm;
3010         unsigned long address;
3011         pte_t *ptep;
3012         pte_t pte;
3013         spinlock_t *ptl;
3014         struct page *page;
3015         struct hstate *h = hstate_vma(vma);
3016         unsigned long sz = huge_page_size(h);
3017         const unsigned long mmun_start = start; /* For mmu_notifiers */
3018         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3019
3020         WARN_ON(!is_vm_hugetlb_page(vma));
3021         BUG_ON(start & ~huge_page_mask(h));
3022         BUG_ON(end & ~huge_page_mask(h));
3023
3024         tlb_start_vma(tlb, vma);
3025         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3026         address = start;
3027 again:
3028         for (; address < end; address += sz) {
3029                 ptep = huge_pte_offset(mm, address);
3030                 if (!ptep)
3031                         continue;
3032
3033                 ptl = huge_pte_lock(h, mm, ptep);
3034                 if (huge_pmd_unshare(mm, &address, ptep))
3035                         goto unlock;
3036
3037                 pte = huge_ptep_get(ptep);
3038                 if (huge_pte_none(pte))
3039                         goto unlock;
3040
3041                 /*
3042                  * A migrating or HWPoisoned hugepage is already unmapped
3043                  * and its refcount dropped, so just clear the pte here.
3044                  */
3045                 if (unlikely(!pte_present(pte))) {
3046                         huge_pte_clear(mm, address, ptep);
3047                         goto unlock;
3048                 }
3049
3050                 page = pte_page(pte);
3051                 /*
3052                  * If a reference page is supplied, it is because a specific
3053                  * page is being unmapped, not a range. Ensure the page we
3054                  * are about to unmap is the actual page of interest.
3055                  */
3056                 if (ref_page) {
3057                         if (page != ref_page)
3058                                 goto unlock;
3059
3060                         /*
3061                          * Mark the VMA as having unmapped its page so that
3062                          * future faults in this VMA will fail rather than
3063                          * looking like data was lost
3064                          */
3065                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3066                 }
3067
3068                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3069                 tlb_remove_tlb_entry(tlb, ptep, address);
3070                 if (huge_pte_dirty(pte))
3071                         set_page_dirty(page);
3072
3073                 page_remove_rmap(page);
3074                 force_flush = !__tlb_remove_page(tlb, page);
3075                 if (force_flush) {
3076                         address += sz;
3077                         spin_unlock(ptl);
3078                         break;
3079                 }
3080                 /* Bail out after unmapping reference page if supplied */
3081                 if (ref_page) {
3082                         spin_unlock(ptl);
3083                         break;
3084                 }
3085 unlock:
3086                 spin_unlock(ptl);
3087         }
3088         /*
3089          * mmu_gather ran out of room to batch pages, so we break out of
3090          * the PTE lock to avoid doing the potentially expensive TLB
3091          * invalidate and page-free while holding it.
3092          */
3093         if (force_flush) {
3094                 force_flush = 0;
3095                 tlb_flush_mmu(tlb);
3096                 if (address < end && !ref_page)
3097                         goto again;
3098         }
3099         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3100         tlb_end_vma(tlb, vma);
3101 }
3102
3103 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3104                           struct vm_area_struct *vma, unsigned long start,
3105                           unsigned long end, struct page *ref_page)
3106 {
3107         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3108
3109         /*
3110          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3111          * test will fail on a vma being torn down, and not grab a page table
3112          * on its way out.  We're lucky that the flag has such an appropriate
3113          * name, and can in fact be safely cleared here. We could clear it
3114          * before the __unmap_hugepage_range above, but all that's necessary
3115          * is to clear it before releasing the i_mmap_rwsem. This works
3116          * because in the context this is called, the VMA is about to be
3117          * destroyed and the i_mmap_rwsem is held.
3118          */
3119         vma->vm_flags &= ~VM_MAYSHARE;
3120 }
3121
3122 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3123                           unsigned long end, struct page *ref_page)
3124 {
3125         struct mm_struct *mm;
3126         struct mmu_gather tlb;
3127
3128         mm = vma->vm_mm;
3129
3130         tlb_gather_mmu(&tlb, mm, start, end);
3131         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3132         tlb_finish_mmu(&tlb, start, end);
3133 }
3134
3135 /*
3136  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3137  * mapping it owns the reserve page for. The intention is to unmap the page
3138  * from other VMAs and let the children be SIGKILLed if they are faulting the
3139  * same region.
3140  */
3141 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3142                               struct page *page, unsigned long address)
3143 {
3144         struct hstate *h = hstate_vma(vma);
3145         struct vm_area_struct *iter_vma;
3146         struct address_space *mapping;
3147         pgoff_t pgoff;
3148
3149         /*
3150          * vm_pgoff is in PAGE_SIZE units, hence the calculation differs
3151          * from the page cache lookup, which is in HPAGE_SIZE units.
3152          */
3153         address = address & huge_page_mask(h);
3154         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3155                         vma->vm_pgoff;
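        /*
         * E.g. with 4KB base pages and 2MB huge pages, each huge page
         * spans 512 base pages, so an address one huge page into the
         * mapping yields pgoff = vma->vm_pgoff + 512.
         */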
3156         mapping = file_inode(vma->vm_file)->i_mapping;
3157
3158         /*
3159          * Take the mapping lock for the duration of the table walk. Since
3160          * this mapping is shared between all the VMAs,
3161          * __unmap_hugepage_range() is called while the lock is already held.
3162          */
3163         i_mmap_lock_write(mapping);
3164         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3165                 /* Do not unmap the current VMA */
3166                 if (iter_vma == vma)
3167                         continue;
3168
3169                 /*
3170                  * Unmap the page from other VMAs without their own reserves.
3171                  * They get marked to be SIGKILLed if they fault in these
3172                  * areas. This is because a future no-page fault on this VMA
3173                  * could insert a zeroed page instead of the data that existed
3174                  * at the time of fork. This would look like data corruption.
3175                  */
3176                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3177                         unmap_hugepage_range(iter_vma, address,
3178                                              address + huge_page_size(h), page);
3179         }
3180         i_mmap_unlock_write(mapping);
3181 }
3182
3183 /*
3184  * hugetlb_cow() should be called with the page lock of the original hugepage
3185  * held. Called with hugetlb_instantiation_mutex held and pte_page locked, so
3186  * we cannot race with other handlers or page migration.
3187  * Keep the pte_same checks anyway to make the transition from the mutex easier.
3188  */
3189 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3190                         unsigned long address, pte_t *ptep, pte_t pte,
3191                         struct page *pagecache_page, spinlock_t *ptl)
3192 {
3193         struct hstate *h = hstate_vma(vma);
3194         struct page *old_page, *new_page;
3195         int ret = 0, outside_reserve = 0;
3196         unsigned long mmun_start;       /* For mmu_notifiers */
3197         unsigned long mmun_end;         /* For mmu_notifiers */
3198
3199         old_page = pte_page(pte);
3200
3201 retry_avoidcopy:
3202         /* If no-one else is actually using this page, avoid the copy
3203          * and just make the page writable */
3204         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3205                 page_move_anon_rmap(old_page, vma, address);
3206                 set_huge_ptep_writable(vma, address, ptep);
3207                 return 0;
3208         }
3209
3210         /*
3211          * If the process that created a MAP_PRIVATE mapping is about to
3212          * perform a COW due to a shared page count, attempt to satisfy
3213          * the allocation without using the existing reserves. The pagecache
3214          * page is used to determine if the reserve at this address was
3215          * consumed or not. If reserves were used, a partially faulted mapping
3216          * at the time of fork() could consume its reserves on COW instead
3217          * of the full address range.
3218          */
3219         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3220                         old_page != pagecache_page)
3221                 outside_reserve = 1;
3222
3223         page_cache_get(old_page);
3224
3225         /*
3226          * Drop the page table lock as the buddy allocator may be called. It will
3227          * be acquired again before returning to the caller, as expected.
3228          */
3229         spin_unlock(ptl);
3230         new_page = alloc_huge_page(vma, address, outside_reserve);
3231
3232         if (IS_ERR(new_page)) {
3233                 /*
3234                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3235                  * it is due to references held by a child and an insufficient
3236                  * huge page pool. To guarantee the original mapper's
3237                  * reliability, unmap the page from child processes. The child
3238                  * may get SIGKILLed if it later faults.
3239                  */
3240                 if (outside_reserve) {
3241                         page_cache_release(old_page);
3242                         BUG_ON(huge_pte_none(pte));
3243                         unmap_ref_private(mm, vma, old_page, address);
3244                         BUG_ON(huge_pte_none(pte));
3245                         spin_lock(ptl);
3246                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3247                         if (likely(ptep &&
3248                                    pte_same(huge_ptep_get(ptep), pte)))
3249                                 goto retry_avoidcopy;
3250                         /*
3251                          * A race occurred while re-acquiring the page
3252                          * table lock, and our job is done.
3253                          */
3254                         return 0;
3255                 }
3256
3257                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3258                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3259                 goto out_release_old;
3260         }
3261
3262         /*
3263          * When the original hugepage is a shared one, it does not have
3264          * an anon_vma prepared.
3265          */
3266         if (unlikely(anon_vma_prepare(vma))) {
3267                 ret = VM_FAULT_OOM;
3268                 goto out_release_all;
3269         }
3270
3271         copy_user_huge_page(new_page, old_page, address, vma,
3272                             pages_per_huge_page(h));
3273         __SetPageUptodate(new_page);
3274         set_page_huge_active(new_page);
3275
3276         mmun_start = address & huge_page_mask(h);
3277         mmun_end = mmun_start + huge_page_size(h);
3278         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3279
3280         /*
3281          * Retake the page table lock to check for racing updates
3282          * before the page tables are altered
3283          */
3284         spin_lock(ptl);
3285         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3286         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3287                 ClearPagePrivate(new_page);
3288
3289                 /* Break COW */
3290                 huge_ptep_clear_flush(vma, address, ptep);
3291                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3292                 set_huge_pte_at(mm, address, ptep,
3293                                 make_huge_pte(vma, new_page, 1));
3294                 page_remove_rmap(old_page);
3295                 hugepage_add_new_anon_rmap(new_page, vma, address);
3296                 /* Make the old page be freed below */
3297                 new_page = old_page;
3298         }
3299         spin_unlock(ptl);
3300         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3301 out_release_all:
3302         page_cache_release(new_page);
3303 out_release_old:
3304         page_cache_release(old_page);
3305
3306         spin_lock(ptl); /* Caller expects lock to be held */
3307         return ret;
3308 }
3309
3310 /* Return the pagecache page at a given address within a VMA */
3311 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3312                         struct vm_area_struct *vma, unsigned long address)
3313 {
3314         struct address_space *mapping;
3315         pgoff_t idx;
3316
3317         mapping = vma->vm_file->f_mapping;
3318         idx = vma_hugecache_offset(h, vma, address);
3319
3320         return find_lock_page(mapping, idx);
3321 }
3322
3323 /*
3324  * Return whether there is a pagecache page to back the given address within the VMA.
3325  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3326  */
3327 static bool hugetlbfs_pagecache_present(struct hstate *h,
3328                         struct vm_area_struct *vma, unsigned long address)
3329 {
3330         struct address_space *mapping;
3331         pgoff_t idx;
3332         struct page *page;
3333
3334         mapping = vma->vm_file->f_mapping;
3335         idx = vma_hugecache_offset(h, vma, address);
3336
3337         page = find_get_page(mapping, idx);
3338         if (page)
3339                 put_page(page);
3340         return page != NULL;
3341 }
3342
3343 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3344                            struct address_space *mapping, pgoff_t idx,
3345                            unsigned long address, pte_t *ptep, unsigned int flags)
3346 {
3347         struct hstate *h = hstate_vma(vma);
3348         int ret = VM_FAULT_SIGBUS;
3349         int anon_rmap = 0;
3350         unsigned long size;
3351         struct page *page;
3352         pte_t new_pte;
3353         spinlock_t *ptl;
3354
3355         /*
3356          * Currently, we are forced to kill the process in the event the
3357          * original mapper has unmapped pages from the child due to a failed
3358          * COW. Warn that such a situation has occurred as it may not be obvious
3359          */
3360         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3361                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3362                            current->pid);
3363                 return ret;
3364         }
3365
3366         /*
3367          * Use the page lock to guard against racing truncation
3368          * before we take the page_table_lock.
3369          */
3370 retry:
3371         page = find_lock_page(mapping, idx);
3372         if (!page) {
3373                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3374                 if (idx >= size)
3375                         goto out;
3376                 page = alloc_huge_page(vma, address, 0);
3377                 if (IS_ERR(page)) {
3378                         ret = PTR_ERR(page);
3379                         if (ret == -ENOMEM)
3380                                 ret = VM_FAULT_OOM;
3381                         else
3382                                 ret = VM_FAULT_SIGBUS;
3383                         goto out;
3384                 }
3385                 clear_huge_page(page, address, pages_per_huge_page(h));
3386                 __SetPageUptodate(page);
3387                 set_page_huge_active(page);
3388
3389                 if (vma->vm_flags & VM_MAYSHARE) {
3390                         int err;
3391                         struct inode *inode = mapping->host;
3392
3393                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3394                         if (err) {
3395                                 put_page(page);
3396                                 if (err == -EEXIST)
3397                                         goto retry;
3398                                 goto out;
3399                         }
3400                         ClearPagePrivate(page);
3401
3402                         spin_lock(&inode->i_lock);
3403                         inode->i_blocks += blocks_per_huge_page(h);
3404                         spin_unlock(&inode->i_lock);
3405                 } else {
3406                         lock_page(page);
3407                         if (unlikely(anon_vma_prepare(vma))) {
3408                                 ret = VM_FAULT_OOM;
3409                                 goto backout_unlocked;
3410                         }
3411                         anon_rmap = 1;
3412                 }
3413         } else {
3414                 /*
3415                  * If a memory error occurs between mmap() and fault, some
3416                  * processes lack a hwpoisoned swap entry for the errored virtual
3417                  * address, so we must block the hugepage fault with a PG_hwpoison check.
3418                  */
3419                 if (unlikely(PageHWPoison(page))) {
3420                         ret = VM_FAULT_HWPOISON |
3421                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3422                         goto backout_unlocked;
3423                 }
3424         }
3425
3426         /*
3427          * If we are going to COW a private mapping later, we examine the
3428          * pending reservations for this page now. This will ensure that
3429          * any allocations necessary to record that reservation occur outside
3430          * the spinlock.
3431          */
3432         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3433                 if (vma_needs_reservation(h, vma, address) < 0) {
3434                         ret = VM_FAULT_OOM;
3435                         goto backout_unlocked;
3436                 }
3437                 /* Just decrements count, does not deallocate */
3438                 vma_end_reservation(h, vma, address);
3439         }
3440
3441         ptl = huge_pte_lockptr(h, mm, ptep);
3442         spin_lock(ptl);
3443         size = i_size_read(mapping->host) >> huge_page_shift(h);
3444         if (idx >= size)
3445                 goto backout;
3446
3447         ret = 0;
3448         if (!huge_pte_none(huge_ptep_get(ptep)))
3449                 goto backout;
3450
3451         if (anon_rmap) {
3452                 ClearPagePrivate(page);
3453                 hugepage_add_new_anon_rmap(page, vma, address);
3454         } else
3455                 page_dup_rmap(page);
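        /*
         * Note: the new PTE is writable only for shared writable mappings;
         * a private writable mapping starts write-protected so that the
         * hugetlb_cow() call below handles the first write.
         */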
3456         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3457                                 && (vma->vm_flags & VM_SHARED)));
3458         set_huge_pte_at(mm, address, ptep, new_pte);
3459
3460         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3461                 /* Optimization, do the COW without a second fault */
3462                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3463         }
3464
3465         spin_unlock(ptl);
3466         unlock_page(page);
3467 out:
3468         return ret;
3469
3470 backout:
3471         spin_unlock(ptl);
3472 backout_unlocked:
3473         unlock_page(page);
3474         put_page(page);
3475         goto out;
3476 }
3477
3478 #ifdef CONFIG_SMP
3479 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3480                             struct vm_area_struct *vma,
3481                             struct address_space *mapping,
3482                             pgoff_t idx, unsigned long address)
3483 {
3484         unsigned long key[2];
3485         u32 hash;
3486
3487         if (vma->vm_flags & VM_SHARED) {
3488                 key[0] = (unsigned long) mapping;
3489                 key[1] = idx;
3490         } else {
3491                 key[0] = (unsigned long) mm;
3492                 key[1] = address >> huge_page_shift(h);
3493         }
3494
3495         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3496
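        /*
         * num_fault_mutexes is sized to a power of two at init time, so
         * the mask below is a cheap modulo.
         */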
3497         return hash & (num_fault_mutexes - 1);
3498 }
3499 #else
3500 /*
3501  * For uniprocessor systems we always use a single mutex, so just
3502  * return 0 and avoid the hashing overhead.
3503  */
3504 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3505                             struct vm_area_struct *vma,
3506                             struct address_space *mapping,
3507                             pgoff_t idx, unsigned long address)
3508 {
3509         return 0;
3510 }
3511 #endif
3512
3513 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3514                         unsigned long address, unsigned int flags)
3515 {
3516         pte_t *ptep, entry;
3517         spinlock_t *ptl;
3518         int ret;
3519         u32 hash;
3520         pgoff_t idx;
3521         struct page *page = NULL;
3522         struct page *pagecache_page = NULL;
3523         struct hstate *h = hstate_vma(vma);
3524         struct address_space *mapping;
3525         int need_wait_lock = 0;
3526
3527         address &= huge_page_mask(h);
3528
3529         ptep = huge_pte_offset(mm, address);
3530         if (ptep) {
3531                 entry = huge_ptep_get(ptep);
3532                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3533                         migration_entry_wait_huge(vma, mm, ptep);
3534                         return 0;
3535                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3536                         return VM_FAULT_HWPOISON_LARGE |
3537                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3538         }
3539
3540         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3541         if (!ptep)
3542                 return VM_FAULT_OOM;
3543
3544         mapping = vma->vm_file->f_mapping;
3545         idx = vma_hugecache_offset(h, vma, address);
3546
3547         /*
3548          * Serialize hugepage allocation and instantiation, so that we don't
3549          * get spurious allocation failures if two CPUs race to instantiate
3550          * the same page in the page cache.
3551          */
3552         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3553         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3554
3555         entry = huge_ptep_get(ptep);
3556         if (huge_pte_none(entry)) {
3557                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3558                 goto out_mutex;
3559         }
3560
3561         ret = 0;
3562
3563         /*
3564          * entry could be a migration/hwpoison entry at this point, so this
3565          * check prevents the kernel from going below assuming that we have
3566          * an active hugepage in the pagecache. This goto expects a second
3567          * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3568          * checks will handle it properly.
3569          */
3570         if (!pte_present(entry))
3571                 goto out_mutex;
3572
3573         /*
3574          * If we are going to COW the mapping later, we examine the pending
3575          * reservations for this page now. This will ensure that any
3576          * allocations necessary to record that reservation occur outside the
3577          * spinlock. For private mappings, we also lookup the pagecache
3578          * page now as it is used to determine if a reservation has been
3579          * consumed.
3580          */
3581         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3582                 if (vma_needs_reservation(h, vma, address) < 0) {
3583                         ret = VM_FAULT_OOM;
3584                         goto out_mutex;
3585                 }
3586                 /* Just decrements count, does not deallocate */
3587                 vma_end_reservation(h, vma, address);
3588
3589                 if (!(vma->vm_flags & VM_MAYSHARE))
3590                         pagecache_page = hugetlbfs_pagecache_page(h,
3591                                                                 vma, address);
3592         }
3593
3594         ptl = huge_pte_lock(h, mm, ptep);
3595
3596         /* Check for a racing update before calling hugetlb_cow */
3597         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3598                 goto out_ptl;
3599
3600         /*
3601          * hugetlb_cow() requires page locks of pte_page(entry) and
3602          * pagecache_page, so here we need to take the former one
3603          * when page != pagecache_page or !pagecache_page.
3604          */
3605         page = pte_page(entry);
3606         if (page != pagecache_page)
3607                 if (!trylock_page(page)) {
3608                         need_wait_lock = 1;
3609                         goto out_ptl;
3610                 }
3611
3612         get_page(page);
3613
3614         if (flags & FAULT_FLAG_WRITE) {
3615                 if (!huge_pte_write(entry)) {
3616                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3617                                         pagecache_page, ptl);
3618                         goto out_put_page;
3619                 }
3620                 entry = huge_pte_mkdirty(entry);
3621         }
3622         entry = pte_mkyoung(entry);
3623         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3624                                                 flags & FAULT_FLAG_WRITE))
3625                 update_mmu_cache(vma, address, ptep);
3626 out_put_page:
3627         if (page != pagecache_page)
3628                 unlock_page(page);
3629         put_page(page);
3630 out_ptl:
3631         spin_unlock(ptl);
3632
3633         if (pagecache_page) {
3634                 unlock_page(pagecache_page);
3635                 put_page(pagecache_page);
3636         }
3637 out_mutex:
3638         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3639         /*
3640          * Generally it's safe to hold a refcount while waiting on a page
3641          * lock. Here, though, we only wait to defer the next page fault and
3642          * avoid a busy loop; the page is not touched after being unlocked and
3643          * before the current page fault returns. So we are safe from
3644          * accessing a freed page even though we wait without a refcount.
3645          */
3646         if (need_wait_lock)
3647                 wait_on_page_locked(page);
3648         return ret;
3649 }
3650
3651 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3652                          struct page **pages, struct vm_area_struct **vmas,
3653                          unsigned long *position, unsigned long *nr_pages,
3654                          long i, unsigned int flags)
3655 {
3656         unsigned long pfn_offset;
3657         unsigned long vaddr = *position;
3658         unsigned long remainder = *nr_pages;
3659         struct hstate *h = hstate_vma(vma);
3660
3661         while (vaddr < vma->vm_end && remainder) {
3662                 pte_t *pte;
3663                 spinlock_t *ptl = NULL;
3664                 int absent;
3665                 struct page *page;
3666
3667                 /*
3668                  * If we have a pending SIGKILL, don't keep faulting pages and
3669                  * potentially allocating memory.
3670                  */
3671                 if (unlikely(fatal_signal_pending(current))) {
3672                         remainder = 0;
3673                         break;
3674                 }
3675
3676                 /*
3677                  * Some archs (sparc64, sh*) have multiple pte_t entries
3678                  * for each hugepage.  We have to make sure we get the
3679                  * first, for the page indexing below to work.
3680                  *
3681                  * Note that page table lock is not held when pte is null.
3682                  */
3683                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3684                 if (pte)
3685                         ptl = huge_pte_lock(h, mm, pte);
3686                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3687
3688                 /*
3689                  * When coredumping, it suits get_dump_page if we just return
3690                  * an error where there's an empty slot with no huge pagecache
3691                  * to back it.  This way, we avoid allocating a hugepage, and
3692                  * the sparse dumpfile avoids allocating disk blocks, but its
3693                  * huge holes still show up with zeroes where they need to be.
3694                  */
3695                 if (absent && (flags & FOLL_DUMP) &&
3696                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3697                         if (pte)
3698                                 spin_unlock(ptl);
3699                         remainder = 0;
3700                         break;
3701                 }
3702
3703                 /*
3704                  * We need to call hugetlb_fault for both hugepages under
3705                  * migration (in which case hugetlb_fault waits for the
3706                  * migration) and hwpoisoned hugepages (in which case we need
3707                  * to prevent the caller from accessing them). To do this, we
3708                  * use is_swap_pte here instead of is_hugetlb_entry_migration
3709                  * and is_hugetlb_entry_hwpoisoned: it simply covers both
3710                  * cases, and we can't follow correct pages directly from any
3711                  * kind of swap entry anyway.
3712                  */
3713                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3714                     ((flags & FOLL_WRITE) &&
3715                       !huge_pte_write(huge_ptep_get(pte)))) {
3716                         int ret;
3717
3718                         if (pte)
3719                                 spin_unlock(ptl);
3720                         ret = hugetlb_fault(mm, vma, vaddr,
3721                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3722                         if (!(ret & VM_FAULT_ERROR))
3723                                 continue;
3724
3725                         remainder = 0;
3726                         break;
3727                 }
3728
3729                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3730                 page = pte_page(huge_ptep_get(pte));
3731 same_page:
3732                 if (pages) {
3733                         pages[i] = mem_map_offset(page, pfn_offset);
3734                         get_page_foll(pages[i]);
3735                 }
3736
3737                 if (vmas)
3738                         vmas[i] = vma;
3739
3740                 vaddr += PAGE_SIZE;
3741                 ++pfn_offset;
3742                 --remainder;
3743                 ++i;
3744                 if (vaddr < vma->vm_end && remainder &&
3745                                 pfn_offset < pages_per_huge_page(h)) {
3746                         /*
3747                          * We use pfn_offset to avoid touching the pageframes
3748                          * of this compound page.
3749                          */
3750                         goto same_page;
3751                 }
3752                 spin_unlock(ptl);
3753         }
3754         *nr_pages = remainder;
3755         *position = vaddr;
3756
3757         return i ? i : -EFAULT;
3758 }
3759
3760 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3761                 unsigned long address, unsigned long end, pgprot_t newprot)
3762 {
3763         struct mm_struct *mm = vma->vm_mm;
3764         unsigned long start = address;
3765         pte_t *ptep;
3766         pte_t pte;
3767         struct hstate *h = hstate_vma(vma);
3768         unsigned long pages = 0;
3769
3770         BUG_ON(address >= end);
3771         flush_cache_range(vma, address, end);
3772
3773         mmu_notifier_invalidate_range_start(mm, start, end);
3774         i_mmap_lock_write(vma->vm_file->f_mapping);
3775         for (; address < end; address += huge_page_size(h)) {
3776                 spinlock_t *ptl;
3777                 ptep = huge_pte_offset(mm, address);
3778                 if (!ptep)
3779                         continue;
3780                 ptl = huge_pte_lock(h, mm, ptep);
3781                 if (huge_pmd_unshare(mm, &address, ptep)) {
3782                         pages++;
3783                         spin_unlock(ptl);
3784                         continue;
3785                 }
3786                 pte = huge_ptep_get(ptep);
3787                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3788                         spin_unlock(ptl);
3789                         continue;
3790                 }
3791                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3792                         swp_entry_t entry = pte_to_swp_entry(pte);
3793
3794                         if (is_write_migration_entry(entry)) {
3795                                 pte_t newpte;
3796
3797                                 make_migration_entry_read(&entry);
3798                                 newpte = swp_entry_to_pte(entry);
3799                                 set_huge_pte_at(mm, address, ptep, newpte);
3800                                 pages++;
3801                         }
3802                         spin_unlock(ptl);
3803                         continue;
3804                 }
3805                 if (!huge_pte_none(pte)) {
3806                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3807                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3808                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3809                         set_huge_pte_at(mm, address, ptep, pte);
3810                         pages++;
3811                 }
3812                 spin_unlock(ptl);
3813         }
3814         /*
3815          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3816          * may have cleared our pud entry and done put_page on the page table:
3817          * once we release i_mmap_rwsem, another task can do the final put_page
3818          * and that page table may be reused and filled with junk.
3819          */
3820         flush_tlb_range(vma, start, end);
3821         mmu_notifier_invalidate_range(mm, start, end);
3822         i_mmap_unlock_write(vma->vm_file->f_mapping);
3823         mmu_notifier_invalidate_range_end(mm, start, end);
3824
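        /*
         * "pages" counts huge pages changed; shift by the hstate order to
         * report the number of base pages changed.
         */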
3825         return pages << h->order;
3826 }
3827
3828 int hugetlb_reserve_pages(struct inode *inode,
3829                                         long from, long to,
3830                                         struct vm_area_struct *vma,
3831                                         vm_flags_t vm_flags)
3832 {
3833         long ret, chg;
3834         struct hstate *h = hstate_inode(inode);
3835         struct hugepage_subpool *spool = subpool_inode(inode);
3836         struct resv_map *resv_map;
3837         long gbl_reserve;
3838
3839         /*
3840          * Only apply hugepage reservation if asked. At fault time, an
3841          * attempt will be made for VM_NORESERVE to allocate a page
3842          * without using reserves
3843          */
3844         if (vm_flags & VM_NORESERVE)
3845                 return 0;
3846
3847         /*
3848          * Shared mappings base their reservation on the number of pages that
3849          * are already allocated on behalf of the file. Private mappings need
3850          * to reserve the full area even if read-only as mprotect() may be
3851          * called to make the mapping read-write. Assume !vma is a shm mapping
3852          */
3853         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3854                 resv_map = inode_resv_map(inode);
3855
3856                 chg = region_chg(resv_map, from, to);
3857
3858         } else {
3859                 resv_map = resv_map_alloc();
3860                 if (!resv_map)
3861                         return -ENOMEM;
3862
3863                 chg = to - from;
3864
3865                 set_vma_resv_map(vma, resv_map);
3866                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3867         }
3868
3869         if (chg < 0) {
3870                 ret = chg;
3871                 goto out_err;
3872         }
3873
3874         /*
3875          * There must be enough pages in the subpool for the mapping. If
3876          * the subpool has a minimum size, there may be some global
3877          * reservations already in place (gbl_reserve).
3878          */
3879         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3880         if (gbl_reserve < 0) {
3881                 ret = -ENOSPC;
3882                 goto out_err;
3883         }
3884
3885         /*
3886          * Check that enough hugepages are available for the reservation.
3887          * Hand the pages back to the subpool if there are not.
3888          */
3889         ret = hugetlb_acct_memory(h, gbl_reserve);
3890         if (ret < 0) {
3891                 /* put back original number of pages, chg */
3892                 (void)hugepage_subpool_put_pages(spool, chg);
3893                 goto out_err;
3894         }
3895
3896         /*
3897          * Account for the reservations made. Shared mappings record regions
3898          * that have reservations as they are shared by multiple VMAs.
3899          * When the last VMA disappears, the region map says how much
3900          * the reservation was and the page cache tells how much of
3901          * the reservation was consumed. Private mappings are per-VMA and
3902          * only the consumed reservations are tracked. When the VMA
3903          * disappears, the original reservation is the VMA size and the
3904          * consumed reservations are stored in the map. Hence, nothing
3905          * else has to be done for private mappings here
3906          */
3907         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3908                 long add = region_add(resv_map, from, to);
3909
3910                 if (unlikely(chg > add)) {
3911                         /*
3912                          * pages in this range were added to the reserve
3913                          * map between region_chg and region_add.  This
3914                          * indicates a race with alloc_huge_page.  Adjust
3915                          * the subpool and reserve counts modified above
3916                          * based on the difference.
3917                          */
3918                         long rsv_adjust;
3919
3920                         rsv_adjust = hugepage_subpool_put_pages(spool,
3921                                                                 chg - add);
3922                         hugetlb_acct_memory(h, -rsv_adjust);
3923                 }
3924         }
3925         return 0;
3926 out_err:
3927         if (!vma || vma->vm_flags & VM_MAYSHARE)
3928                 region_abort(resv_map, from, to);
3929         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3930                 kref_put(&resv_map->refs, resv_map_release);
3931         return ret;
3932 }
3933
3934 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
3935                                                                 long freed)
3936 {
3937         struct hstate *h = hstate_inode(inode);
3938         struct resv_map *resv_map = inode_resv_map(inode);
3939         long chg = 0;
3940         struct hugepage_subpool *spool = subpool_inode(inode);
3941         long gbl_reserve;
3942
3943         if (resv_map) {
3944                 chg = region_del(resv_map, start, end);
3945                 /*
3946                  * region_del() can fail in the rare case where a region
3947                  * must be split and another region descriptor cannot be
3948                  * allocated.  If end == LONG_MAX, it will not fail.
3949                  */
3950                 if (chg < 0)
3951                         return chg;
3952         }
3953
3954         spin_lock(&inode->i_lock);
3955         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3956         spin_unlock(&inode->i_lock);
3957
3958         /*
3959          * If the subpool has a minimum size, the number of global
3960          * reservations to be released may be adjusted.
3961          */
3962         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3963         hugetlb_acct_memory(h, -gbl_reserve);
3964
3965         return 0;
3966 }
3967
3968 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3969 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3970                                 struct vm_area_struct *vma,
3971                                 unsigned long addr, pgoff_t idx)
3972 {
3973         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3974                                 svma->vm_start;
3975         unsigned long sbase = saddr & PUD_MASK;
3976         unsigned long s_end = sbase + PUD_SIZE;
3977
3978         /* Allow segments to share even if only one is marked locked */
3979         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3980         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3981
3982         /*
3983          * match the virtual addresses, permissions and the alignment of the
3984          * page table page.
3985          */
3986         if (pmd_index(addr) != pmd_index(saddr) ||
3987             vm_flags != svm_flags ||
3988             sbase < svma->vm_start || svma->vm_end < s_end)
3989                 return 0;
3990
3991         return saddr;
3992 }
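/*
 * For illustration, assuming x86_64 with 2MB huge pages and 1GB PUDs: if
 * svma maps the file from offset 0 at 0x40000000 and vma maps it at
 * 0x80000000, then for vma's addr 0x80200000 the computed saddr is
 * 0x40200000, the same offset within svma; sharing is allowed provided
 * both VMAs span the full PUD range with matching flags.
 */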
3993
3994 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3995 {
3996         unsigned long base = addr & PUD_MASK;
3997         unsigned long end = base + PUD_SIZE;
3998
3999         /*
4000          * Check for proper vm_flags and page table alignment.
4001          */
4002         if (vma->vm_flags & VM_MAYSHARE &&
4003             vma->vm_start <= base && end <= vma->vm_end)
4004                 return true;
4005         return false;
4006 }
4007
4008 /*
4009  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4010  * and returns the corresponding pte. While this is not necessary for the
4011  * !shared pmd case because we can allocate the pmd later as well, it makes the
4012  * code much cleaner. pmd allocation is essential for the shared case because
4013  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4014  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4015  * bad pmd for sharing.
4016  */
4017 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4018 {
4019         struct vm_area_struct *vma = find_vma(mm, addr);
4020         struct address_space *mapping = vma->vm_file->f_mapping;
4021         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4022                         vma->vm_pgoff;
4023         struct vm_area_struct *svma;
4024         unsigned long saddr;
4025         pte_t *spte = NULL;
4026         pte_t *pte;
4027         spinlock_t *ptl;
4028
4029         if (!vma_shareable(vma, addr))
4030                 return (pte_t *)pmd_alloc(mm, pud, addr);
4031
4032         i_mmap_lock_write(mapping);
4033         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4034                 if (svma == vma)
4035                         continue;
4036
4037                 saddr = page_table_shareable(svma, vma, addr, idx);
4038                 if (saddr) {
4039                         spte = huge_pte_offset(svma->vm_mm, saddr);
4040                         if (spte) {
4041                                 mm_inc_nr_pmds(mm);
4042                                 get_page(virt_to_page(spte));
4043                                 break;
4044                         }
4045                 }
4046         }
4047
4048         if (!spte)
4049                 goto out;
4050
4051         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4052         spin_lock(ptl);
4053         if (pud_none(*pud)) {
4054                 pud_populate(mm, pud,
4055                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4056         } else {
4057                 put_page(virt_to_page(spte));
4058                 mm_inc_nr_pmds(mm);
4059         }
4060         spin_unlock(ptl);
4061 out:
4062         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4063         i_mmap_unlock_write(mapping);
4064         return pte;
4065 }
4066
4067 /*
4068  * unmap huge page backed by shared pte.
4069  *
4070  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
4071  * shared, indicated by page_count > 1, unmap is achieved by clearing the pud
4072  * and decrementing the refcount. If count == 1, the pte page is not shared.
4073  *
4074  * Called with the page table lock held.
4075  *
4076  * returns: 1 successfully unmapped a shared pte page
4077  *          0 the underlying pte page is not shared, or it is the last user
4078  */
4079 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4080 {
4081         pgd_t *pgd = pgd_offset(mm, *addr);
4082         pud_t *pud = pud_offset(pgd, *addr);
4083
4084         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4085         if (page_count(virt_to_page(ptep)) == 1)
4086                 return 0;
4087
4088         pud_clear(pud);
4089         put_page(virt_to_page(ptep));
4090         mm_dec_nr_pmds(mm);
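        /*
         * Step *addr to the last huge page of the unshared PUD range so the
         * caller's loop increment moves past the whole range.
         */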
4091         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4092         return 1;
4093 }
4094 #define want_pmd_share()        (1)
4095 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4096 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4097 {
4098         return NULL;
4099 }
4100
4101 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4102 {
4103         return 0;
4104 }
4105 #define want_pmd_share()        (0)
4106 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4107
4108 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4109 pte_t *huge_pte_alloc(struct mm_struct *mm,
4110                         unsigned long addr, unsigned long sz)
4111 {
4112         pgd_t *pgd;
4113         pud_t *pud;
4114         pte_t *pte = NULL;
4115
4116         pgd = pgd_offset(mm, addr);
4117         pud = pud_alloc(mm, pgd, addr);
4118         if (pud) {
4119                 if (sz == PUD_SIZE) {
4120                         pte = (pte_t *)pud;
4121                 } else {
4122                         BUG_ON(sz != PMD_SIZE);
4123                         if (want_pmd_share() && pud_none(*pud))
4124                                 pte = huge_pmd_share(mm, addr, pud);
4125                         else
4126                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4127                 }
4128         }
4129         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4130
4131         return pte;
4132 }
4133
4134 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4135 {
4136         pgd_t *pgd;
4137         pud_t *pud;
4138         pmd_t *pmd = NULL;
4139
4140         pgd = pgd_offset(mm, addr);
4141         if (pgd_present(*pgd)) {
4142                 pud = pud_offset(pgd, addr);
4143                 if (pud_present(*pud)) {
4144                         if (pud_huge(*pud))
4145                                 return (pte_t *)pud;
4146                         pmd = pmd_offset(pud, addr);
4147                 }
4148         }
4149         return (pte_t *) pmd;
4150 }
4151
4152 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4153
4154 /*
4155  * These functions are overridable if your architecture needs its own
4156  * behavior.
4157  */
4158 struct page * __weak
4159 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4160                               int write)
4161 {
4162         return ERR_PTR(-EINVAL);
4163 }
4164
4165 struct page * __weak
4166 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4167                 pmd_t *pmd, int flags)
4168 {
4169         struct page *page = NULL;
4170         spinlock_t *ptl;
4171 retry:
4172         ptl = pmd_lockptr(mm, pmd);
4173         spin_lock(ptl);
4174         /*
4175          * make sure that the address range covered by this pmd is not
4176          * unmapped by other threads.
4177          */
4178         if (!pmd_huge(*pmd))
4179                 goto out;
4180         if (pmd_present(*pmd)) {
4181                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4182                 if (flags & FOLL_GET)
4183                         get_page(page);
4184         } else {
4185                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4186                         spin_unlock(ptl);
4187                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4188                         goto retry;
4189                 }
4190                 /*
4191                  * hwpoisoned entry is treated as no_page_table in
4192                  * follow_page_mask().
4193                  */
4194         }
4195 out:
4196         spin_unlock(ptl);
4197         return page;
4198 }
4199
4200 struct page * __weak
4201 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4202                 pud_t *pud, int flags)
4203 {
4204         if (flags & FOLL_GET)
4205                 return NULL;
4206
4207         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4208 }
4209
4210 #ifdef CONFIG_MEMORY_FAILURE
4211
4212 /*
4213  * This function is called from the memory-failure code.
4214  * Assume the caller holds the page lock of the head page.
4215  */
4216 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4217 {
4218         struct hstate *h = page_hstate(hpage);
4219         int nid = page_to_nid(hpage);
4220         int ret = -EBUSY;
4221
4222         spin_lock(&hugetlb_lock);
4223         /*
4224          * Just checking !page_huge_active is not enough, because that could be
4225          * an isolated/hwpoisoned hugepage (which has refcount > 0).
4226          */
4227         if (!page_huge_active(hpage) && !page_count(hpage)) {
4228                 /*
4229                  * A hwpoisoned hugepage isn't linked to the activelist or
4230                  * freelist, but a dangling hpage->lru can trigger list-debug
4231                  * warnings (as happens when we call unpoison_memory() on it),
4232                  * so let it point to itself with list_del_init().
4233                  */
4234                 list_del_init(&hpage->lru);
4235                 set_page_refcounted(hpage);
4236                 h->free_huge_pages--;
4237                 h->free_huge_pages_node[nid]--;
4238                 ret = 0;
4239         }
4240         spin_unlock(&hugetlb_lock);
4241         return ret;
4242 }
4243 #endif
4244
4245 bool isolate_huge_page(struct page *page, struct list_head *list)
4246 {
4247         bool ret = true;
4248
4249         VM_BUG_ON_PAGE(!PageHead(page), page);
4250         spin_lock(&hugetlb_lock);
4251         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4252                 ret = false;
4253                 goto unlock;
4254         }
4255         clear_page_huge_active(page);
4256         list_move_tail(&page->lru, list);
4257 unlock:
4258         spin_unlock(&hugetlb_lock);
4259         return ret;
4260 }
4261
4262 void putback_active_hugepage(struct page *page)
4263 {
4264         VM_BUG_ON_PAGE(!PageHead(page), page);
4265         spin_lock(&hugetlb_lock);
4266         set_page_huge_active(page);
4267         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4268         spin_unlock(&hugetlb_lock);
4269         put_page(page);
4270 }