mm/huge_memory.c (karo-tx-linux.git)
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/dax.h>
20 #include <linux/kthread.h>
21 #include <linux/khugepaged.h>
22 #include <linux/freezer.h>
23 #include <linux/mman.h>
24 #include <linux/pagemap.h>
25 #include <linux/migrate.h>
26 #include <linux/hashtable.h>
27 #include <linux/userfaultfd_k.h>
28 #include <linux/page_idle.h>
29 #include <linux/swapops.h>
30
31 #include <asm/tlb.h>
32 #include <asm/pgalloc.h>
33 #include "internal.h"
34
35 enum scan_result {
36         SCAN_FAIL,
37         SCAN_SUCCEED,
38         SCAN_PMD_NULL,
39         SCAN_EXCEED_NONE_PTE,
40         SCAN_PTE_NON_PRESENT,
41         SCAN_PAGE_RO,
42         SCAN_NO_REFERENCED_PAGE,
43         SCAN_PAGE_NULL,
44         SCAN_SCAN_ABORT,
45         SCAN_PAGE_COUNT,
46         SCAN_PAGE_LRU,
47         SCAN_PAGE_LOCK,
48         SCAN_PAGE_ANON,
49         SCAN_PAGE_COMPOUND,
50         SCAN_ANY_PROCESS,
51         SCAN_VMA_NULL,
52         SCAN_VMA_CHECK,
53         SCAN_ADDRESS_RANGE,
54         SCAN_SWAP_CACHE_PAGE,
55         SCAN_DEL_PAGE_LRU,
56         SCAN_ALLOC_HUGE_PAGE_FAIL,
57         SCAN_CGROUP_CHARGE_FAIL,
58         SCAN_EXCEED_SWAP_PTE
59 };
60
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/huge_memory.h>
63
64 /*
65  * By default, transparent hugepage support is disabled to avoid the
66  * risk of increasing the memory footprint of applications without a
67  * guaranteed benefit. When transparent hugepage support is enabled,
68  * it applies to all mappings, and khugepaged scans all mappings.
69  * Defrag is invoked by khugepaged hugepage allocations and by page
70  * faults for all hugepage allocations.
71  */
72 unsigned long transparent_hugepage_flags __read_mostly =
73 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
74         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
75 #endif
76 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
77         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
78 #endif
79         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
80         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
81         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
82
83 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
84 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
85 static unsigned int khugepaged_pages_collapsed;
86 static unsigned int khugepaged_full_scans;
87 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
88 /* during fragmentation, poll the hugepage allocator once every minute */
89 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
90 static struct task_struct *khugepaged_thread __read_mostly;
91 static DEFINE_MUTEX(khugepaged_mutex);
92 static DEFINE_SPINLOCK(khugepaged_mm_lock);
93 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
94 /*
95  * default: collapse a hugepage if at least one pte is mapped, just as
96  * it would have been mapped had the vma been large enough at page
97  * fault time.
98  */
99 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
100 static unsigned int khugepaged_max_ptes_swap __read_mostly = HPAGE_PMD_NR/8;
101
102 static int khugepaged(void *none);
103 static int khugepaged_slab_init(void);
104 static void khugepaged_slab_exit(void);
105
106 #define MM_SLOTS_HASH_BITS 10
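/* hash from mm_struct to its mm_slot; updates are serialised by khugepaged_mm_lock */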
107 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
108
109 static struct kmem_cache *mm_slot_cache __read_mostly;
110
111 /**
112  * struct mm_slot - hash lookup from mm to mm_slot
113  * @hash: hash collision list
114  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
115  * @mm: the mm that this information is valid for
116  */
117 struct mm_slot {
118         struct hlist_node hash;
119         struct list_head mm_node;
120         struct mm_struct *mm;
121 };
122
123 /**
124  * struct khugepaged_scan - cursor for scanning
125  * @mm_head: the head of the mm list to scan
126  * @mm_slot: the current mm_slot we are scanning
127  * @address: the next address inside that to be scanned
128  *
129  * There is only one khugepaged_scan instance of this cursor structure.
130  */
131 struct khugepaged_scan {
132         struct list_head mm_head;
133         struct mm_slot *mm_slot;
134         unsigned long address;
135 };
136 static struct khugepaged_scan khugepaged_scan = {
137         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
138 };
139
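/*
 * Queue of partially-unmapped THPs deferred for splitting by the
 * shrinker below; protected by split_queue_lock.
 */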
140 static DEFINE_SPINLOCK(split_queue_lock);
141 static LIST_HEAD(split_queue);
142 static unsigned long split_queue_len;
143 static struct shrinker deferred_split_shrinker;
144
145 static void set_recommended_min_free_kbytes(void)
146 {
147         struct zone *zone;
148         int nr_zones = 0;
149         unsigned long recommended_min;
150
151         for_each_populated_zone(zone)
152                 nr_zones++;
153
154         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
155         recommended_min = pageblock_nr_pages * nr_zones * 2;
156
157         /*
158          * Make sure that on average at least two pageblocks are almost free
159          * of another type, one for a migratetype to fall back to and a
160          * second to avoid subsequent fallbacks of other types. There are 3
161          * MIGRATE_TYPES we care about.
162          */
163         recommended_min += pageblock_nr_pages * nr_zones *
164                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
165
166         /* don't ever allow reserving more than 5% of the lowmem */
167         recommended_min = min(recommended_min,
168                               (unsigned long) nr_free_buffer_pages() / 20);
169         recommended_min <<= (PAGE_SHIFT-10);
170
171         if (recommended_min > min_free_kbytes) {
172                 if (user_min_free_kbytes >= 0)
173                         pr_info("raising min_free_kbytes from %d to %lu "
174                                 "to help transparent hugepage allocations\n",
175                                 min_free_kbytes, recommended_min);
176
177                 min_free_kbytes = recommended_min;
178         }
179         setup_per_zone_wmarks();
180 }
181
182 static int start_stop_khugepaged(void)
183 {
184         int err = 0;
185         if (khugepaged_enabled()) {
186                 if (!khugepaged_thread)
187                         khugepaged_thread = kthread_run(khugepaged, NULL,
188                                                         "khugepaged");
189                 if (IS_ERR(khugepaged_thread)) {
190                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
191                         err = PTR_ERR(khugepaged_thread);
192                         khugepaged_thread = NULL;
193                         goto fail;
194                 }
195
196                 if (!list_empty(&khugepaged_scan.mm_head))
197                         wake_up_interruptible(&khugepaged_wait);
198
199                 set_recommended_min_free_kbytes();
200         } else if (khugepaged_thread) {
201                 kthread_stop(khugepaged_thread);
202                 khugepaged_thread = NULL;
203         }
204 fail:
205         return err;
206 }
207
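/*
 * Huge zero page, allocated lazily on first use and freed by its
 * shrinker once only the shrinker's own reference remains.
 */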
208 static atomic_t huge_zero_refcount;
209 struct page *huge_zero_page __read_mostly;
210
211 struct page *get_huge_zero_page(void)
212 {
213         struct page *zero_page;
214 retry:
215         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
216                 return READ_ONCE(huge_zero_page);
217
218         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
219                         HPAGE_PMD_ORDER);
220         if (!zero_page) {
221                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
222                 return NULL;
223         }
224         count_vm_event(THP_ZERO_PAGE_ALLOC);
225         preempt_disable();
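        /* Another task may have installed its zero page first; free ours and retry. */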
226         if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
227                 preempt_enable();
228                 __free_pages(zero_page, compound_order(zero_page));
229                 goto retry;
230         }
231
232         /* We take an additional reference here. It will be put back by the shrinker */
233         atomic_set(&huge_zero_refcount, 2);
234         preempt_enable();
235         return READ_ONCE(huge_zero_page);
236 }
237
238 static void put_huge_zero_page(void)
239 {
240         /*
241          * The counter should never go to zero here. Only the shrinker can
242          * put the last reference.
243          */
244         BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
245 }
246
247 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
248                                         struct shrink_control *sc)
249 {
250         /* we can free zero page only if last reference remains */
251         return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
252 }
253
254 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
255                                        struct shrink_control *sc)
256 {
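        /* Once all users drop their references the count is 1; take it to 0 and free the page. */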
257         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
258                 struct page *zero_page = xchg(&huge_zero_page, NULL);
259                 BUG_ON(zero_page == NULL);
260                 __free_pages(zero_page, compound_order(zero_page));
261                 return HPAGE_PMD_NR;
262         }
263
264         return 0;
265 }
266
267 static struct shrinker huge_zero_page_shrinker = {
268         .count_objects = shrink_huge_zero_page_count,
269         .scan_objects = shrink_huge_zero_page_scan,
270         .seeks = DEFAULT_SEEKS,
271 };
272
273 #ifdef CONFIG_SYSFS
274
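/*
 * Show/store helpers for the three-state (always/madvise/never) sysfs
 * knobs, each backed by a pair of flag bits in transparent_hugepage_flags.
 */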
275 static ssize_t double_flag_show(struct kobject *kobj,
276                                 struct kobj_attribute *attr, char *buf,
277                                 enum transparent_hugepage_flag enabled,
278                                 enum transparent_hugepage_flag req_madv)
279 {
280         if (test_bit(enabled, &transparent_hugepage_flags)) {
281                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
282                 return sprintf(buf, "[always] madvise never\n");
283         } else if (test_bit(req_madv, &transparent_hugepage_flags))
284                 return sprintf(buf, "always [madvise] never\n");
285         else
286                 return sprintf(buf, "always madvise [never]\n");
287 }
288 static ssize_t double_flag_store(struct kobject *kobj,
289                                  struct kobj_attribute *attr,
290                                  const char *buf, size_t count,
291                                  enum transparent_hugepage_flag enabled,
292                                  enum transparent_hugepage_flag req_madv)
293 {
294         if (!memcmp("always", buf,
295                     min(sizeof("always")-1, count))) {
296                 set_bit(enabled, &transparent_hugepage_flags);
297                 clear_bit(req_madv, &transparent_hugepage_flags);
298         } else if (!memcmp("madvise", buf,
299                            min(sizeof("madvise")-1, count))) {
300                 clear_bit(enabled, &transparent_hugepage_flags);
301                 set_bit(req_madv, &transparent_hugepage_flags);
302         } else if (!memcmp("never", buf,
303                            min(sizeof("never")-1, count))) {
304                 clear_bit(enabled, &transparent_hugepage_flags);
305                 clear_bit(req_madv, &transparent_hugepage_flags);
306         } else
307                 return -EINVAL;
308
309         return count;
310 }
311
312 static ssize_t enabled_show(struct kobject *kobj,
313                             struct kobj_attribute *attr, char *buf)
314 {
315         return double_flag_show(kobj, attr, buf,
316                                 TRANSPARENT_HUGEPAGE_FLAG,
317                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
318 }
319 static ssize_t enabled_store(struct kobject *kobj,
320                              struct kobj_attribute *attr,
321                              const char *buf, size_t count)
322 {
323         ssize_t ret;
324
325         ret = double_flag_store(kobj, attr, buf, count,
326                                 TRANSPARENT_HUGEPAGE_FLAG,
327                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
328
329         if (ret > 0) {
330                 int err;
331
332                 mutex_lock(&khugepaged_mutex);
333                 err = start_stop_khugepaged();
334                 mutex_unlock(&khugepaged_mutex);
335
336                 if (err)
337                         ret = err;
338         }
339
340         return ret;
341 }
342 static struct kobj_attribute enabled_attr =
343         __ATTR(enabled, 0644, enabled_show, enabled_store);
344
345 static ssize_t single_flag_show(struct kobject *kobj,
346                                 struct kobj_attribute *attr, char *buf,
347                                 enum transparent_hugepage_flag flag)
348 {
349         return sprintf(buf, "%d\n",
350                        !!test_bit(flag, &transparent_hugepage_flags));
351 }
352
353 static ssize_t single_flag_store(struct kobject *kobj,
354                                  struct kobj_attribute *attr,
355                                  const char *buf, size_t count,
356                                  enum transparent_hugepage_flag flag)
357 {
358         unsigned long value;
359         int ret;
360
361         ret = kstrtoul(buf, 10, &value);
362         if (ret < 0)
363                 return ret;
364         if (value > 1)
365                 return -EINVAL;
366
367         if (value)
368                 set_bit(flag, &transparent_hugepage_flags);
369         else
370                 clear_bit(flag, &transparent_hugepage_flags);
371
372         return count;
373 }
374
375 /*
376  * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
377  * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
378  * memory just to allocate one more hugepage.
379  */
380 static ssize_t defrag_show(struct kobject *kobj,
381                            struct kobj_attribute *attr, char *buf)
382 {
383         return double_flag_show(kobj, attr, buf,
384                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
385                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
386 }
387 static ssize_t defrag_store(struct kobject *kobj,
388                             struct kobj_attribute *attr,
389                             const char *buf, size_t count)
390 {
391         return double_flag_store(kobj, attr, buf, count,
392                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
393                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
394 }
395 static struct kobj_attribute defrag_attr =
396         __ATTR(defrag, 0644, defrag_show, defrag_store);
397
398 static ssize_t use_zero_page_show(struct kobject *kobj,
399                 struct kobj_attribute *attr, char *buf)
400 {
401         return single_flag_show(kobj, attr, buf,
402                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
403 }
404 static ssize_t use_zero_page_store(struct kobject *kobj,
405                 struct kobj_attribute *attr, const char *buf, size_t count)
406 {
407         return single_flag_store(kobj, attr, buf, count,
408                                  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
409 }
410 static struct kobj_attribute use_zero_page_attr =
411         __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
412 #ifdef CONFIG_DEBUG_VM
413 static ssize_t debug_cow_show(struct kobject *kobj,
414                                 struct kobj_attribute *attr, char *buf)
415 {
416         return single_flag_show(kobj, attr, buf,
417                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
418 }
419 static ssize_t debug_cow_store(struct kobject *kobj,
420                                struct kobj_attribute *attr,
421                                const char *buf, size_t count)
422 {
423         return single_flag_store(kobj, attr, buf, count,
424                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
425 }
426 static struct kobj_attribute debug_cow_attr =
427         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
428 #endif /* CONFIG_DEBUG_VM */
429
430 static struct attribute *hugepage_attr[] = {
431         &enabled_attr.attr,
432         &defrag_attr.attr,
433         &use_zero_page_attr.attr,
434 #ifdef CONFIG_DEBUG_VM
435         &debug_cow_attr.attr,
436 #endif
437         NULL,
438 };
439
440 static struct attribute_group hugepage_attr_group = {
441         .attrs = hugepage_attr,
442 };
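/* These attributes appear directly under /sys/kernel/mm/transparent_hugepage/. */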
443
444 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
445                                          struct kobj_attribute *attr,
446                                          char *buf)
447 {
448         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
449 }
450
451 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
452                                           struct kobj_attribute *attr,
453                                           const char *buf, size_t count)
454 {
455         unsigned long msecs;
456         int err;
457
458         err = kstrtoul(buf, 10, &msecs);
459         if (err || msecs > UINT_MAX)
460                 return -EINVAL;
461
462         khugepaged_scan_sleep_millisecs = msecs;
463         wake_up_interruptible(&khugepaged_wait);
464
465         return count;
466 }
467 static struct kobj_attribute scan_sleep_millisecs_attr =
468         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
469                scan_sleep_millisecs_store);
470
471 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
472                                           struct kobj_attribute *attr,
473                                           char *buf)
474 {
475         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
476 }
477
478 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
479                                            struct kobj_attribute *attr,
480                                            const char *buf, size_t count)
481 {
482         unsigned long msecs;
483         int err;
484
485         err = kstrtoul(buf, 10, &msecs);
486         if (err || msecs > UINT_MAX)
487                 return -EINVAL;
488
489         khugepaged_alloc_sleep_millisecs = msecs;
490         wake_up_interruptible(&khugepaged_wait);
491
492         return count;
493 }
494 static struct kobj_attribute alloc_sleep_millisecs_attr =
495         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
496                alloc_sleep_millisecs_store);
497
498 static ssize_t pages_to_scan_show(struct kobject *kobj,
499                                   struct kobj_attribute *attr,
500                                   char *buf)
501 {
502         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
503 }
504 static ssize_t pages_to_scan_store(struct kobject *kobj,
505                                    struct kobj_attribute *attr,
506                                    const char *buf, size_t count)
507 {
508         int err;
509         unsigned long pages;
510
511         err = kstrtoul(buf, 10, &pages);
512         if (err || !pages || pages > UINT_MAX)
513                 return -EINVAL;
514
515         khugepaged_pages_to_scan = pages;
516
517         return count;
518 }
519 static struct kobj_attribute pages_to_scan_attr =
520         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
521                pages_to_scan_store);
522
523 static ssize_t pages_collapsed_show(struct kobject *kobj,
524                                     struct kobj_attribute *attr,
525                                     char *buf)
526 {
527         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
528 }
529 static struct kobj_attribute pages_collapsed_attr =
530         __ATTR_RO(pages_collapsed);
531
532 static ssize_t full_scans_show(struct kobject *kobj,
533                                struct kobj_attribute *attr,
534                                char *buf)
535 {
536         return sprintf(buf, "%u\n", khugepaged_full_scans);
537 }
538 static struct kobj_attribute full_scans_attr =
539         __ATTR_RO(full_scans);
540
541 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
542                                       struct kobj_attribute *attr, char *buf)
543 {
544         return single_flag_show(kobj, attr, buf,
545                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
546 }
547 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
548                                        struct kobj_attribute *attr,
549                                        const char *buf, size_t count)
550 {
551         return single_flag_store(kobj, attr, buf, count,
552                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
553 }
554 static struct kobj_attribute khugepaged_defrag_attr =
555         __ATTR(defrag, 0644, khugepaged_defrag_show,
556                khugepaged_defrag_store);
557
558 /*
559  * max_ptes_none controls whether khugepaged should collapse hugepages
560  * over any unmapped ptes, in turn potentially increasing the memory
561  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
562  * reduce the available free memory in the system as it runs.
563  * Increasing max_ptes_none will instead potentially reduce the free
564  * memory in the system during the khugepaged scan.
565  */
566 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
567                                              struct kobj_attribute *attr,
568                                              char *buf)
569 {
570         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
571 }
572 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
573                                               struct kobj_attribute *attr,
574                                               const char *buf, size_t count)
575 {
576         int err;
577         unsigned long max_ptes_none;
578
579         err = kstrtoul(buf, 10, &max_ptes_none);
580         if (err || max_ptes_none > HPAGE_PMD_NR-1)
581                 return -EINVAL;
582
583         khugepaged_max_ptes_none = max_ptes_none;
584
585         return count;
586 }
587 static struct kobj_attribute khugepaged_max_ptes_none_attr =
588         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
589                khugepaged_max_ptes_none_store);
590
591 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
592                                              struct kobj_attribute *attr,
593                                              char *buf)
594 {
595         return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
596 }
597
598 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
599                                               struct kobj_attribute *attr,
600                                               const char *buf, size_t count)
601 {
602         int err;
603         unsigned long max_ptes_swap;
604
605         err  = kstrtoul(buf, 10, &max_ptes_swap);
606         if (err || max_ptes_swap > HPAGE_PMD_NR-1)
607                 return -EINVAL;
608
609         khugepaged_max_ptes_swap = max_ptes_swap;
610
611         return count;
612 }
613
614 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
615         __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
616                khugepaged_max_ptes_swap_store);
617
618 static struct attribute *khugepaged_attr[] = {
619         &khugepaged_defrag_attr.attr,
620         &khugepaged_max_ptes_none_attr.attr,
621         &pages_to_scan_attr.attr,
622         &pages_collapsed_attr.attr,
623         &full_scans_attr.attr,
624         &scan_sleep_millisecs_attr.attr,
625         &alloc_sleep_millisecs_attr.attr,
626         &khugepaged_max_ptes_swap_attr.attr,
627         NULL,
628 };
629
630 static struct attribute_group khugepaged_attr_group = {
631         .attrs = khugepaged_attr,
632         .name = "khugepaged",
633 };
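/*
 * The .name above places these attributes under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */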
634
635 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
636 {
637         int err;
638
639         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
640         if (unlikely(!*hugepage_kobj)) {
641                 pr_err("failed to create transparent hugepage kobject\n");
642                 return -ENOMEM;
643         }
644
645         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
646         if (err) {
647                 pr_err("failed to register transparent hugepage group\n");
648                 goto delete_obj;
649         }
650
651         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
652         if (err) {
653                 pr_err("failed to register transparent hugepage group\n");
654                 goto remove_hp_group;
655         }
656
657         return 0;
658
659 remove_hp_group:
660         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
661 delete_obj:
662         kobject_put(*hugepage_kobj);
663         return err;
664 }
665
666 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
667 {
668         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
669         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
670         kobject_put(hugepage_kobj);
671 }
672 #else
673 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
674 {
675         return 0;
676 }
677
678 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
679 {
680 }
681 #endif /* CONFIG_SYSFS */
682
683 static int __init hugepage_init(void)
684 {
685         int err;
686         struct kobject *hugepage_kobj;
687
688         if (!has_transparent_hugepage()) {
689                 transparent_hugepage_flags = 0;
690                 return -EINVAL;
691         }
692
693         err = hugepage_init_sysfs(&hugepage_kobj);
694         if (err)
695                 goto err_sysfs;
696
697         err = khugepaged_slab_init();
698         if (err)
699                 goto err_slab;
700
701         err = register_shrinker(&huge_zero_page_shrinker);
702         if (err)
703                 goto err_hzp_shrinker;
704         err = register_shrinker(&deferred_split_shrinker);
705         if (err)
706                 goto err_split_shrinker;
707
708         /*
709          * By default disable transparent hugepages on smaller systems,
710          * where the extra memory used could hurt more than TLB overhead
711          * is likely to save.  The admin can still enable it through /sys.
712          */
713         if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
714                 transparent_hugepage_flags = 0;
715                 return 0;
716         }
717
718         err = start_stop_khugepaged();
719         if (err)
720                 goto err_khugepaged;
721
722         return 0;
723 err_khugepaged:
724         unregister_shrinker(&deferred_split_shrinker);
725 err_split_shrinker:
726         unregister_shrinker(&huge_zero_page_shrinker);
727 err_hzp_shrinker:
728         khugepaged_slab_exit();
729 err_slab:
730         hugepage_exit_sysfs(hugepage_kobj);
731 err_sysfs:
732         return err;
733 }
734 subsys_initcall(hugepage_init);
735
736 static int __init setup_transparent_hugepage(char *str)
737 {
738         int ret = 0;
739         if (!str)
740                 goto out;
741         if (!strcmp(str, "always")) {
742                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
743                         &transparent_hugepage_flags);
744                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
745                           &transparent_hugepage_flags);
746                 ret = 1;
747         } else if (!strcmp(str, "madvise")) {
748                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
749                           &transparent_hugepage_flags);
750                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
751                         &transparent_hugepage_flags);
752                 ret = 1;
753         } else if (!strcmp(str, "never")) {
754                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
755                           &transparent_hugepage_flags);
756                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
757                           &transparent_hugepage_flags);
758                 ret = 1;
759         }
760 out:
761         if (!ret)
762                 pr_warn("transparent_hugepage= cannot parse, ignored\n");
763         return ret;
764 }
765 __setup("transparent_hugepage=", setup_transparent_hugepage);
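/*
 * Example: booting with transparent_hugepage=madvise limits THP to VMAs
 * that request it via madvise(MADV_HUGEPAGE).
 */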
766
767 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
768 {
769         if (likely(vma->vm_flags & VM_WRITE))
770                 pmd = pmd_mkwrite(pmd);
771         return pmd;
772 }
773
774 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
775 {
776         pmd_t entry;
777         entry = mk_pmd(page, prot);
778         entry = pmd_mkhuge(entry);
779         return entry;
780 }
781
782 static inline struct list_head *page_deferred_list(struct page *page)
783 {
784         /*
785          * ->lru in the tail pages is occupied by compound_head.
786          * Let's use ->mapping + ->index in the second tail page as list_head.
787          */
788         return (struct list_head *)&page[2].mapping;
789 }
790
791 void prep_transhuge_page(struct page *page)
792 {
793         /*
794          * we use page->mapping and page->index in the second tail page
795          * as list_head: assuming THP order >= 2
796          */
797         BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
798
799         INIT_LIST_HEAD(page_deferred_list(page));
800         set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
801 }
802
803 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
804                                         struct vm_area_struct *vma,
805                                         unsigned long address, pmd_t *pmd,
806                                         struct page *page, gfp_t gfp,
807                                         unsigned int flags)
808 {
809         struct mem_cgroup *memcg;
810         pgtable_t pgtable;
811         spinlock_t *ptl;
812         unsigned long haddr = address & HPAGE_PMD_MASK;
813
814         VM_BUG_ON_PAGE(!PageCompound(page), page);
815
816         if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
817                 put_page(page);
818                 count_vm_event(THP_FAULT_FALLBACK);
819                 return VM_FAULT_FALLBACK;
820         }
821
822         pgtable = pte_alloc_one(mm, haddr);
823         if (unlikely(!pgtable)) {
824                 mem_cgroup_cancel_charge(page, memcg, true);
825                 put_page(page);
826                 return VM_FAULT_OOM;
827         }
828
829         clear_huge_page(page, haddr, HPAGE_PMD_NR);
830         /*
831          * The memory barrier inside __SetPageUptodate makes sure that
832          * clear_huge_page writes become visible before the set_pmd_at()
833          * write.
834          */
835         __SetPageUptodate(page);
836
837         ptl = pmd_lock(mm, pmd);
838         if (unlikely(!pmd_none(*pmd))) {
839                 spin_unlock(ptl);
840                 mem_cgroup_cancel_charge(page, memcg, true);
841                 put_page(page);
842                 pte_free(mm, pgtable);
843         } else {
844                 pmd_t entry;
845
846                 /* Deliver the page fault to userland */
847                 if (userfaultfd_missing(vma)) {
848                         int ret;
849
850                         spin_unlock(ptl);
851                         mem_cgroup_cancel_charge(page, memcg, true);
852                         put_page(page);
853                         pte_free(mm, pgtable);
854                         ret = handle_userfault(vma, address, flags,
855                                                VM_UFFD_MISSING);
856                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
857                         return ret;
858                 }
859
860                 entry = mk_huge_pmd(page, vma->vm_page_prot);
861                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
862                 page_add_new_anon_rmap(page, vma, haddr, true);
863                 mem_cgroup_commit_charge(page, memcg, false, true);
864                 lru_cache_add_active_or_unevictable(page, vma);
865                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
866                 set_pmd_at(mm, haddr, pmd, entry);
867                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
868                 atomic_long_inc(&mm->nr_ptes);
869                 spin_unlock(ptl);
870                 count_vm_event(THP_FAULT_ALLOC);
871         }
872
873         return 0;
874 }
875
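/*
 * Without defrag, clear __GFP_RECLAIM from GFP_TRANSHUGE so the huge
 * page allocation will not reclaim or compact memory to succeed.
 */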
876 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
877 {
878         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
879 }
880
881 /* Caller must hold page table lock. */
882 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
883                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
884                 struct page *zero_page)
885 {
886         pmd_t entry;
887         if (!pmd_none(*pmd))
888                 return false;
889         entry = mk_pmd(zero_page, vma->vm_page_prot);
890         entry = pmd_mkhuge(entry);
891         pgtable_trans_huge_deposit(mm, pmd, pgtable);
892         set_pmd_at(mm, haddr, pmd, entry);
893         atomic_long_inc(&mm->nr_ptes);
894         return true;
895 }
896
897 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
898                                unsigned long address, pmd_t *pmd,
899                                unsigned int flags)
900 {
901         gfp_t gfp;
902         struct page *page;
903         unsigned long haddr = address & HPAGE_PMD_MASK;
904
905         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
906                 return VM_FAULT_FALLBACK;
907         if (unlikely(anon_vma_prepare(vma)))
908                 return VM_FAULT_OOM;
909         if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
910                 return VM_FAULT_OOM;
911         if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
912                         transparent_hugepage_use_zero_page()) {
913                 spinlock_t *ptl;
914                 pgtable_t pgtable;
915                 struct page *zero_page;
916                 bool set;
917                 int ret;
918                 pgtable = pte_alloc_one(mm, haddr);
919                 if (unlikely(!pgtable))
920                         return VM_FAULT_OOM;
921                 zero_page = get_huge_zero_page();
922                 if (unlikely(!zero_page)) {
923                         pte_free(mm, pgtable);
924                         count_vm_event(THP_FAULT_FALLBACK);
925                         return VM_FAULT_FALLBACK;
926                 }
927                 ptl = pmd_lock(mm, pmd);
928                 ret = 0;
929                 set = false;
930                 if (pmd_none(*pmd)) {
931                         if (userfaultfd_missing(vma)) {
932                                 spin_unlock(ptl);
933                                 ret = handle_userfault(vma, address, flags,
934                                                        VM_UFFD_MISSING);
935                                 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
936                         } else {
937                                 set_huge_zero_page(pgtable, mm, vma,
938                                                    haddr, pmd,
939                                                    zero_page);
940                                 spin_unlock(ptl);
941                                 set = true;
942                         }
943                 } else
944                         spin_unlock(ptl);
945                 if (!set) {
946                         pte_free(mm, pgtable);
947                         put_huge_zero_page();
948                 }
949                 return ret;
950         }
951         gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
952         page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
953         if (unlikely(!page)) {
954                 count_vm_event(THP_FAULT_FALLBACK);
955                 return VM_FAULT_FALLBACK;
956         }
957         prep_transhuge_page(page);
958         return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
959                                             flags);
960 }
961
962 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
963                 pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
964 {
965         struct mm_struct *mm = vma->vm_mm;
966         pmd_t entry;
967         spinlock_t *ptl;
968
969         ptl = pmd_lock(mm, pmd);
970         if (pmd_none(*pmd)) {
971                 entry = pmd_mkhuge(pfn_pmd(pfn, prot));
972                 if (write) {
973                         entry = pmd_mkyoung(pmd_mkdirty(entry));
974                         entry = maybe_pmd_mkwrite(entry, vma);
975                 }
976                 set_pmd_at(mm, addr, pmd, entry);
977                 update_mmu_cache_pmd(vma, addr, pmd);
978         }
979         spin_unlock(ptl);
980 }
981
982 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
983                         pmd_t *pmd, unsigned long pfn, bool write)
984 {
985         pgprot_t pgprot = vma->vm_page_prot;
986         /*
987          * If we had pmd_special, we could avoid all these restrictions,
988          * but we need to be consistent with PTEs and architectures that
989          * can't support a 'special' bit.
990          */
991         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
992         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
993                                                 (VM_PFNMAP|VM_MIXEDMAP));
994         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
995         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
996
997         if (addr < vma->vm_start || addr >= vma->vm_end)
998                 return VM_FAULT_SIGBUS;
999         if (track_pfn_insert(vma, &pgprot, pfn))
1000                 return VM_FAULT_SIGBUS;
1001         insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
1002         return VM_FAULT_NOPAGE;
1003 }
1004
1005 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1006                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1007                   struct vm_area_struct *vma)
1008 {
1009         spinlock_t *dst_ptl, *src_ptl;
1010         struct page *src_page;
1011         pmd_t pmd;
1012         pgtable_t pgtable;
1013         int ret;
1014
1015         ret = -ENOMEM;
1016         pgtable = pte_alloc_one(dst_mm, addr);
1017         if (unlikely(!pgtable))
1018                 goto out;
1019
1020         dst_ptl = pmd_lock(dst_mm, dst_pmd);
1021         src_ptl = pmd_lockptr(src_mm, src_pmd);
1022         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1023
1024         ret = -EAGAIN;
1025         pmd = *src_pmd;
1026         if (unlikely(!pmd_trans_huge(pmd))) {
1027                 pte_free(dst_mm, pgtable);
1028                 goto out_unlock;
1029         }
1030         /*
1031          * When the page table lock is held, the huge zero pmd should not be
1032          * under splitting, since we don't split the page itself, only the
1033          * pmd into a page table.
1034          */
1035         if (is_huge_zero_pmd(pmd)) {
1036                 struct page *zero_page;
1037                 /*
1038                  * get_huge_zero_page() will never allocate a new page here,
1039                  * since we already have a zero page to copy. It just takes a
1040                  * reference.
1041                  */
1042                 zero_page = get_huge_zero_page();
1043                 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
1044                                 zero_page);
1045                 ret = 0;
1046                 goto out_unlock;
1047         }
1048
1049         src_page = pmd_page(pmd);
1050         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1051         get_page(src_page);
1052         page_dup_rmap(src_page, true);
1053         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1054
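        /* Write-protect the pmd in both parent and child so a later write triggers COW. */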
1055         pmdp_set_wrprotect(src_mm, addr, src_pmd);
1056         pmd = pmd_mkold(pmd_wrprotect(pmd));
1057         pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1058         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1059         atomic_long_inc(&dst_mm->nr_ptes);
1060
1061         ret = 0;
1062 out_unlock:
1063         spin_unlock(src_ptl);
1064         spin_unlock(dst_ptl);
1065 out:
1066         return ret;
1067 }
1068
1069 void huge_pmd_set_accessed(struct mm_struct *mm,
1070                            struct vm_area_struct *vma,
1071                            unsigned long address,
1072                            pmd_t *pmd, pmd_t orig_pmd,
1073                            int dirty)
1074 {
1075         spinlock_t *ptl;
1076         pmd_t entry;
1077         unsigned long haddr;
1078
1079         ptl = pmd_lock(mm, pmd);
1080         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1081                 goto unlock;
1082
1083         entry = pmd_mkyoung(orig_pmd);
1084         haddr = address & HPAGE_PMD_MASK;
1085         if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
1086                 update_mmu_cache_pmd(vma, address, pmd);
1087
1088 unlock:
1089         spin_unlock(ptl);
1090 }
1091
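/*
 * COW fallback when a fresh huge page cannot be allocated: copy the data
 * into HPAGE_PMD_NR small pages and remap the range with normal ptes.
 */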
1092 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1093                                         struct vm_area_struct *vma,
1094                                         unsigned long address,
1095                                         pmd_t *pmd, pmd_t orig_pmd,
1096                                         struct page *page,
1097                                         unsigned long haddr)
1098 {
1099         struct mem_cgroup *memcg;
1100         spinlock_t *ptl;
1101         pgtable_t pgtable;
1102         pmd_t _pmd;
1103         int ret = 0, i;
1104         struct page **pages;
1105         unsigned long mmun_start;       /* For mmu_notifiers */
1106         unsigned long mmun_end;         /* For mmu_notifiers */
1107
1108         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1109                         GFP_KERNEL);
1110         if (unlikely(!pages)) {
1111                 ret |= VM_FAULT_OOM;
1112                 goto out;
1113         }
1114
1115         for (i = 0; i < HPAGE_PMD_NR; i++) {
1116                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1117                                                __GFP_OTHER_NODE,
1118                                                vma, address, page_to_nid(page));
1119                 if (unlikely(!pages[i] ||
1120                              mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
1121                                                    &memcg, false))) {
1122                         if (pages[i])
1123                                 put_page(pages[i]);
1124                         while (--i >= 0) {
1125                                 memcg = (void *)page_private(pages[i]);
1126                                 set_page_private(pages[i], 0);
1127                                 mem_cgroup_cancel_charge(pages[i], memcg,
1128                                                 false);
1129                                 put_page(pages[i]);
1130                         }
1131                         kfree(pages);
1132                         ret |= VM_FAULT_OOM;
1133                         goto out;
1134                 }
1135                 set_page_private(pages[i], (unsigned long)memcg);
1136         }
1137
1138         for (i = 0; i < HPAGE_PMD_NR; i++) {
1139                 copy_user_highpage(pages[i], page + i,
1140                                    haddr + PAGE_SIZE * i, vma);
1141                 __SetPageUptodate(pages[i]);
1142                 cond_resched();
1143         }
1144
1145         mmun_start = haddr;
1146         mmun_end   = haddr + HPAGE_PMD_SIZE;
1147         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1148
1149         ptl = pmd_lock(mm, pmd);
1150         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1151                 goto out_free_pages;
1152         VM_BUG_ON_PAGE(!PageHead(page), page);
1153
1154         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1155         /* leave pmd empty until pte is filled */
1156
1157         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1158         pmd_populate(mm, &_pmd, pgtable);
1159
1160         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1161                 pte_t *pte, entry;
1162                 entry = mk_pte(pages[i], vma->vm_page_prot);
1163                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1164                 memcg = (void *)page_private(pages[i]);
1165                 set_page_private(pages[i], 0);
1166                 page_add_new_anon_rmap(pages[i], vma, haddr, false);
1167                 mem_cgroup_commit_charge(pages[i], memcg, false, false);
1168                 lru_cache_add_active_or_unevictable(pages[i], vma);
1169                 pte = pte_offset_map(&_pmd, haddr);
1170                 VM_BUG_ON(!pte_none(*pte));
1171                 set_pte_at(mm, haddr, pte, entry);
1172                 pte_unmap(pte);
1173         }
1174         kfree(pages);
1175
1176         smp_wmb(); /* make pte visible before pmd */
1177         pmd_populate(mm, pmd, pgtable);
1178         page_remove_rmap(page, true);
1179         spin_unlock(ptl);
1180
1181         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1182
1183         ret |= VM_FAULT_WRITE;
1184         put_page(page);
1185
1186 out:
1187         return ret;
1188
1189 out_free_pages:
1190         spin_unlock(ptl);
1191         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1192         for (i = 0; i < HPAGE_PMD_NR; i++) {
1193                 memcg = (void *)page_private(pages[i]);
1194                 set_page_private(pages[i], 0);
1195                 mem_cgroup_cancel_charge(pages[i], memcg, false);
1196                 put_page(pages[i]);
1197         }
1198         kfree(pages);
1199         goto out;
1200 }
1201
1202 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1203                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1204 {
1205         spinlock_t *ptl;
1206         int ret = 0;
1207         struct page *page = NULL, *new_page;
1208         struct mem_cgroup *memcg;
1209         unsigned long haddr;
1210         unsigned long mmun_start;       /* For mmu_notifiers */
1211         unsigned long mmun_end;         /* For mmu_notifiers */
1212         gfp_t huge_gfp;                 /* for allocation and charge */
1213
1214         ptl = pmd_lockptr(mm, pmd);
1215         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1216         haddr = address & HPAGE_PMD_MASK;
1217         if (is_huge_zero_pmd(orig_pmd))
1218                 goto alloc;
1219         spin_lock(ptl);
1220         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1221                 goto out_unlock;
1222
1223         page = pmd_page(orig_pmd);
1224         VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1225         /*
1226          * We can only reuse the page if nobody else maps the huge page or
1227          * part of it. We could check page_mapcount() on each sub-page, but
1228          * that's expensive.
1229          * The cheaper way is to check that page_count() equals 1: every
1230          * mapcount takes a page reference, so this way we can guarantee
1231          * that the PMD is the only mapping.
1232          * This can give a false negative if somebody pinned the page, but
1233          * that's fine.
1234          */
1235         if (page_mapcount(page) == 1 && page_count(page) == 1) {
1236                 pmd_t entry;
1237                 entry = pmd_mkyoung(orig_pmd);
1238                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1239                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
1240                         update_mmu_cache_pmd(vma, address, pmd);
1241                 ret |= VM_FAULT_WRITE;
1242                 goto out_unlock;
1243         }
1244         get_page(page);
1245         spin_unlock(ptl);
1246 alloc:
1247         if (transparent_hugepage_enabled(vma) &&
1248             !transparent_hugepage_debug_cow()) {
1249                 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1250                 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1251         } else
1252                 new_page = NULL;
1253
1254         if (likely(new_page)) {
1255                 prep_transhuge_page(new_page);
1256         } else {
1257                 if (!page) {
1258                         split_huge_pmd(vma, pmd, address);
1259                         ret |= VM_FAULT_FALLBACK;
1260                 } else {
1261                         ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1262                                         pmd, orig_pmd, page, haddr);
1263                         if (ret & VM_FAULT_OOM) {
1264                                 split_huge_pmd(vma, pmd, address);
1265                                 ret |= VM_FAULT_FALLBACK;
1266                         }
1267                         put_page(page);
1268                 }
1269                 count_vm_event(THP_FAULT_FALLBACK);
1270                 goto out;
1271         }
1272
1273         if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp,
1274                                         &memcg, true))) {
1275                 put_page(new_page);
1276                 if (page) {
1277                         split_huge_pmd(vma, pmd, address);
1278                         put_page(page);
1279                 } else
1280                         split_huge_pmd(vma, pmd, address);
1281                 ret |= VM_FAULT_FALLBACK;
1282                 count_vm_event(THP_FAULT_FALLBACK);
1283                 goto out;
1284         }
1285
1286         count_vm_event(THP_FAULT_ALLOC);
1287
1288         if (!page)
1289                 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1290         else
1291                 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1292         __SetPageUptodate(new_page);
1293
1294         mmun_start = haddr;
1295         mmun_end   = haddr + HPAGE_PMD_SIZE;
1296         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1297
1298         spin_lock(ptl);
1299         if (page)
1300                 put_page(page);
1301         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1302                 spin_unlock(ptl);
1303                 mem_cgroup_cancel_charge(new_page, memcg, true);
1304                 put_page(new_page);
1305                 goto out_mn;
1306         } else {
1307                 pmd_t entry;
1308                 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1309                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1310                 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1311                 page_add_new_anon_rmap(new_page, vma, haddr, true);
1312                 mem_cgroup_commit_charge(new_page, memcg, false, true);
1313                 lru_cache_add_active_or_unevictable(new_page, vma);
1314                 set_pmd_at(mm, haddr, pmd, entry);
1315                 update_mmu_cache_pmd(vma, address, pmd);
1316                 if (!page) {
1317                         add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1318                         put_huge_zero_page();
1319                 } else {
1320                         VM_BUG_ON_PAGE(!PageHead(page), page);
1321                         page_remove_rmap(page, true);
1322                         put_page(page);
1323                 }
1324                 ret |= VM_FAULT_WRITE;
1325         }
1326         spin_unlock(ptl);
1327 out_mn:
1328         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1329 out:
1330         return ret;
1331 out_unlock:
1332         spin_unlock(ptl);
1333         return ret;
1334 }
1335
1336 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1337                                    unsigned long addr,
1338                                    pmd_t *pmd,
1339                                    unsigned int flags)
1340 {
1341         struct mm_struct *mm = vma->vm_mm;
1342         struct page *page = NULL;
1343
1344         assert_spin_locked(pmd_lockptr(mm, pmd));
1345
1346         if (flags & FOLL_WRITE && !pmd_write(*pmd))
1347                 goto out;
1348
1349         /* Avoid dumping huge zero page */
1350         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1351                 return ERR_PTR(-EFAULT);
1352
1353         /* Full NUMA hinting faults to serialise migration in fault paths */
1354         if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1355                 goto out;
1356
1357         page = pmd_page(*pmd);
1358         VM_BUG_ON_PAGE(!PageHead(page), page);
1359         if (flags & FOLL_TOUCH) {
1360                 pmd_t _pmd;
1361                 /*
1362                  * We should set the dirty bit only for FOLL_WRITE, but
1363                  * for now the dirty bit in the pmd is meaningless.
1364                  * If the dirty bit ever becomes meaningful and we only
1365                  * set it with FOLL_WRITE, an atomic set_bit will be
1366                  * required on the pmd to set the young bit, instead of
1367                  * the current set_pmd_at.
1368                  */
1369                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1370                 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1371                                           pmd, _pmd,  1))
1372                         update_mmu_cache_pmd(vma, addr, pmd);
1373         }
1374         if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1375                 /*
1376                  * We don't mlock() pte-mapped THPs. This way we can avoid
1377                  * leaking mlocked pages into non-VM_LOCKED VMAs.
1378                  *
1379                  * In most cases the pmd is the only mapping of the page as we
1380                  * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1381                  * writable private mappings in populate_vma_page_range().
1382                  *
1383                  * The only scenario in which the page is shared here is
1384                  * when we mlock a read-only mapping shared over fork().
1385                  * We skip mlocking such pages.
1386                  */
1387                 if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
1388                                 page->mapping && trylock_page(page)) {
1389                         lru_add_drain();
1390                         if (page->mapping)
1391                                 mlock_vma_page(page);
1392                         unlock_page(page);
1393                 }
1394         }
1395         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1396         VM_BUG_ON_PAGE(!PageCompound(page), page);
1397         if (flags & FOLL_GET)
1398                 get_page(page);
1399
1400 out:
1401         return page;
1402 }
1403
1404 /* NUMA hinting page fault entry point for trans huge pmds */
1405 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1406                                 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1407 {
1408         spinlock_t *ptl;
1409         struct anon_vma *anon_vma = NULL;
1410         struct page *page;
1411         unsigned long haddr = addr & HPAGE_PMD_MASK;
1412         int page_nid = -1, this_nid = numa_node_id();
1413         int target_nid, last_cpupid = -1;
1414         bool page_locked;
1415         bool migrated = false;
1416         bool was_writable;
1417         int flags = 0;
1418
1419         /* A PROT_NONE fault should not end up here */
1420         BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1421
1422         ptl = pmd_lock(mm, pmdp);
1423         if (unlikely(!pmd_same(pmd, *pmdp)))
1424                 goto out_unlock;
1425
1426         /*
1427          * If there are potential migrations, wait for completion and retry
1428          * without disrupting NUMA hinting information. Do not relock and
1429          * recheck pmd_same() as the page may no longer be mapped.
1430          */
1431         if (unlikely(pmd_trans_migrating(*pmdp))) {
1432                 page = pmd_page(*pmdp);
1433                 spin_unlock(ptl);
1434                 wait_on_page_locked(page);
1435                 goto out;
1436         }
1437
1438         page = pmd_page(pmd);
1439         BUG_ON(is_huge_zero_page(page));
1440         page_nid = page_to_nid(page);
1441         last_cpupid = page_cpupid_last(page);
1442         count_vm_numa_event(NUMA_HINT_FAULTS);
1443         if (page_nid == this_nid) {
1444                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1445                 flags |= TNF_FAULT_LOCAL;
1446         }
1447
1448         /* See similar comment in do_numa_page for explanation */
1449         if (!(vma->vm_flags & VM_WRITE))
1450                 flags |= TNF_NO_GROUP;
1451
1452         /*
1453          * Acquire the page lock to serialise THP migrations but avoid dropping
1454          * page_table_lock if at all possible
1455          */
1456         page_locked = trylock_page(page);
1457         target_nid = mpol_misplaced(page, vma, haddr);
1458         if (target_nid == -1) {
1459                 /* If the page was locked, there are no parallel migrations */
1460                 if (page_locked)
1461                         goto clear_pmdnuma;
1462         }
1463
1464         /* Migration could have started since the pmd_trans_migrating check */
1465         if (!page_locked) {
1466                 spin_unlock(ptl);
1467                 wait_on_page_locked(page);
1468                 page_nid = -1;
1469                 goto out;
1470         }
1471
1472         /*
1473          * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1474          * to serialise splits
1475          */
1476         get_page(page);
1477         spin_unlock(ptl);
1478         anon_vma = page_lock_anon_vma_read(page);
1479
1480         /* Confirm the PMD did not change while page_table_lock was released */
1481         spin_lock(ptl);
1482         if (unlikely(!pmd_same(pmd, *pmdp))) {
1483                 unlock_page(page);
1484                 put_page(page);
1485                 page_nid = -1;
1486                 goto out_unlock;
1487         }
1488
1489         /* Bail if we fail to protect against THP splits for any reason */
1490         if (unlikely(!anon_vma)) {
1491                 put_page(page);
1492                 page_nid = -1;
1493                 goto clear_pmdnuma;
1494         }
1495
1496         /*
1497          * Migrate the THP to the requested node; this returns with the page unlocked
1498          * and access rights restored.
1499          */
1500         spin_unlock(ptl);
1501         migrated = migrate_misplaced_transhuge_page(mm, vma,
1502                                 pmdp, pmd, addr, page, target_nid);
1503         if (migrated) {
1504                 flags |= TNF_MIGRATED;
1505                 page_nid = target_nid;
1506         } else
1507                 flags |= TNF_MIGRATE_FAIL;
1508
1509         goto out;
1510 clear_pmdnuma:
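        /* No migration: restore the vma's protections on the pmd and mark it young */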
1511         BUG_ON(!PageLocked(page));
1512         was_writable = pmd_write(pmd);
1513         pmd = pmd_modify(pmd, vma->vm_page_prot);
1514         pmd = pmd_mkyoung(pmd);
1515         if (was_writable)
1516                 pmd = pmd_mkwrite(pmd);
1517         set_pmd_at(mm, haddr, pmdp, pmd);
1518         update_mmu_cache_pmd(vma, addr, pmdp);
1519         unlock_page(page);
1520 out_unlock:
1521         spin_unlock(ptl);
1522
1523 out:
1524         if (anon_vma)
1525                 page_unlock_anon_vma_read(anon_vma);
1526
1527         if (page_nid != -1)
1528                 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
1529
1530         return 0;
1531 }
1532
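/*
 * MADV_FREE on a huge pmd: mark the pmd old and clean so the THP can be
 * freed lazily under memory pressure; the huge zero page is left untouched.
 * Returns 0 if the pmd was a transparent huge pmd and was handled here,
 * 1 otherwise.
 */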
1533 int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1534                  pmd_t *pmd, unsigned long addr)
1535
1536 {
1537         spinlock_t *ptl;
1538         struct mm_struct *mm = tlb->mm;
1539         int ret = 1;
1540
1541         if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1542                 struct page *page;
1543                 pmd_t orig_pmd;
1544
1545                 if (is_huge_zero_pmd(*pmd))
1546                         goto out;
1547
1548                 orig_pmd = pmdp_huge_get_and_clear(mm, addr, pmd);
1549
1550                 /* No hugepage in swapcache */
1551                 page = pmd_page(orig_pmd);
1552                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
1553
1554                 orig_pmd = pmd_mkold(orig_pmd);
1555                 orig_pmd = pmd_mkclean(orig_pmd);
1556
1557                 set_pmd_at(mm, addr, pmd, orig_pmd);
1558                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1559 out:
1560                 spin_unlock(ptl);
1561                 ret = 0;
1562         }
1563
1564         return ret;
1565 }
1566
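/*
 * Tear down a huge pmd during zap/unmap: clear the pmd and its TLB entry,
 * free the deposited page table (not for DAX), and drop the mapping's
 * reference on either the huge zero page or the THP.
 */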
1567 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1568                  pmd_t *pmd, unsigned long addr)
1569 {
1570         pmd_t orig_pmd;
1571         spinlock_t *ptl;
1572
1573         if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
1574                 return 0;
1575         /*
1576          * For architectures like ppc64 we look at deposited pgtable
1577          * when calling pmdp_huge_get_and_clear. So do the
1578          * pgtable_trans_huge_withdraw after finishing pmdp related
1579          * operations.
1580          */
1581         orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1582                         tlb->fullmm);
1583         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1584         if (vma_is_dax(vma)) {
1585                 spin_unlock(ptl);
1586                 if (is_huge_zero_pmd(orig_pmd))
1587                         put_huge_zero_page();
1588         } else if (is_huge_zero_pmd(orig_pmd)) {
1589                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1590                 atomic_long_dec(&tlb->mm->nr_ptes);
1591                 spin_unlock(ptl);
1592                 put_huge_zero_page();
1593         } else {
1594                 struct page *page = pmd_page(orig_pmd);
1595                 page_remove_rmap(page, true);
1596                 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1597                 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1598                 VM_BUG_ON_PAGE(!PageHead(page), page);
1599                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1600                 atomic_long_dec(&tlb->mm->nr_ptes);
1601                 spin_unlock(ptl);
1602                 tlb_remove_page(tlb, page);
1603         }
1604         return 1;
1605 }
1606
1607 bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1608                   unsigned long old_addr,
1609                   unsigned long new_addr, unsigned long old_end,
1610                   pmd_t *old_pmd, pmd_t *new_pmd)
1611 {
1612         spinlock_t *old_ptl, *new_ptl;
1613         pmd_t pmd;
1614
1615         struct mm_struct *mm = vma->vm_mm;
1616
1617         if ((old_addr & ~HPAGE_PMD_MASK) ||
1618             (new_addr & ~HPAGE_PMD_MASK) ||
1619             old_end - old_addr < HPAGE_PMD_SIZE ||
1620             (new_vma->vm_flags & VM_NOHUGEPAGE))
1621                 return false;
1622
1623         /*
1624          * The destination pmd shouldn't be established, free_pgtables()
1625          * should have released it.
1626          */
1627         if (WARN_ON(!pmd_none(*new_pmd))) {
1628                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1629                 return false;
1630         }
1631
1632         /*
1633          * We don't have to worry about the ordering of src and dst
1634          * ptlocks because exclusive mmap_sem prevents deadlock.
1635          */
1636         if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
1637                 new_ptl = pmd_lockptr(mm, new_pmd);
1638                 if (new_ptl != old_ptl)
1639                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1640                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1641                 VM_BUG_ON(!pmd_none(*new_pmd));
1642
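                /*
                 * Some architectures deposit a page table under the huge
                 * pmd; if so, move the deposit along with the pmd.
                 */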
1643                 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1644                         pgtable_t pgtable;
1645                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1646                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1647                 }
1648                 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1649                 if (new_ptl != old_ptl)
1650                         spin_unlock(new_ptl);
1651                 spin_unlock(old_ptl);
1652                 return true;
1653         }
1654         return false;
1655 }
1656
1657 /*
1658  * Returns
1659  *  - 0 if PMD could not be locked
1660  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1661  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1662  */
1663 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1664                 unsigned long addr, pgprot_t newprot, int prot_numa)
1665 {
1666         struct mm_struct *mm = vma->vm_mm;
1667         spinlock_t *ptl;
1668         int ret = 0;
1669
1670         if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
1671                 pmd_t entry;
1672                 bool preserve_write = prot_numa && pmd_write(*pmd);
1673                 ret = 1;
1674
1675                 /*
1676                  * Avoid trapping faults against the zero page. The read-only
1677                  * data is likely to be read-cached on the local CPU and
1678                  * local/remote hits to the zero page are not interesting.
1679                  */
1680                 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1681                         spin_unlock(ptl);
1682                         return ret;
1683                 }
1684
1685                 if (!prot_numa || !pmd_protnone(*pmd)) {
1686                         entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1687                         entry = pmd_modify(entry, newprot);
1688                         if (preserve_write)
1689                                 entry = pmd_mkwrite(entry);
1690                         ret = HPAGE_PMD_NR;
1691                         set_pmd_at(mm, addr, pmd, entry);
1692                         BUG_ON(!preserve_write && pmd_write(entry));
1693                 }
1694                 spin_unlock(ptl);
1695         }
1696
1697         return ret;
1698 }
1699
1700 /*
1701  * Returns true if a given pmd maps a thp, false otherwise.
1702  *
1703  * Note that if it returns true, this routine returns without unlocking the
1704  * page table lock, so the caller must unlock it.
1705  */
1706 bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1707                 spinlock_t **ptl)
1708 {
1709         *ptl = pmd_lock(vma->vm_mm, pmd);
1710         if (likely(pmd_trans_huge(*pmd)))
1711                 return true;
1712         spin_unlock(*ptl);
1713         return false;
1714 }
1715
1716 /*
1717  * This function checks whether the given @page is mapped at @address in the
1718  * virtual address space of @mm.
1719  *
1720  * If it is, it returns the pmd while holding the page table lock, which is
1721  * passed back to the caller via @ptl.
1722  * If it is not, it returns NULL without holding the page table lock.
1723  */
1724 pmd_t *page_check_address_pmd(struct page *page,
1725                               struct mm_struct *mm,
1726                               unsigned long address,
1727                               spinlock_t **ptl)
1728 {
1729         pgd_t *pgd;
1730         pud_t *pud;
1731         pmd_t *pmd;
1732
1733         if (address & ~HPAGE_PMD_MASK)
1734                 return NULL;
1735
1736         pgd = pgd_offset(mm, address);
1737         if (!pgd_present(*pgd))
1738                 return NULL;
1739         pud = pud_offset(pgd, address);
1740         if (!pud_present(*pud))
1741                 return NULL;
1742         pmd = pmd_offset(pud, address);
1743
1744         *ptl = pmd_lock(mm, pmd);
1745         if (!pmd_present(*pmd))
1746                 goto unlock;
1747         if (pmd_page(*pmd) != page)
1748                 goto unlock;
1749         if (pmd_trans_huge(*pmd))
1750                 return pmd;
1751 unlock:
1752         spin_unlock(*ptl);
1753         return NULL;
1754 }
1755
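/* vma flags that make a mapping ineligible for transparent hugepages */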
1756 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1757
1758 int hugepage_madvise(struct vm_area_struct *vma,
1759                      unsigned long *vm_flags, int advice)
1760 {
1761         switch (advice) {
1762         case MADV_HUGEPAGE:
1763 #ifdef CONFIG_S390
1764                 /*
1765                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1766                  * can't handle this properly after s390_enable_sie, so we simply
1767                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
1768                  */
1769                 if (mm_has_pgste(vma->vm_mm))
1770                         return 0;
1771 #endif
1772                 /*
1773                  * Be somewhat over-protective like KSM for now!
1774                  */
1775                 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1776                         return -EINVAL;
1777                 *vm_flags &= ~VM_NOHUGEPAGE;
1778                 *vm_flags |= VM_HUGEPAGE;
1779                 /*
1780                  * If the vma becomes eligible for khugepaged to scan,
1781                  * register it here without waiting for a page fault
1782                  * that may not happen any time soon.
1783                  */
1784                 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1785                         return -ENOMEM;
1786                 break;
1787         case MADV_NOHUGEPAGE:
1788                 /*
1789                  * Be somewhat over-protective like KSM for now!
1790                  */
1791                 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1792                         return -EINVAL;
1793                 *vm_flags &= ~VM_HUGEPAGE;
1794                 *vm_flags |= VM_NOHUGEPAGE;
1795                 /*
1796                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1797                  * this vma, even if the mm stays registered in khugepaged
1798                  * because it got registered before VM_NOHUGEPAGE was set.
1799                  */
1800                 break;
1801         }
1802
1803         return 0;
1804 }
1805
1806 static int __init khugepaged_slab_init(void)
1807 {
1808         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1809                                           sizeof(struct mm_slot),
1810                                           __alignof__(struct mm_slot), 0, NULL);
1811         if (!mm_slot_cache)
1812                 return -ENOMEM;
1813
1814         return 0;
1815 }
1816
1817 static void __init khugepaged_slab_exit(void)
1818 {
1819         kmem_cache_destroy(mm_slot_cache);
1820 }
1821
1822 static inline struct mm_slot *alloc_mm_slot(void)
1823 {
1824         if (!mm_slot_cache)     /* initialization failed */
1825                 return NULL;
1826         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1827 }
1828
1829 static inline void free_mm_slot(struct mm_slot *mm_slot)
1830 {
1831         kmem_cache_free(mm_slot_cache, mm_slot);
1832 }
1833
1834 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1835 {
1836         struct mm_slot *mm_slot;
1837
1838         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1839                 if (mm == mm_slot->mm)
1840                         return mm_slot;
1841
1842         return NULL;
1843 }
1844
1845 static void insert_to_mm_slots_hash(struct mm_struct *mm,
1846                                     struct mm_slot *mm_slot)
1847 {
1848         mm_slot->mm = mm;
1849         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
1850 }
1851
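/* mm_users == 0 means the mm is exiting and khugepaged must stop touching it */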
1852 static inline int khugepaged_test_exit(struct mm_struct *mm)
1853 {
1854         return atomic_read(&mm->mm_users) == 0;
1855 }
1856
1857 int __khugepaged_enter(struct mm_struct *mm)
1858 {
1859         struct mm_slot *mm_slot;
1860         int wakeup;
1861
1862         mm_slot = alloc_mm_slot();
1863         if (!mm_slot)
1864                 return -ENOMEM;
1865
1866         /* __khugepaged_exit() must not run from under us */
1867         VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
1868         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1869                 free_mm_slot(mm_slot);
1870                 return 0;
1871         }
1872
1873         spin_lock(&khugepaged_mm_lock);
1874         insert_to_mm_slots_hash(mm, mm_slot);
1875         /*
1876          * Insert just behind the scanning cursor, to let the area settle
1877          * down a little.
1878          */
1879         wakeup = list_empty(&khugepaged_scan.mm_head);
1880         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1881         spin_unlock(&khugepaged_mm_lock);
1882
1883         atomic_inc(&mm->mm_count);
1884         if (wakeup)
1885                 wake_up_interruptible(&khugepaged_wait);
1886
1887         return 0;
1888 }
1889
1890 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1891                                unsigned long vm_flags)
1892 {
1893         unsigned long hstart, hend;
1894         if (!vma->anon_vma)
1895                 /*
1896                  * Not yet faulted in so we will register later in the
1897                  * page fault if needed.
1898                  */
1899                 return 0;
1900         if (vma->vm_ops)
1901                 /* khugepaged not yet working on file or special mappings */
1902                 return 0;
1903         VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1904         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1905         hend = vma->vm_end & HPAGE_PMD_MASK;
1906         if (hstart < hend)
1907                 return khugepaged_enter(vma, vm_flags);
1908         return 0;
1909 }
1910
1911 void __khugepaged_exit(struct mm_struct *mm)
1912 {
1913         struct mm_slot *mm_slot;
1914         int free = 0;
1915
1916         spin_lock(&khugepaged_mm_lock);
1917         mm_slot = get_mm_slot(mm);
1918         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1919                 hash_del(&mm_slot->hash);
1920                 list_del(&mm_slot->mm_node);
1921                 free = 1;
1922         }
1923         spin_unlock(&khugepaged_mm_lock);
1924
1925         if (free) {
1926                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1927                 free_mm_slot(mm_slot);
1928                 mmdrop(mm);
1929         } else if (mm_slot) {
1930                 /*
1931                  * This is required to serialize against
1932                  * khugepaged_test_exit() (which is guaranteed to run
1933                  * under mmap_sem read mode). Stop here (after we
1934                  * return, all pagetables will be destroyed) until
1935                  * khugepaged has finished working on the pagetables
1936                  * under the mmap_sem.
1937                  */
1938                 down_write(&mm->mmap_sem);
1939                 up_write(&mm->mmap_sem);
1940         }
1941 }
1942
1943 static void release_pte_page(struct page *page)
1944 {
1945         /* 0 stands for page_is_file_cache(page) == false */
1946         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1947         unlock_page(page);
1948         putback_lru_page(page);
1949 }
1950
1951 static void release_pte_pages(pte_t *pte, pte_t *_pte)
1952 {
1953         while (--_pte >= pte) {
1954                 pte_t pteval = *_pte;
1955                 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
1956                         release_pte_page(pte_page(pteval));
1957         }
1958 }
1959
1960 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1961                                         unsigned long address,
1962                                         pte_t *pte)
1963 {
1964         struct page *page = NULL;
1965         pte_t *_pte;
1966         int none_or_zero = 0, result = 0;
1967         bool referenced = false, writable = false;
1968
1969         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1970              _pte++, address += PAGE_SIZE) {
1971                 pte_t pteval = *_pte;
1972                 if (pte_none(pteval) || (pte_present(pteval) &&
1973                                 is_zero_pfn(pte_pfn(pteval)))) {
1974                         if (!userfaultfd_armed(vma) &&
1975                             ++none_or_zero <= khugepaged_max_ptes_none) {
1976                                 continue;
1977                         } else {
1978                                 result = SCAN_EXCEED_NONE_PTE;
1979                                 goto out;
1980                         }
1981                 }
1982                 if (!pte_present(pteval)) {
1983                         result = SCAN_PTE_NON_PRESENT;
1984                         goto out;
1985                 }
1986                 page = vm_normal_page(vma, address, pteval);
1987                 if (unlikely(!page)) {
1988                         result = SCAN_PAGE_NULL;
1989                         goto out;
1990                 }
1991
1992                 VM_BUG_ON_PAGE(PageCompound(page), page);
1993                 VM_BUG_ON_PAGE(!PageAnon(page), page);
1994                 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1995
1996                 /*
1997                  * We can do it before isolate_lru_page because the
1998                  * page can't be freed from under us. NOTE: PG_lock
1999                  * is needed to serialize against split_huge_page
2000                  * when invoked from the VM.
2001                  */
2002                 if (!trylock_page(page)) {
2003                         result = SCAN_PAGE_LOCK;
2004                         goto out;
2005                 }
2006
2007                 /*
2008                  * cannot use mapcount: can't collapse if there's a gup pin.
2009                  * The page must only be referenced by the scanned process
2010                  * and page swap cache.
2011                  */
2012                 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2013                         unlock_page(page);
2014                         result = SCAN_PAGE_COUNT;
2015                         goto out;
2016                 }
2017                 if (pte_write(pteval)) {
2018                         writable = true;
2019                 } else {
2020                         if (PageSwapCache(page) && !reuse_swap_page(page)) {
2021                                 unlock_page(page);
2022                                 result = SCAN_SWAP_CACHE_PAGE;
2023                                 goto out;
2024                         }
2025                         /*
2026                          * Page is not in the swap cache. It can be collapsed
2027                          * into a THP.
2028                          */
2029                 }
2030
2031                 /*
2032                  * Isolate the page to avoid collapsing a hugepage that is
2033                  * currently in use by the VM.
2034                  */
2035                 if (isolate_lru_page(page)) {
2036                         unlock_page(page);
2037                         result = SCAN_DEL_PAGE_LRU;
2038                         goto out;
2039                 }
2040                 /* 0 stands for page_is_file_cache(page) == false */
2041                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2042                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2043                 VM_BUG_ON_PAGE(PageLRU(page), page);
2044
2045         /* If no mapped pte is young, don't collapse the page */
2046                 if (pte_young(pteval) ||
2047                     page_is_young(page) || PageReferenced(page) ||
2048                     mmu_notifier_test_young(vma->vm_mm, address))
2049                         referenced = true;
2050         }
2051         if (likely(writable)) {
2052                 if (likely(referenced)) {
2053                         result = SCAN_SUCCEED;
2054                         trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2055                                                             referenced, writable, result);
2056                         return 1;
2057                 }
2058         } else {
2059                 result = SCAN_PAGE_RO;
2060         }
2061
2062 out:
2063         release_pte_pages(pte, _pte);
2064         trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2065                                             referenced, writable, result);
2066         return 0;
2067 }
2068
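/*
 * Copy the data from the isolated small pages into @page (the new huge
 * page), zero-filling slots whose pte was none or the zero pfn, and
 * release the original pages.
 */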
2069 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2070                                       struct vm_area_struct *vma,
2071                                       unsigned long address,
2072                                       spinlock_t *ptl)
2073 {
2074         pte_t *_pte;
2075         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2076                 pte_t pteval = *_pte;
2077                 struct page *src_page;
2078
2079                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2080                         clear_user_highpage(page, address);
2081                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2082                         if (is_zero_pfn(pte_pfn(pteval))) {
2083                                 /*
2084                                  * ptl mostly unnecessary.
2085                                  */
2086                                 spin_lock(ptl);
2087                                 /*
2088                                  * paravirt calls inside pte_clear here are
2089                                  * superfluous.
2090                                  */
2091                                 pte_clear(vma->vm_mm, address, _pte);
2092                                 spin_unlock(ptl);
2093                         }
2094                 } else {
2095                         src_page = pte_page(pteval);
2096                         copy_user_highpage(page, src_page, address, vma);
2097                         VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2098                         release_pte_page(src_page);
2099                         /*
2100                          * ptl mostly unnecessary, but preempt has to
2101                          * be disabled to update the per-cpu stats
2102                          * inside page_remove_rmap().
2103                          */
2104                         spin_lock(ptl);
2105                         /*
2106                          * paravirt calls inside pte_clear here are
2107                          * superfluous.
2108                          */
2109                         pte_clear(vma->vm_mm, address, _pte);
2110                         page_remove_rmap(src_page, false);
2111                         spin_unlock(ptl);
2112                         free_page_and_swap_cache(src_page);
2113                 }
2114
2115                 address += PAGE_SIZE;
2116                 page++;
2117         }
2118 }
2119
2120 static void khugepaged_alloc_sleep(void)
2121 {
2122         DEFINE_WAIT(wait);
2123
2124         add_wait_queue(&khugepaged_wait, &wait);
2125         freezable_schedule_timeout_interruptible(
2126                 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2127         remove_wait_queue(&khugepaged_wait, &wait);
2128 }
2129
2130 static int khugepaged_node_load[MAX_NUMNODES];
2131
2132 static bool khugepaged_scan_abort(int nid)
2133 {
2134         int i;
2135
2136         /*
2137          * If zone_reclaim_mode is disabled, then no extra effort is made to
2138          * allocate memory locally.
2139          */
2140         if (!zone_reclaim_mode)
2141                 return false;
2142
2143         /* If there is a count for this node already, it must be acceptable */
2144         if (khugepaged_node_load[nid])
2145                 return false;
2146
2147         for (i = 0; i < MAX_NUMNODES; i++) {
2148                 if (!khugepaged_node_load[i])
2149                         continue;
2150                 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2151                         return true;
2152         }
2153         return false;
2154 }
2155
2156 #ifdef CONFIG_NUMA
2157 static int khugepaged_find_target_node(void)
2158 {
2159         static int last_khugepaged_target_node = NUMA_NO_NODE;
2160         int nid, target_node = 0, max_value = 0;
2161
2162         /* find the first node with the most normal-page hits */
2163         for (nid = 0; nid < MAX_NUMNODES; nid++)
2164                 if (khugepaged_node_load[nid] > max_value) {
2165                         max_value = khugepaged_node_load[nid];
2166                         target_node = nid;
2167                 }
2168
2169         /* do some balancing if several nodes have the same hit count */
2170         if (target_node <= last_khugepaged_target_node)
2171                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2172                                 nid++)
2173                         if (max_value == khugepaged_node_load[nid]) {
2174                                 target_node = nid;
2175                                 break;
2176                         }
2177
2178         last_khugepaged_target_node = target_node;
2179         return target_node;
2180 }
2181
2182 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2183 {
2184         if (IS_ERR(*hpage)) {
2185                 if (!*wait)
2186                         return false;
2187
2188                 *wait = false;
2189                 *hpage = NULL;
2190                 khugepaged_alloc_sleep();
2191         } else if (*hpage) {
2192                 put_page(*hpage);
2193                 *hpage = NULL;
2194         }
2195
2196         return true;
2197 }
2198
2199 static struct page *
2200 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2201                        struct vm_area_struct *vma, unsigned long address,
2202                        int node)
2203 {
2204         VM_BUG_ON_PAGE(*hpage, *hpage);
2205
2206         /*
2207          * Before allocating the hugepage, release the mmap_sem read lock.
2208          * The allocation can potentially take a long time if it involves
2209          * sync compaction, and we do not need to hold the mmap_sem during
2210          * that. We will recheck the vma after taking it again in write mode.
2211          */
2212         up_read(&mm->mmap_sem);
2213
2214         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
2215         if (unlikely(!*hpage)) {
2216                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2217                 *hpage = ERR_PTR(-ENOMEM);
2218                 return NULL;
2219         }
2220
2221         prep_transhuge_page(*hpage);
2222         count_vm_event(THP_COLLAPSE_ALLOC);
2223         return *hpage;
2224 }
2225 #else
2226 static int khugepaged_find_target_node(void)
2227 {
2228         return 0;
2229 }
2230
2231 static inline struct page *alloc_hugepage(int defrag)
2232 {
2233         struct page *page;
2234
2235         page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
2236         if (page)
2237                 prep_transhuge_page(page);
2238         return page;
2239 }
2240
2241 static struct page *khugepaged_alloc_hugepage(bool *wait)
2242 {
2243         struct page *hpage;
2244
2245         do {
2246                 hpage = alloc_hugepage(khugepaged_defrag());
2247                 if (!hpage) {
2248                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2249                         if (!*wait)
2250                                 return NULL;
2251
2252                         *wait = false;
2253                         khugepaged_alloc_sleep();
2254                 } else
2255                         count_vm_event(THP_COLLAPSE_ALLOC);
2256         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2257
2258         return hpage;
2259 }
2260
2261 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2262 {
2263         if (!*hpage)
2264                 *hpage = khugepaged_alloc_hugepage(wait);
2265
2266         if (unlikely(!*hpage))
2267                 return false;
2268
2269         return true;
2270 }
2271
2272 static struct page *
2273 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2274                        struct vm_area_struct *vma, unsigned long address,
2275                        int node)
2276 {
2277         up_read(&mm->mmap_sem);
2278         VM_BUG_ON(!*hpage);
2279
2280         return  *hpage;
2281 }
2282 #endif
2283
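/*
 * Check whether a vma is eligible for khugepaged: THP must be enabled for
 * it (system-wide "always" or via MADV_HUGEPAGE), it must be anonymous,
 * and it must not be a temporary stack.
 */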
2284 static bool hugepage_vma_check(struct vm_area_struct *vma)
2285 {
2286         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2287             (vma->vm_flags & VM_NOHUGEPAGE))
2288                 return false;
2289         if (!vma->anon_vma || vma->vm_ops)
2290                 return false;
2291         if (is_vma_temporary_stack(vma))
2292                 return false;
2293         VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
2294         return true;
2295 }
2296
2297 /*
2298  * Bring missing pages in from swap, to complete THP collapse.
2299  * Only done if khugepaged_scan_pmd believes it is worthwhile.
2300  *
2301  * Called and returns without pte mapped or spinlocks held,
2302  * but with mmap_sem held to protect against vma changes.
2303  */
2304
2305 static void __collapse_huge_page_swapin(struct mm_struct *mm,
2306                                         struct vm_area_struct *vma,
2307                                         unsigned long address, pmd_t *pmd)
2308 {
2309         unsigned long _address;
2310         pte_t *pte, pteval;
2311         int swapped_in = 0, ret = 0;
2312
2313         pte = pte_offset_map(pmd, address);
2314         for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE;
2315              pte++, _address += PAGE_SIZE) {
2316                 pteval = *pte;
2317                 if (!is_swap_pte(pteval))
2318                         continue;
2319                 swapped_in++;
2320                 ret = do_swap_page(mm, vma, _address, pte, pmd,
2321                                    FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT,
2322                                    pteval);
2323                 if (ret & VM_FAULT_ERROR) {
2324                         trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0);
2325                         return;
2326                 }
2327                 /* pte is unmapped now, we need to map it */
2328                 pte = pte_offset_map(pmd, _address);
2329         }
2330         pte--;
2331         pte_unmap(pte);
2332         trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1);
2333 }
2334
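/*
 * Collapse the pmd-sized range at @address into a newly allocated huge
 * page. Entered with mmap_sem held for read (released during allocation),
 * retaken for write while the pagetables are rewritten, and released
 * again before returning.
 */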
2335 static void collapse_huge_page(struct mm_struct *mm,
2336                                    unsigned long address,
2337                                    struct page **hpage,
2338                                    struct vm_area_struct *vma,
2339                                    int node)
2340 {
2341         pmd_t *pmd, _pmd;
2342         pte_t *pte;
2343         pgtable_t pgtable;
2344         struct page *new_page;
2345         spinlock_t *pmd_ptl, *pte_ptl;
2346         int isolated = 0, result = 0;
2347         unsigned long hstart, hend;
2348         struct mem_cgroup *memcg;
2349         unsigned long mmun_start;       /* For mmu_notifiers */
2350         unsigned long mmun_end;         /* For mmu_notifiers */
2351         gfp_t gfp;
2352
2353         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2354
2355         /* Only allocate from the target node */
2356         gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2357                 __GFP_THISNODE;
2358
2359         /* release the mmap_sem read lock. */
2360         new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
2361         if (!new_page) {
2362                 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2363                 goto out_nolock;
2364         }
2365
2366         if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
2367                 result = SCAN_CGROUP_CHARGE_FAIL;
2368                 goto out_nolock;
2369         }
2370
2371         /*
2372          * Prevent all access to the pagetables, with the exception of
2373          * gup_fast (handled later by the ptep_clear_flush) and the VM
2374          * (handled by the anon_vma lock + PG_lock).
2375          */
2376         down_write(&mm->mmap_sem);
2377         if (unlikely(khugepaged_test_exit(mm))) {
2378                 result = SCAN_ANY_PROCESS;
2379                 goto out;
2380         }
2381
2382         vma = find_vma(mm, address);
2383         if (!vma) {
2384                 result = SCAN_VMA_NULL;
2385                 goto out;
2386         }
2387         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2388         hend = vma->vm_end & HPAGE_PMD_MASK;
2389         if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
2390                 result = SCAN_ADDRESS_RANGE;
2391                 goto out;
2392         }
2393         if (!hugepage_vma_check(vma)) {
2394                 result = SCAN_VMA_CHECK;
2395                 goto out;
2396         }
2397         pmd = mm_find_pmd(mm, address);
2398         if (!pmd) {
2399                 result = SCAN_PMD_NULL;
2400                 goto out;
2401         }
2402
2403         __collapse_huge_page_swapin(mm, vma, address, pmd);
2404
2405         anon_vma_lock_write(vma->anon_vma);
2406
2407         pte = pte_offset_map(pmd, address);
2408         pte_ptl = pte_lockptr(mm, pmd);
2409
2410         mmun_start = address;
2411         mmun_end   = address + HPAGE_PMD_SIZE;
2412         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2413         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2414         /*
2415          * After this gup_fast can't run anymore. This also removes
2416          * any huge TLB entry from the CPU so we won't allow
2417          * huge and small TLB entries for the same virtual address
2418          * to avoid the risk of CPU bugs in that area.
2419          */
2420         _pmd = pmdp_collapse_flush(vma, address, pmd);
2421         spin_unlock(pmd_ptl);
2422         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2423
2424         spin_lock(pte_ptl);
2425         isolated = __collapse_huge_page_isolate(vma, address, pte);
2426         spin_unlock(pte_ptl);
2427
2428         if (unlikely(!isolated)) {
2429                 pte_unmap(pte);
2430                 spin_lock(pmd_ptl);
2431                 BUG_ON(!pmd_none(*pmd));
2432                 /*
2433                  * We can only use set_pmd_at when establishing
2434                  * hugepmds and never for establishing regular pmds that
2435                  * point to regular pagetables. Use pmd_populate for that.
2436                  */
2437                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2438                 spin_unlock(pmd_ptl);
2439                 anon_vma_unlock_write(vma->anon_vma);
2440                 result = SCAN_FAIL;
2441                 goto out;
2442         }
2443
2444         /*
2445          * All pages are isolated and locked so anon_vma rmap
2446          * can't run anymore.
2447          */
2448         anon_vma_unlock_write(vma->anon_vma);
2449
2450         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2451         pte_unmap(pte);
2452         __SetPageUptodate(new_page);
2453         pgtable = pmd_pgtable(_pmd);
2454
2455         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2456         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2457
2458         /*
2459          * spin_lock() below is not the equivalent of smp_wmb(), so
2460          * this is needed to prevent the copy_huge_page writes from
2461          * becoming visible after the set_pmd_at() write.
2462          */
2463         smp_wmb();
2464
2465         spin_lock(pmd_ptl);
2466         BUG_ON(!pmd_none(*pmd));
2467         page_add_new_anon_rmap(new_page, vma, address, true);
2468         mem_cgroup_commit_charge(new_page, memcg, false, true);
2469         lru_cache_add_active_or_unevictable(new_page, vma);
2470         pgtable_trans_huge_deposit(mm, pmd, pgtable);
2471         set_pmd_at(mm, address, pmd, _pmd);
2472         update_mmu_cache_pmd(vma, address, pmd);
2473         spin_unlock(pmd_ptl);
2474
2475         *hpage = NULL;
2476
2477         khugepaged_pages_collapsed++;
2478         result = SCAN_SUCCEED;
2479 out_up_write:
2480         up_write(&mm->mmap_sem);
2481 out_nolock:
2482         trace_mm_collapse_huge_page(mm, isolated, result);
2483         return;
2484 out:
2485         mem_cgroup_cancel_charge(new_page, memcg, true);
2486         goto out_up_write;
2487 }
2488
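/*
 * Scan the ptes under @pmd to decide whether collapsing them into a huge
 * page is worthwhile. Returns 1 if collapse_huge_page() was called (and
 * mmap_sem released), 0 otherwise.
 */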
2489 static int khugepaged_scan_pmd(struct mm_struct *mm,
2490                                struct vm_area_struct *vma,
2491                                unsigned long address,
2492                                struct page **hpage)
2493 {
2494         pmd_t *pmd;
2495         pte_t *pte, *_pte;
2496         int ret = 0, none_or_zero = 0, result = 0;
2497         struct page *page = NULL;
2498         unsigned long _address;
2499         spinlock_t *ptl;
2500         int node = NUMA_NO_NODE, unmapped = 0;
2501         bool writable = false, referenced = false;
2502
2503         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2504
2505         pmd = mm_find_pmd(mm, address);
2506         if (!pmd) {
2507                 result = SCAN_PMD_NULL;
2508                 goto out;
2509         }
2510
2511         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2512         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2513         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2514              _pte++, _address += PAGE_SIZE) {
2515                 pte_t pteval = *_pte;
2516                 if (is_swap_pte(pteval)) {
2517                         if (++unmapped <= khugepaged_max_ptes_swap) {
2518                                 continue;
2519                         } else {
2520                                 result = SCAN_EXCEED_SWAP_PTE;
2521                                 goto out_unmap;
2522                         }
2523                 }
2524                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2525                         if (!userfaultfd_armed(vma) &&
2526                             ++none_or_zero <= khugepaged_max_ptes_none) {
2527                                 continue;
2528                         } else {
2529                                 result = SCAN_EXCEED_NONE_PTE;
2530                                 goto out_unmap;
2531                         }
2532                 }
2533                 if (!pte_present(pteval)) {
2534                         result = SCAN_PTE_NON_PRESENT;
2535                         goto out_unmap;
2536                 }
2537                 if (pte_write(pteval))
2538                         writable = true;
2539
2540                 page = vm_normal_page(vma, _address, pteval);
2541                 if (unlikely(!page)) {
2542                         result = SCAN_PAGE_NULL;
2543                         goto out_unmap;
2544                 }
2545
2546                 /* TODO: teach khugepaged to collapse THP mapped with pte */
2547                 if (PageCompound(page)) {
2548                         result = SCAN_PAGE_COMPOUND;
2549                         goto out_unmap;
2550                 }
2551
2552                 /*
2553                  * Record which node the original page is from and save this
2554                  * information to khugepaged_node_load[].
2555                  * Khugepaged will allocate a hugepage from the node that has
2556                  * the max hit count.
2557                  */
2558                 node = page_to_nid(page);
2559                 if (khugepaged_scan_abort(node)) {
2560                         result = SCAN_SCAN_ABORT;
2561                         goto out_unmap;
2562                 }
2563                 khugepaged_node_load[node]++;
2564                 if (!PageLRU(page)) {
2565                         result = SCAN_PAGE_LRU;
2566                         goto out_unmap;
2567                 }
2568                 if (PageLocked(page)) {
2569                         result = SCAN_PAGE_LOCK;
2570                         goto out_unmap;
2571                 }
2572                 if (!PageAnon(page)) {
2573                         result = SCAN_PAGE_ANON;
2574                         goto out_unmap;
2575                 }
2576
2577                 /*
2578                  * cannot use mapcount: can't collapse if there's a gup pin.
2579                  * The page must only be referenced by the scanned process
2580                  * and page swap cache.
2581                  */
2582                 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2583                         result = SCAN_PAGE_COUNT;
2584                         goto out_unmap;
2585                 }
2586                 if (pte_young(pteval) ||
2587                     page_is_young(page) || PageReferenced(page) ||
2588                     mmu_notifier_test_young(vma->vm_mm, address))
2589                         referenced = true;
2590         }
2591         if (writable) {
2592                 if (referenced) {
2593                         result = SCAN_SUCCEED;
2594                         ret = 1;
2595                 } else {
2596                         result = SCAN_NO_REFERENCED_PAGE;
2597                 }
2598         } else {
2599                 result = SCAN_PAGE_RO;
2600         }
2601 out_unmap:
2602         pte_unmap_unlock(pte, ptl);
2603         if (ret) {
2604                 node = khugepaged_find_target_node();
2605                 /* collapse_huge_page will return with the mmap_sem released */
2606                 collapse_huge_page(mm, address, hpage, vma, node);
2607         }
2608 out:
2609         trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
2610                                      none_or_zero, result, unmapped);
2611         return ret;
2612 }
2613
2614 static void collect_mm_slot(struct mm_slot *mm_slot)
2615 {
2616         struct mm_struct *mm = mm_slot->mm;
2617
2618         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2619
2620         if (khugepaged_test_exit(mm)) {
2621                 /* free mm_slot */
2622                 hash_del(&mm_slot->hash);
2623                 list_del(&mm_slot->mm_node);
2624
2625                 /*
2626                  * Not strictly needed because the mm exited already.
2627                  *
2628                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2629                  */
2630
2631                 /* khugepaged_mm_lock actually not necessary for the below */
2632                 free_mm_slot(mm_slot);
2633                 mmdrop(mm);
2634         }
2635 }
2636
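/*
 * Scan up to @pages ptes across the registered mms, resuming from the
 * cursor saved in khugepaged_scan. Called and returns with
 * khugepaged_mm_lock held (the lock is dropped while scanning).
 * Returns the amount of progress made.
 */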
2637 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2638                                             struct page **hpage)
2639         __releases(&khugepaged_mm_lock)
2640         __acquires(&khugepaged_mm_lock)
2641 {
2642         struct mm_slot *mm_slot;
2643         struct mm_struct *mm;
2644         struct vm_area_struct *vma;
2645         int progress = 0;
2646
2647         VM_BUG_ON(!pages);
2648         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2649
2650         if (khugepaged_scan.mm_slot)
2651                 mm_slot = khugepaged_scan.mm_slot;
2652         else {
2653                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2654                                      struct mm_slot, mm_node);
2655                 khugepaged_scan.address = 0;
2656                 khugepaged_scan.mm_slot = mm_slot;
2657         }
2658         spin_unlock(&khugepaged_mm_lock);
2659
2660         mm = mm_slot->mm;
2661         down_read(&mm->mmap_sem);
2662         if (unlikely(khugepaged_test_exit(mm)))
2663                 vma = NULL;
2664         else
2665                 vma = find_vma(mm, khugepaged_scan.address);
2666
2667         progress++;
2668         for (; vma; vma = vma->vm_next) {
2669                 unsigned long hstart, hend;
2670
2671                 cond_resched();
2672                 if (unlikely(khugepaged_test_exit(mm))) {
2673                         progress++;
2674                         break;
2675                 }
2676                 if (!hugepage_vma_check(vma)) {
2677 skip:
2678                         progress++;
2679                         continue;
2680                 }
2681                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2682                 hend = vma->vm_end & HPAGE_PMD_MASK;
2683                 if (hstart >= hend)
2684                         goto skip;
2685                 if (khugepaged_scan.address > hend)
2686                         goto skip;
2687                 if (khugepaged_scan.address < hstart)
2688                         khugepaged_scan.address = hstart;
2689                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2690
2691                 while (khugepaged_scan.address < hend) {
2692                         int ret;
2693                         cond_resched();
2694                         if (unlikely(khugepaged_test_exit(mm)))
2695                                 goto breakouterloop;
2696
2697                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2698                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2699                                   hend);
2700                         ret = khugepaged_scan_pmd(mm, vma,
2701                                                   khugepaged_scan.address,
2702                                                   hpage);
2703                         /* move to next address */
2704                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2705                         progress += HPAGE_PMD_NR;
2706                         if (ret)
2707                                 /* we released mmap_sem so break loop */
2708                                 goto breakouterloop_mmap_sem;
2709                         if (progress >= pages)
2710                                 goto breakouterloop;
2711                 }
2712         }
2713 breakouterloop:
2714         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2715 breakouterloop_mmap_sem:
2716
2717         spin_lock(&khugepaged_mm_lock);
2718         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2719         /*
2720          * Release the current mm_slot if this mm is about to die, or
2721          * if we scanned all vmas of this mm.
2722          */
2723         if (khugepaged_test_exit(mm) || !vma) {
2724                 /*
2725                  * Make sure that if mm_users is reaching zero while
2726                  * khugepaged runs here, khugepaged_exit will find
2727                  * mm_slot not pointing to the exiting mm.
2728                  */
2729                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2730                         khugepaged_scan.mm_slot = list_entry(
2731                                 mm_slot->mm_node.next,
2732                                 struct mm_slot, mm_node);
2733                         khugepaged_scan.address = 0;
2734                 } else {
2735                         khugepaged_scan.mm_slot = NULL;
2736                         khugepaged_full_scans++;
2737                 }
2738
2739                 collect_mm_slot(mm_slot);
2740         }
2741
2742         return progress;
2743 }
2744
2745 static int khugepaged_has_work(void)
2746 {
2747         return !list_empty(&khugepaged_scan.mm_head) &&
2748                 khugepaged_enabled();
2749 }
2750
2751 static int khugepaged_wait_event(void)
2752 {
2753         return !list_empty(&khugepaged_scan.mm_head) ||
2754                 kthread_should_stop();
2755 }
2756
2757 static void khugepaged_do_scan(void)
2758 {
2759         struct page *hpage = NULL;
2760         unsigned int progress = 0, pass_through_head = 0;
2761         unsigned int pages = khugepaged_pages_to_scan;
2762         bool wait = true;
2763
2764         barrier(); /* write khugepaged_pages_to_scan to local stack */
2765
2766         while (progress < pages) {
2767                 if (!khugepaged_prealloc_page(&hpage, &wait))
2768                         break;
2769
2770                 cond_resched();
2771
2772                 if (unlikely(kthread_should_stop() || try_to_freeze()))
2773                         break;
2774
2775                 spin_lock(&khugepaged_mm_lock);
2776                 if (!khugepaged_scan.mm_slot)
2777                         pass_through_head++;
2778                 if (khugepaged_has_work() &&
2779                     pass_through_head < 2)
2780                         progress += khugepaged_scan_mm_slot(pages - progress,
2781                                                             &hpage);
2782                 else
2783                         progress = pages;
2784                 spin_unlock(&khugepaged_mm_lock);
2785         }
2786
2787         if (!IS_ERR_OR_NULL(hpage))
2788                 put_page(hpage);
2789 }
2790
2791 static void khugepaged_wait_work(void)
2792 {
2793         if (khugepaged_has_work()) {
2794                 if (!khugepaged_scan_sleep_millisecs)
2795                         return;
2796
2797                 wait_event_freezable_timeout(khugepaged_wait,
2798                                              kthread_should_stop(),
2799                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2800                 return;
2801         }
2802
2803         if (khugepaged_enabled())
2804                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2805 }
2806
2807 static int khugepaged(void *none)
2808 {
2809         struct mm_slot *mm_slot;
2810
2811         set_freezable();
2812         set_user_nice(current, MAX_NICE);
2813
2814         while (!kthread_should_stop()) {
2815                 khugepaged_do_scan();
2816                 khugepaged_wait_work();
2817         }
2818
2819         spin_lock(&khugepaged_mm_lock);
2820         mm_slot = khugepaged_scan.mm_slot;
2821         khugepaged_scan.mm_slot = NULL;
2822         if (mm_slot)
2823                 collect_mm_slot(mm_slot);
2824         spin_unlock(&khugepaged_mm_lock);
2825         return 0;
2826 }
2827
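/*
 * Split a huge zero-page pmd: repopulate the pmd with the deposited page
 * table, whose ptes all map the small zero page, and drop the huge zero
 * page reference.
 */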
2828 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2829                 unsigned long haddr, pmd_t *pmd)
2830 {
2831         struct mm_struct *mm = vma->vm_mm;
2832         pgtable_t pgtable;
2833         pmd_t _pmd;
2834         int i;
2835
2836         /* leave pmd empty until pte is filled */
2837         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2838
2839         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2840         pmd_populate(mm, &_pmd, pgtable);
2841
2842         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2843                 pte_t *pte, entry;
2844                 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2845                 entry = pte_mkspecial(entry);
2846                 pte = pte_offset_map(&_pmd, haddr);
2847                 VM_BUG_ON(!pte_none(*pte));
2848                 set_pte_at(mm, haddr, pte, entry);
2849                 pte_unmap(pte);
2850         }
2851         smp_wmb(); /* make pte visible before pmd */
2852         pmd_populate(mm, pmd, pgtable);
2853         put_huge_zero_page();
2854 }
2855
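/*
 * Split a huge pmd in place with the pmd lock held. DAX and huge zero-page
 * pmds are handled specially; for a normal THP the deposited page table is
 * withdrawn and refilled with ptes mapping the subpages (or with migration
 * entries when @freeze), fixing up the subpage mapcounts.
 */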
2856 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2857                 unsigned long haddr, bool freeze)
2858 {
2859         struct mm_struct *mm = vma->vm_mm;
2860         struct page *page;
2861         pgtable_t pgtable;
2862         pmd_t _pmd;
2863         bool young, write;
2864         int i;
2865
2866         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2867         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2868         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2869         VM_BUG_ON(!pmd_trans_huge(*pmd));
2870
2871         count_vm_event(THP_SPLIT_PMD);
2872
2873         if (vma_is_dax(vma)) {
2874                 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2875                 if (is_huge_zero_pmd(_pmd))
2876                         put_huge_zero_page();
2877                 return;
2878         } else if (is_huge_zero_pmd(*pmd)) {
2879                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2880         }
2881
2882         page = pmd_page(*pmd);
2883         VM_BUG_ON_PAGE(!page_count(page), page);
2884         atomic_add(HPAGE_PMD_NR - 1, &page->_count);
2885         write = pmd_write(*pmd);
2886         young = pmd_young(*pmd);
2887
2888         /* leave pmd empty until pte is filled */
2889         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2890
2891         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2892         pmd_populate(mm, &_pmd, pgtable);
2893
2894         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2895                 pte_t entry, *pte;
2896                 /*
2897                  * Note that NUMA hinting access restrictions are not
2898                  * transferred to avoid any possibility of altering
2899                  * permissions across VMAs.
2900                  */
2901                 if (freeze) {
2902                         swp_entry_t swp_entry;
2903                         swp_entry = make_migration_entry(page + i, write);
2904                         entry = swp_entry_to_pte(swp_entry);
2905                 } else {
2906                         entry = mk_pte(page + i, vma->vm_page_prot);
2907                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2908                         if (!write)
2909                                 entry = pte_wrprotect(entry);
2910                         if (!young)
2911                                 entry = pte_mkold(entry);
2912                 }
2913                 pte = pte_offset_map(&_pmd, haddr);
2914                 BUG_ON(!pte_none(*pte));
2915                 set_pte_at(mm, haddr, pte, entry);
2916                 atomic_inc(&page[i]._mapcount);
2917                 pte_unmap(pte);
2918         }
2919
2920         /*
2921          * Set PG_double_map before dropping compound_mapcount to avoid
2922          * false-negative page_mapped().
2923          */
2924         if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2925                 for (i = 0; i < HPAGE_PMD_NR; i++)
2926                         atomic_inc(&page[i]._mapcount);
2927         }
2928
2929         if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2930                 /* Last compound_mapcount is gone. */
2931                 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
2932                 if (TestClearPageDoubleMap(page)) {
2933                         /* No need for the mapcount references anymore */
2934                         for (i = 0; i < HPAGE_PMD_NR; i++)
2935                                 atomic_dec(&page[i]._mapcount);
2936                 }
2937         }
2938
2939         smp_wmb(); /* make pte visible before pmd */
2940         pmd_populate(mm, pmd, pgtable);
2941 }
2942
2943 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2944                 unsigned long address)
2945 {
2946         spinlock_t *ptl;
2947         struct mm_struct *mm = vma->vm_mm;
2948         struct page *page = NULL;
2949         unsigned long haddr = address & HPAGE_PMD_MASK;
2950
2951         mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2952         ptl = pmd_lock(mm, pmd);
2953         if (unlikely(!pmd_trans_huge(*pmd)))
2954                 goto out;
2955         page = pmd_page(*pmd);
2956         __split_huge_pmd_locked(vma, pmd, haddr, false);
2957         if (PageMlocked(page))
2958                 get_page(page);
2959         else
2960                 page = NULL;
2961 out:
2962         spin_unlock(ptl);
2963         mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
2964         if (page) {
2965                 lock_page(page);
2966                 munlock_vma_page(page);
2967                 unlock_page(page);
2968                 put_page(page);
2969         }
2970 }
2971
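/*
 * split_huge_pmd_address() relies on the split_huge_pmd() wrapper (defined
 * in huge_mm.h, not shown in this hunk), which is assumed to check
 * pmd_trans_huge(*pmd) before handing the real work to __split_huge_pmd()
 * above.
 */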
2972 static void split_huge_pmd_address(struct vm_area_struct *vma,
2973                                     unsigned long address)
2974 {
2975         pgd_t *pgd;
2976         pud_t *pud;
2977         pmd_t *pmd;
2978
2979         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2980
2981         pgd = pgd_offset(vma->vm_mm, address);
2982         if (!pgd_present(*pgd))
2983                 return;
2984
2985         pud = pud_offset(pgd, address);
2986         if (!pud_present(*pud))
2987                 return;
2988
2989         pmd = pmd_offset(pud, address);
2990         if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
2991                 return;
2992         /*
2993          * The caller holds mmap_sem in write mode, so a huge pmd cannot
2994          * materialize from under us.
2995          */
2996         split_huge_pmd(vma, pmd, address);
2997 }
2998
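/*
 * A worked example of the alignment checks below, assuming 4K base pages
 * and HPAGE_PMD_SIZE == 2M: for start == 0x601000, start & ~HPAGE_PMD_MASK
 * is 0x1000 (not aligned) and start & HPAGE_PMD_MASK is 0x600000, so if
 * [0x600000, 0x800000) lies inside the VMA it could have held a huge PMD
 * and split_huge_pmd_address() is called on it.
 */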
2999 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3000                              unsigned long start,
3001                              unsigned long end,
3002                              long adjust_next)
3003 {
3004         /*
3005          * If the new start address isn't hpage aligned and it could
3006          * previously contain a hugepage: check if we need to split
3007          * a huge pmd.
3008          */
3009         if (start & ~HPAGE_PMD_MASK &&
3010             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
3011             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3012                 split_huge_pmd_address(vma, start);
3013
3014         /*
3015          * If the new end address isn't hpage aligned and it could
3016          * previously contain a hugepage: check if we need to split
3017          * a huge pmd.
3018          */
3019         if (end & ~HPAGE_PMD_MASK &&
3020             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
3021             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3022                 split_huge_pmd_address(vma, end);
3023
3024         /*
3025          * If we're also updating vma->vm_next->vm_start, and the new
3026          * vm_next->vm_start isn't hpage aligned and it could previously
3027          * contain a hugepage: check if we need to split a huge pmd.
3028          */
3029         if (adjust_next > 0) {
3030                 struct vm_area_struct *next = vma->vm_next;
3031                 unsigned long nstart = next->vm_start;
3032                 nstart += adjust_next << PAGE_SHIFT;
3033                 if (nstart & ~HPAGE_PMD_MASK &&
3034                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
3035                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
3036                         split_huge_pmd_address(next, nstart);
3037         }
3038 }
3039
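/*
 * "Freezing" a page for split: for every VMA that can map it, any mapping
 * (a huge PMD or individual PTEs) is replaced by migration entries so the
 * page is temporarily unmapped while the compound page is being torn down;
 * unfreeze_page() below re-installs real PTEs from those entries.
 */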
3040 static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
3041                 unsigned long address)
3042 {
3043         spinlock_t *ptl;
3044         pgd_t *pgd;
3045         pud_t *pud;
3046         pmd_t *pmd;
3047         pte_t *pte;
3048         int i;
3049
3050         pgd = pgd_offset(vma->vm_mm, address);
3051         if (!pgd_present(*pgd))
3052                 return;
3053         pud = pud_offset(pgd, address);
3054         if (!pud_present(*pud))
3055                 return;
3056         pmd = pmd_offset(pud, address);
3057         ptl = pmd_lock(vma->vm_mm, pmd);
3058         if (!pmd_present(*pmd)) {
3059                 spin_unlock(ptl);
3060                 return;
3061         }
3062         if (pmd_trans_huge(*pmd)) {
3063                 if (page == pmd_page(*pmd))
3064                         __split_huge_pmd_locked(vma, pmd, address, true);
3065                 spin_unlock(ptl);
3066                 return;
3067         }
3068         spin_unlock(ptl);
3069
3070         pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3071         for (i = 0; i < HPAGE_PMD_NR; i++, address += PAGE_SIZE, page++) {
3072                 pte_t entry, swp_pte;
3073                 swp_entry_t swp_entry;
3074
3075                 if (!pte_present(pte[i]))
3076                         continue;
3077                 if (page_to_pfn(page) != pte_pfn(pte[i]))
3078                         continue;
3079                 flush_cache_page(vma, address, page_to_pfn(page));
3080                 entry = ptep_clear_flush(vma, address, pte + i);
3081                 swp_entry = make_migration_entry(page, pte_write(entry));
3082                 swp_pte = swp_entry_to_pte(swp_entry);
3083                 if (pte_soft_dirty(entry))
3084                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
3085                 set_pte_at(vma->vm_mm, address, pte + i, swp_pte);
3086         }
3087         pte_unmap_unlock(pte, ptl);
3088 }
3089
3090 static void freeze_page(struct anon_vma *anon_vma, struct page *page)
3091 {
3092         struct anon_vma_chain *avc;
3093         pgoff_t pgoff = page_to_pgoff(page);
3094
3095         VM_BUG_ON_PAGE(!PageHead(page), page);
3096
3097         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
3098                         pgoff + HPAGE_PMD_NR - 1) {
3099                 unsigned long haddr;
3100
3101                 haddr = __vma_address(page, avc->vma) & HPAGE_PMD_MASK;
3102                 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3103                                 haddr, haddr + HPAGE_PMD_SIZE);
3104                 freeze_page_vma(avc->vma, page, haddr);
3105                 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3106                                 haddr, haddr + HPAGE_PMD_SIZE);
3107         }
3108 }
3109
3110 static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
3111                 unsigned long address)
3112 {
3113         spinlock_t *ptl;
3114         pmd_t *pmd;
3115         pte_t *pte, entry;
3116         swp_entry_t swp_entry;
3117         int i;
3118
3119         pmd = mm_find_pmd(vma->vm_mm, address);
3120         if (!pmd)
3121                 return;
3122         pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3123         for (i = 0; i < HPAGE_PMD_NR; i++, address += PAGE_SIZE, page++) {
3124                 if (!page_mapped(page))
3125                         continue;
3126                 if (!is_swap_pte(pte[i]))
3127                         continue;
3128
3129                 swp_entry = pte_to_swp_entry(pte[i]);
3130                 if (!is_migration_entry(swp_entry))
3131                         continue;
3132                 if (migration_entry_to_page(swp_entry) != page)
3133                         continue;
3134
3135                 entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
3136                 entry = pte_mkdirty(entry);
3137                 if (is_write_migration_entry(swp_entry))
3138                         entry = maybe_mkwrite(entry, vma);
3139
3140                 flush_dcache_page(page);
3141                 set_pte_at(vma->vm_mm, address, pte + i, entry);
3142
3143                 /* No need to invalidate - it was non-present before */
3144                 update_mmu_cache(vma, address, pte + i);
3145         }
3146         pte_unmap_unlock(pte, ptl);
3147 }
3148
3149 static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
3150 {
3151         struct anon_vma_chain *avc;
3152         pgoff_t pgoff = page_to_pgoff(page);
3153
3154         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
3155                         pgoff, pgoff + HPAGE_PMD_NR - 1) {
3156                 unsigned long address = __vma_address(page, avc->vma);
3157
3158                 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3159                                 address, address + HPAGE_PMD_SIZE);
3160                 unfreeze_page_vma(avc->vma, page, address);
3161                 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3162                                 address, address + HPAGE_PMD_SIZE);
3163         }
3164 }
3165
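/*
 * total_mapcount() computes
 *     compound_mapcount(page) + sum(_mapcount[i] + 1) - (DoubleMap ? HPAGE_PMD_NR : 0)
 * over all HPAGE_PMD_NR subpages.  For example, a THP mapped by a single
 * PMD only: compound_mapcount() == 1, every subpage _mapcount reads -1 and
 * contributes 0, PageDoubleMap is clear, so the result is 1.
 */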
3166 static int total_mapcount(struct page *page)
3167 {
3168         int i, ret;
3169
3170         ret = compound_mapcount(page);
3171         for (i = 0; i < HPAGE_PMD_NR; i++)
3172                 ret += atomic_read(&page[i]._mapcount) + 1;
3173
3174         if (PageDoubleMap(page))
3175                 ret -= HPAGE_PMD_NR;
3176
3177         return ret;
3178 }
3179
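/*
 * Turn one tail of the compound page into a standalone page: its _count is
 * raised from 0 to its mapcount + 1, relevant flags are copied from the
 * head, compound_head is cleared and the tail goes onto the LRU.  The
 * returned mapcount is later subtracted from the head's _count by
 * __split_huge_page().
 */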
3180 static int __split_huge_page_tail(struct page *head, int tail,
3181                 struct lruvec *lruvec, struct list_head *list)
3182 {
3183         int mapcount;
3184         struct page *page_tail = head + tail;
3185
3186         mapcount = atomic_read(&page_tail->_mapcount) + 1;
3187         VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
3188
3189         /*
3190          * tail_page->_count is zero and not changing from under us. But
3191          * get_page_unless_zero() may be running from under us on the
3192          * tail_page. If we used atomic_set() below instead of atomic_add(), we
3193          * would then run atomic_set() concurrently with
3194          * get_page_unless_zero(), and atomic_set() is implemented in C not
3195          * using locked ops. spin_unlock on x86 sometimes uses locked ops
3196          * because of PPro errata 66, 92, so unless somebody can guarantee
3197          * atomic_set() here would be safe on all archs (and not only on x86),
3198          * it's safer to use atomic_add().
3199          */
3200         atomic_add(mapcount + 1, &page_tail->_count);
3201
3202         /* after clearing PageTail the gup refcount can be released */
3203         smp_mb__after_atomic();
3204
3205         page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3206         page_tail->flags |= (head->flags &
3207                         ((1L << PG_referenced) |
3208                          (1L << PG_swapbacked) |
3209                          (1L << PG_mlocked) |
3210                          (1L << PG_uptodate) |
3211                          (1L << PG_active) |
3212                          (1L << PG_locked) |
3213                          (1L << PG_unevictable)));
3214         page_tail->flags |= (1L << PG_dirty);
3215
3216         clear_compound_head(page_tail);
3217
3218         if (page_is_young(head))
3219                 set_page_young(page_tail);
3220         if (page_is_idle(head))
3221                 set_page_idle(page_tail);
3222
3223         /* ->mapping in first tail page is compound_mapcount */
3224         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3225                         page_tail);
3226         page_tail->mapping = head->mapping;
3227
3228         page_tail->index = head->index + tail;
3229         page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
3230         lru_add_page_tail(head, page_tail, lruvec, list);
3231
3232         return mapcount;
3233 }
3234
3235 static void __split_huge_page(struct page *page, struct list_head *list)
3236 {
3237         struct page *head = compound_head(page);
3238         struct zone *zone = page_zone(head);
3239         struct lruvec *lruvec;
3240         int i, tail_mapcount;
3241
3242         /* prevent PageLRU from going away from under us, and freeze lru stats */
3243         spin_lock_irq(&zone->lru_lock);
3244         lruvec = mem_cgroup_page_lruvec(head, zone);
3245
3246         spin_lock(&split_queue_lock);
3247         if (!list_empty(page_deferred_list(head))) {
3248                 split_queue_len--;
3249                 list_del(page_deferred_list(head));
3250         }
3251         spin_unlock(&split_queue_lock);
3252
3253         /* complete the memcg work before adding pages to the LRU */
3254         mem_cgroup_split_huge_fixup(head);
3255
3256         tail_mapcount = 0;
3257         for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
3258                 tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
3259         atomic_sub(tail_mapcount, &head->_count);
3260
3261         ClearPageCompound(head);
3262         spin_unlock_irq(&zone->lru_lock);
3263
3264         unfreeze_page(page_anon_vma(head), head);
3265
3266         for (i = 0; i < HPAGE_PMD_NR; i++) {
3267                 struct page *subpage = head + i;
3268                 if (subpage == page)
3269                         continue;
3270                 unlock_page(subpage);
3271
3272                 /*
3273                  * Subpages may be freed if there wasn't any mapping
3274                  * left, e.g. if add_to_swap() is running on an LRU page
3275                  * that had its mapping zapped. And freeing these pages
3276                  * requires taking the lru_lock, so we do the put_page
3277                  * of the tail pages after the split is complete.
3278                  */
3279                 put_page(subpage);
3280         }
3281 }
3282
3283 /*
3284  * This function splits a huge page into normal pages. @page can point to any
3285  * subpage of the huge page to split. The split doesn't change the position of @page.
3286  *
3287  * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
3288  * The huge page must be locked.
3289  *
3290  * If @list is NULL, tail pages will be added to the LRU list; otherwise, to @list.
3291  *
3292  * Both head page and tail pages will inherit mapping, flags, and so on from
3293  * the hugepage.
3294  *
3295  * The GUP pin and PG_locked are transferred to @page. The remaining subpages
3296  * can be freed if they are not mapped.
3297  *
3298  * Returns 0 if the hugepage is split successfully.
3299  * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
3300  * us.
3301  */
3302 int split_huge_page_to_list(struct page *page, struct list_head *list)
3303 {
3304         struct page *head = compound_head(page);
3305         struct anon_vma *anon_vma;
3306         int count, mapcount, ret;
3307
3308         VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
3309         VM_BUG_ON_PAGE(!PageAnon(page), page);
3310         VM_BUG_ON_PAGE(!PageLocked(page), page);
3311         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
3312         VM_BUG_ON_PAGE(!PageCompound(page), page);
3313
3314         /*
3315          * The caller does not necessarily hold an mmap_sem that would prevent
3316          * the anon_vma disappearing, so we first take a reference to it
3317          * and then lock the anon_vma for write. This is similar to
3318          * page_lock_anon_vma_read except the write lock is taken to serialise
3319          * against parallel split or collapse operations.
3320          */
3321         anon_vma = page_get_anon_vma(head);
3322         if (!anon_vma) {
3323                 ret = -EBUSY;
3324                 goto out;
3325         }
3326         anon_vma_lock_write(anon_vma);
3327
3328         /*
3329          * Racy check whether we can split the page, before freeze_page()
3330          * splits the PMDs.
3331          */
3332         if (total_mapcount(head) != page_count(head) - 1) {
3333                 ret = -EBUSY;
3334                 goto out_unlock;
3335         }
3336
3337         freeze_page(anon_vma, head);
3338         VM_BUG_ON_PAGE(compound_mapcount(head), head);
3339
3340         count = page_count(head);
3341         mapcount = total_mapcount(head);
3342         if (mapcount == count - 1) {
3343                 __split_huge_page(page, list);
3344                 ret = 0;
3345         } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount > count - 1) {
3346                 pr_alert("total_mapcount: %u, page_count(): %u\n",
3347                                 mapcount, count);
3348                 if (PageTail(page))
3349                         dump_page(head, NULL);
3350                 dump_page(page, "total_mapcount(head) > page_count(head) - 1");
3351                 BUG();
3352         } else {
3353                 unfreeze_page(anon_vma, head);
3354                 ret = -EBUSY;
3355         }
3356
3357 out_unlock:
3358         anon_vma_unlock_write(anon_vma);
3359         put_anon_vma(anon_vma);
3360 out:
3361         count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3362         return ret;
3363 }
3364
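/*
 * A minimal usage sketch (illustrative only, mirroring the pattern in
 * deferred_split_scan() below); it assumes the usual split_huge_page()
 * wrapper that calls split_huge_page_to_list(page, NULL).
 */
#if 0
static int example_try_split(struct page *page)
{
	int ret = -EBUSY;

	if (!get_page_unless_zero(page))	/* hold a pin across the split */
		return ret;
	if (trylock_page(page)) {		/* the split requires PG_locked */
		ret = split_huge_page(page);	/* 0 on success, -EBUSY otherwise */
		unlock_page(page);
	}
	put_page(page);
	return ret;
}
#endif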
3365 void free_transhuge_page(struct page *page)
3366 {
3367         unsigned long flags;
3368
3369         spin_lock_irqsave(&split_queue_lock, flags);
3370         if (!list_empty(page_deferred_list(page))) {
3371                 split_queue_len--;
3372                 list_del(page_deferred_list(page));
3373         }
3374         spin_unlock_irqrestore(&split_queue_lock, flags);
3375         free_compound_page(page);
3376 }
3377
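/*
 * Queue a huge page for deferred splitting: rather than splitting it
 * synchronously, the page is put on split_queue and the deferred_split
 * shrinker below splits queued pages when memory pressure asks for it.
 */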
3378 void deferred_split_huge_page(struct page *page)
3379 {
3380         unsigned long flags;
3381
3382         VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3383
3384         spin_lock_irqsave(&split_queue_lock, flags);
3385         if (list_empty(page_deferred_list(page))) {
3386                 list_add_tail(page_deferred_list(page), &split_queue);
3387                 split_queue_len++;
3388         }
3389         spin_unlock_irqrestore(&split_queue_lock, flags);
3390 }
3391
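/*
 * With 4K base pages HPAGE_PMD_NR is 512, so deferred_split_count() reports
 * each queued huge page to the shrinker core as 256 reclaimable objects.
 */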
3392 static unsigned long deferred_split_count(struct shrinker *shrink,
3393                 struct shrink_control *sc)
3394 {
3395         /*
3396          * Splitting a page from split_queue will free up at least one page,
3397          * at most HPAGE_PMD_NR - 1. We don't track the exact number.
3398          * Let's use HPAGE_PMD_NR / 2 as a ballpark.
3399          */
3400         return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
3401 }
3402
3403 static unsigned long deferred_split_scan(struct shrinker *shrink,
3404                 struct shrink_control *sc)
3405 {
3406         unsigned long flags;
3407         LIST_HEAD(list), *pos, *next;
3408         struct page *page;
3409         int split = 0;
3410
3411         spin_lock_irqsave(&split_queue_lock, flags);
3412         list_splice_init(&split_queue, &list);
3413
3414         /* Take pin on all head pages to avoid freeing them under us */
3415         list_for_each_safe(pos, next, &list) {
3416                 page = list_entry((void *)pos, struct page, mapping);
3417                 page = compound_head(page);
3418                 /* race with put_compound_page() */
3419                 if (!get_page_unless_zero(page)) {
3420                         list_del_init(page_deferred_list(page));
3421                         split_queue_len--;
3422                 }
3423         }
3424         spin_unlock_irqrestore(&split_queue_lock, flags);
3425
3426         list_for_each_safe(pos, next, &list) {
3427                 page = list_entry((void *)pos, struct page, mapping);
3428                 lock_page(page);
3429                 /* split_huge_page() removes page from list on success */
3430                 if (!split_huge_page(page))
3431                         split++;
3432                 unlock_page(page);
3433                 put_page(page);
3434         }
3435
3436         spin_lock_irqsave(&split_queue_lock, flags);
3437         list_splice_tail(&list, &split_queue);
3438         spin_unlock_irqrestore(&split_queue_lock, flags);
3439
3440         return split * HPAGE_PMD_NR / 2;
3441 }
3442
3443 static struct shrinker deferred_split_shrinker = {
3444         .count_objects = deferred_split_count,
3445         .scan_objects = deferred_split_scan,
3446         .seeks = DEFAULT_SEEKS,
3447 };
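/*
 * The shrinker is presumably registered with register_shrinker() from the
 * THP init path elsewhere in this file (not visible in this hunk); once
 * registered, deferred_split_count()/deferred_split_scan() above are what
 * the shrinker core invokes under memory pressure.
 */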