* Copyright (C) 2009 Red Hat, Inc.
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/dax.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <asm/pgalloc.h>
SCAN_NO_REFERENCED_PAGE,
SCAN_ALLOC_HUGE_PAGE_FAIL,
SCAN_CGROUP_CHARGE_FAIL
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
* By default, transparent hugepage support is disabled to avoid the risk of
* increasing the memory footprint of applications without a guaranteed
* benefit. When transparent hugepage support is enabled, it applies to all
* mappings, and khugepaged scans all mappings.
* Defrag is invoked by khugepaged hugepage allocations and by page faults
* for all hugepage allocations.
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
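/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the "enabled" policy is encoded as two mutually exclusive bits in
 * transparent_hugepage_flags. The decoding that double_flag_show()
 * below performs boils down to:
 *
 *	bool always = test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 *			       &transparent_hugepage_flags);
 *	bool madvise = test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 *				&transparent_hugepage_flags);
 *
 * At most one of the two bits is set; both clear means "never".
 */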
/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
* default: collapse hugepages if at least one pte is mapped, as would have
* happened if the vma had been large enough during the page fault
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
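/*
 * Worked example (editor's addition; assumes x86-64 with 4KiB base pages
 * and 2MiB THPs, i.e. HPAGE_PMD_NR == 512): the default max_ptes_none of
 * 511 lets khugepaged collapse a PMD range with only a single pte mapped,
 * growing the RSS by up to 511 * 4KiB (just under 2MiB) per collapse.
 */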
static int khugepaged(void *none);
static int khugepaged_slab_init(void);
static void khugepaged_slab_exit(void);
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
static struct kmem_cache *mm_slot_cache __read_mostly;
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
* @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
* @mm: the mm that this information is valid for
struct hlist_node hash;
struct list_head mm_node;
struct mm_struct *mm;
* struct khugepaged_scan - cursor for scanning
* @mm_head: the head of the mm list to scan
* @mm_slot: the current mm_slot we are scanning
* @address: the next address inside that to be scanned
* There is only the one khugepaged_scan instance of this cursor structure.
struct khugepaged_scan {
struct list_head mm_head;
struct mm_slot *mm_slot;
unsigned long address;
static struct khugepaged_scan khugepaged_scan = {
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
static void set_recommended_min_free_kbytes(void)
unsigned long recommended_min;
for_each_populated_zone(zone)
/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
recommended_min = pageblock_nr_pages * nr_zones * 2;
* Make sure that on average at least two pageblocks are almost free
* of another type: one for a migratetype to fall back to and a
* second to avoid subsequent fallbacks of other types. There are 3
* MIGRATE_TYPES we care about.
recommended_min += pageblock_nr_pages * nr_zones *
MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
/* never allow reserving more than 5% of the lowmem */
recommended_min = min(recommended_min,
(unsigned long) nr_free_buffer_pages() / 20);
recommended_min <<= (PAGE_SHIFT-10);
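/*
 * Worked example (editor's addition; assumes 4KiB pages, 2MiB pageblocks
 * so pageblock_nr_pages == 512, and 4 populated zones): the base reserve
 * is 512 * 4 * 2 = 4096 pages, plus 512 * 4 * 3 * 3 = 18432 pages for
 * the migratetype fallback reserve (MIGRATE_PCPTYPES == 3), capped at
 * 5% of lowmem and converted from pages to KiB by the shift above.
 */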
if (recommended_min > min_free_kbytes) {
if (user_min_free_kbytes >= 0)
pr_info("raising min_free_kbytes from %d to %lu "
"to help transparent hugepage allocations\n",
min_free_kbytes, recommended_min);
min_free_kbytes = recommended_min;
setup_per_zone_wmarks();
static int start_stop_khugepaged(void)
if (khugepaged_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
if (IS_ERR(khugepaged_thread)) {
pr_err("khugepaged: kthread_run(khugepaged) failed\n");
err = PTR_ERR(khugepaged_thread);
khugepaged_thread = NULL;
if (!list_empty(&khugepaged_scan.mm_head))
wake_up_interruptible(&khugepaged_wait);
set_recommended_min_free_kbytes();
} else if (khugepaged_thread) {
kthread_stop(khugepaged_thread);
khugepaged_thread = NULL;
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
struct page *get_huge_zero_page(void)
struct page *zero_page;
if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
return READ_ONCE(huge_zero_page);
zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
count_vm_event(THP_ZERO_PAGE_ALLOC);
if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
__free_pages(zero_page, compound_order(zero_page));
/* We take an additional reference here. It will be put back by the shrinker */
atomic_set(&huge_zero_refcount, 2);
return READ_ONCE(huge_zero_page);
static void put_huge_zero_page(void)
* The counter should never go to zero here. Only the shrinker can put
* the last reference.
BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
struct shrink_control *sc)
/* we can free the zero page only if the last reference remains */
return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
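/*
 * Refcount protocol summary (editor's addition, restating the code
 * above): get_huge_zero_page() installs the page with a count of 2, one
 * reference for the caller and one held back for the shrinker; callers
 * pair it with put_huge_zero_page(). Only once the count has dropped
 * back to 1 does the shrinker's cmpxchg(1 -> 0) below succeed and free
 * the page.
 */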
static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
struct shrink_control *sc)
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
__free_pages(zero_page, compound_order(zero_page));
static struct shrinker huge_zero_page_shrinker = {
.count_objects = shrink_huge_zero_page_count,
.scan_objects = shrink_huge_zero_page_scan,
.seeks = DEFAULT_SEEKS,
static ssize_t double_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
if (test_bit(enabled, &transparent_hugepage_flags)) {
VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
return sprintf(buf, "[always] madvise never\n");
} else if (test_bit(req_madv, &transparent_hugepage_flags))
return sprintf(buf, "always [madvise] never\n");
return sprintf(buf, "always madvise [never]\n");
static ssize_t double_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
if (!memcmp("always", buf,
min(sizeof("always")-1, count))) {
set_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("madvise", buf,
min(sizeof("madvise")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
set_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("never", buf,
min(sizeof("never")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
static ssize_t enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
ret = double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
mutex_lock(&khugepaged_mutex);
err = start_stop_khugepaged();
mutex_unlock(&khugepaged_mutex);
static struct kobj_attribute enabled_attr =
__ATTR(enabled, 0644, enabled_show, enabled_store);
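/*
 * Usage example (editor's addition; administration from userspace, not
 * kernel code): the attribute above is exposed as
 * /sys/kernel/mm/transparent_hugepage/enabled and accepts one of the
 * three keywords, e.g.:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	cat /sys/kernel/mm/transparent_hugepage/enabled
 *	  -> always [madvise] never
 */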
static ssize_t single_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag)
return sprintf(buf, "%d\n",
!!test_bit(flag, &transparent_hugepage_flags));
static ssize_t single_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag)
ret = kstrtoul(buf, 10, &value);
set_bit(flag, &transparent_hugepage_flags);
clear_bit(flag, &transparent_hugepage_flags);
* Currently defrag only disables __GFP_NOWAIT for allocation. A blind
* __GFP_REPEAT is too aggressive; it's never worth swapping tons of
* memory just to allocate one more hugepage.
static ssize_t defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
static ssize_t defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
return double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
static struct kobj_attribute defrag_attr =
__ATTR(defrag, 0644, defrag_show, defrag_store);
static ssize_t use_zero_page_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static ssize_t use_zero_page_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static struct kobj_attribute use_zero_page_attr =
__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
static ssize_t debug_cow_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
static struct kobj_attribute debug_cow_attr =
__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
&debug_cow_attr.attr,
static struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
khugepaged_scan_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
static struct kobj_attribute scan_sleep_millisecs_attr =
__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
err = kstrtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
khugepaged_alloc_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
static struct kobj_attribute alloc_sleep_millisecs_attr =
__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
static ssize_t pages_to_scan_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
err = kstrtoul(buf, 10, &pages);
if (err || !pages || pages > UINT_MAX)
khugepaged_pages_to_scan = pages;
static struct kobj_attribute pages_to_scan_attr =
__ATTR(pages_to_scan, 0644, pages_to_scan_show,
pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
static struct kobj_attribute pages_collapsed_attr =
__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_full_scans);
static struct kobj_attribute full_scans_attr =
__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
static struct kobj_attribute khugepaged_defrag_attr =
__ATTR(defrag, 0644, khugepaged_defrag_show,
khugepaged_defrag_store);
* max_ptes_none controls whether khugepaged should collapse hugepages
* over unmapped ptes, in turn potentially increasing the memory
* footprint of the vmas. When max_ptes_none is 0, khugepaged will not
* reduce the available free memory in the system as it
* runs. Increasing max_ptes_none will instead potentially reduce the
* free memory in the system during the khugepaged scan.
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
struct kobj_attribute *attr,
return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
unsigned long max_ptes_none;
err = kstrtoul(buf, 10, &max_ptes_none);
if (err || max_ptes_none > HPAGE_PMD_NR-1)
khugepaged_max_ptes_none = max_ptes_none;
static struct kobj_attribute khugepaged_max_ptes_none_attr =
__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
khugepaged_max_ptes_none_store);
static struct attribute *khugepaged_attr[] = {
&khugepaged_defrag_attr.attr,
&khugepaged_max_ptes_none_attr.attr,
&pages_to_scan_attr.attr,
&pages_collapsed_attr.attr,
&full_scans_attr.attr,
&scan_sleep_millisecs_attr.attr,
&alloc_sleep_millisecs_attr.attr,
static struct attribute_group khugepaged_attr_group = {
.attrs = khugepaged_attr,
.name = "khugepaged",
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
pr_err("failed to create transparent hugepage kobject\n");
err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
pr_err("failed to register transparent hugepage group\n");
err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
pr_err("failed to register transparent hugepage group\n");
goto remove_hp_group;
sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
kobject_put(*hugepage_kobj);
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
kobject_put(hugepage_kobj);
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
struct kobject *hugepage_kobj;
if (!has_transparent_hugepage()) {
transparent_hugepage_flags = 0;
err = hugepage_init_sysfs(&hugepage_kobj);
err = khugepaged_slab_init();
err = register_shrinker(&huge_zero_page_shrinker);
goto err_hzp_shrinker;
* By default, disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than the TLB overhead
* is likely to save. The admin can still enable it through /sys.
if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
transparent_hugepage_flags = 0;
err = start_stop_khugepaged();
unregister_shrinker(&huge_zero_page_shrinker);
khugepaged_slab_exit();
hugepage_exit_sysfs(hugepage_kobj);
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
if (!strcmp(str, "always")) {
set_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
} else if (!strcmp(str, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
} else if (!strcmp(str, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
pr_warn("transparent_hugepage= cannot parse, ignored\n");
__setup("transparent_hugepage=", setup_transparent_hugepage);
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
entry = mk_pmd(page, prot);
entry = pmd_mkhuge(entry);
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
struct page *page, gfp_t gfp,
struct mem_cgroup *memcg;
unsigned long haddr = address & HPAGE_PMD_MASK;
VM_BUG_ON_PAGE(!PageCompound(page), page);
if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable)) {
mem_cgroup_cancel_charge(page, memcg, true);
clear_huge_page(page, haddr, HPAGE_PMD_NR);
* The memory barrier inside __SetPageUptodate makes sure that
* clear_huge_page writes become visible before the set_pmd_at()
* write.
__SetPageUptodate(page);
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_none(*pmd))) {
mem_cgroup_cancel_charge(page, memcg, true);
pte_free(mm, pgtable);
/* Deliver the page fault to userland */
if (userfaultfd_missing(vma)) {
mem_cgroup_cancel_charge(page, memcg, true);
pte_free(mm, pgtable);
ret = handle_userfault(vma, address, flags,
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
entry = mk_huge_pmd(page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
page_add_new_anon_rmap(page, vma, haddr, true);
mem_cgroup_commit_charge(page, memcg, false, true);
lru_cache_add_active_or_unevictable(page, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
atomic_long_inc(&mm->nr_ptes);
count_vm_event(THP_FAULT_ALLOC);
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
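/*
 * Illustrative expansion (editor's addition; assumes the GFP_TRANSHUGE
 * definition of this kernel generation, which includes __GFP_RECLAIM):
 * with defrag enabled the result is GFP_TRANSHUGE | extra_gfp, so the
 * allocator may enter direct reclaim/compaction; with defrag disabled
 * __GFP_RECLAIM is masked out and the allocation fails fast rather than
 * stalling the fault.
 */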
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
struct page *zero_page)
entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
atomic_long_inc(&mm->nr_ptes);
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
unsigned long haddr = address & HPAGE_PMD_MASK;
if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return VM_FAULT_FALLBACK;
if (vma->vm_flags & VM_LOCKED)
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
transparent_hugepage_use_zero_page()) {
struct page *zero_page;
pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable))
zero_page = get_huge_zero_page();
if (unlikely(!zero_page)) {
pte_free(mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
ptl = pmd_lock(mm, pmd);
if (pmd_none(*pmd)) {
if (userfaultfd_missing(vma)) {
ret = handle_userfault(vma, address, flags,
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
set_huge_zero_page(pgtable, mm, vma,
pte_free(mm, pgtable);
put_huge_zero_page();
gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
struct mm_struct *mm = vma->vm_mm;
ptl = pmd_lock(mm, pmd);
if (pmd_none(*pmd)) {
entry = pmd_mkhuge(pfn_pmd(pfn, prot));
entry = pmd_mkyoung(pmd_mkdirty(entry));
entry = maybe_pmd_mkwrite(entry, vma);
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned long pfn, bool write)
pgprot_t pgprot = vma->vm_page_prot;
* If we had pmd_special, we could avoid all these restrictions,
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
if (track_pfn_insert(vma, &pgprot, pfn))
return VM_FAULT_SIGBUS;
insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
return VM_FAULT_NOPAGE;
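/*
 * Caller sketch (editor's addition; hypothetical, modelled on a
 * DAX-style huge fault handler, and ok_to_map_pmd() is an invented
 * name): a driver that has resolved a PMD-aligned pfn for the faulting
 * address could do
 *
 *	if (ok_to_map_pmd(vma, haddr, &pfn))
 *		return vmf_insert_pfn_pmd(vma, haddr, pmd, pfn,
 *					  flags & FAULT_FLAG_WRITE);
 *	return VM_FAULT_FALLBACK;
 *
 * relying on the BUG_ON()s above to enforce the VM_PFNMAP/VM_MIXEDMAP
 * contract.
 */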
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma)
spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
pgtable = pte_alloc_one(dst_mm, addr);
if (unlikely(!pgtable))
dst_ptl = pmd_lock(dst_mm, dst_pmd);
src_ptl = pmd_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
if (unlikely(!pmd_trans_huge(pmd))) {
pte_free(dst_mm, pgtable);
* When the page table lock is held, the huge zero pmd should not be
* under splitting, since we don't split the page itself, only the pmd
* to a page table.
if (is_huge_zero_pmd(pmd)) {
struct page *zero_page;
* get_huge_zero_page() will never allocate a new page here,
* since we already have a zero page to copy. It just takes a
* reference.
zero_page = get_huge_zero_page();
set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
pte_free(dst_mm, pgtable);
wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
page_dup_rmap(src_page);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
atomic_long_inc(&dst_mm->nr_ptes);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
void huge_pmd_set_accessed(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmd, pmd_t orig_pmd,
unsigned long haddr;
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
entry = pmd_mkyoung(orig_pmd);
haddr = address & HPAGE_PMD_MASK;
if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
update_mmu_cache_pmd(vma, address, pmd);
* Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
* during copy_user_huge_page()'s copy_page_rep(): in the case when
* the source page gets split and a tail freed before copy completes.
* Called under pmd_lock of checked pmd, so safe from splitting itself.
static void get_user_huge_page(struct page *page)
if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
struct page *endpage = page + HPAGE_PMD_NR;
atomic_add(HPAGE_PMD_NR, &page->_count);
while (++page < endpage)
get_huge_page_tail(page);
static void put_user_huge_page(struct page *page)
if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
struct page *endpage = page + HPAGE_PMD_NR;
while (page < endpage)
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmd, pmd_t orig_pmd,
unsigned long haddr)
struct mem_cgroup *memcg;
struct page **pages;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
vma, address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
mem_cgroup_cancel_charge(pages[i], memcg,
ret |= VM_FAULT_OOM;
set_page_private(pages[i], (unsigned long)memcg);
for (i = 0; i < HPAGE_PMD_NR; i++) {
copy_user_highpage(pages[i], page + i,
haddr + PAGE_SIZE * i, vma);
__SetPageUptodate(pages[i]);
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page);
pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
entry = mk_pte(pages[i], vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
page_add_new_anon_rmap(pages[i], vma, haddr, false);
mem_cgroup_commit_charge(pages[i], memcg, false, false);
lru_cache_add_active_or_unevictable(pages[i], vma);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page, true);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
ret |= VM_FAULT_WRITE;
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
for (i = 0; i < HPAGE_PMD_NR; i++) {
memcg = (void *)page_private(pages[i]);
set_page_private(pages[i], 0);
mem_cgroup_cancel_charge(pages[i], memcg, false);
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
struct page *page = NULL, *new_page;
struct mem_cgroup *memcg;
unsigned long haddr;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
gfp_t huge_gfp; /* for allocation and charge */
ptl = pmd_lockptr(mm, pmd);
VM_BUG_ON_VMA(!vma->anon_vma, vma);
haddr = address & HPAGE_PMD_MASK;
if (is_huge_zero_pmd(orig_pmd))
if (unlikely(!pmd_same(*pmd, orig_pmd)))
page = pmd_page(orig_pmd);
VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
* We can only reuse the page if nobody else maps the huge page or its
* part. We could check that with page_mapcount() on each sub-page, but
* the cheaper way is to check that page_count() equals 1: every
* mapcount takes a page reference, so this way we can
* guarantee that the PMD is the only mapping.
* This can give a false negative if somebody pinned the page, but that's
* safe: we then simply fall back to copying.
if (page_mapcount(page) == 1 && page_count(page) == 1) {
entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
update_mmu_cache_pmd(vma, address, pmd);
ret |= VM_FAULT_WRITE;
get_user_huge_page(page);
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
if (unlikely(!new_page)) {
split_huge_pmd(vma, pmd, address);
ret |= VM_FAULT_FALLBACK;
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
if (ret & VM_FAULT_OOM) {
split_huge_pmd(vma, pmd, address);
ret |= VM_FAULT_FALLBACK;
put_user_huge_page(page);
count_vm_event(THP_FAULT_FALLBACK);
if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
split_huge_pmd(vma, pmd, address);
put_user_huge_page(page);
split_huge_pmd(vma, pmd, address);
ret |= VM_FAULT_FALLBACK;
count_vm_event(THP_FAULT_FALLBACK);
count_vm_event(THP_FAULT_ALLOC);
clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
mmun_end = haddr + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
put_user_huge_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
mem_cgroup_cancel_charge(new_page, memcg, true);
entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
pmdp_huge_clear_flush_notify(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr, true);
mem_cgroup_commit_charge(new_page, memcg, false, true);
lru_cache_add_active_or_unevictable(new_page, vma);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, pmd);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
put_huge_zero_page();
VM_BUG_ON_PAGE(!PageHead(page), page);
page_remove_rmap(page, true);
ret |= VM_FAULT_WRITE;
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
assert_spin_locked(pmd_lockptr(mm, pmd));
if (flags & FOLL_WRITE && !pmd_write(*pmd))
/* Avoid dumping huge zero page */
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
/* Full NUMA hinting faults to serialise migration in fault paths */
if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page), page);
if (flags & FOLL_TOUCH) {
* We should set the dirty bit only for FOLL_WRITE but
* for now the dirty bit in the pmd is meaningless.
* And if the dirty bit will become meaningful and
* we'll only set it with FOLL_WRITE, an atomic
* set_bit will be required on the pmd to set the
* young bit, instead of the current set_pmd_at.
_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
update_mmu_cache_pmd(vma, addr, pmd);
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
if (page->mapping && trylock_page(page)) {
mlock_vma_page(page);
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON_PAGE(!PageCompound(page), page);
if (flags & FOLL_GET)
get_page_foll(page);
/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp)
struct anon_vma *anon_vma = NULL;
unsigned long haddr = addr & HPAGE_PMD_MASK;
int page_nid = -1, this_nid = numa_node_id();
int target_nid, last_cpupid = -1;
bool migrated = false;
/* A PROT_NONE fault should not end up here */
BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
ptl = pmd_lock(mm, pmdp);
if (unlikely(!pmd_same(pmd, *pmdp)))
* If there are potential migrations, wait for completion and retry
* without disrupting NUMA hinting information. Do not relock and
* check_same as the page may no longer be mapped.
if (unlikely(pmd_trans_migrating(*pmdp))) {
page = pmd_page(*pmdp);
wait_on_page_locked(page);
page = pmd_page(pmd);
BUG_ON(is_huge_zero_page(page));
page_nid = page_to_nid(page);
last_cpupid = page_cpupid_last(page);
count_vm_numa_event(NUMA_HINT_FAULTS);
if (page_nid == this_nid) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
flags |= TNF_FAULT_LOCAL;
/* See similar comment in do_numa_page for explanation */
if (!(vma->vm_flags & VM_WRITE))
flags |= TNF_NO_GROUP;
* Acquire the page lock to serialise THP migrations but avoid dropping
* page_table_lock if at all possible
page_locked = trylock_page(page);
target_nid = mpol_misplaced(page, vma, haddr);
if (target_nid == -1) {
/* If the page was locked, there are no parallel migrations */
/* Migration could have started since the pmd_trans_migrating check */
wait_on_page_locked(page);
* Page is misplaced. Page lock serialises migrations. Acquire anon_vma
* to serialise splits
anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PMD did not change while page_table_lock was released */
if (unlikely(!pmd_same(pmd, *pmdp))) {
/* Bail if we fail to protect against THP splits for any reason */
if (unlikely(!anon_vma)) {
* Migrate the THP to the requested node, returns with page unlocked
* and access rights restored.
migrated = migrate_misplaced_transhuge_page(mm, vma,
pmdp, pmd, addr, page, target_nid);
flags |= TNF_MIGRATED;
page_nid = target_nid;
flags |= TNF_MIGRATE_FAIL;
BUG_ON(!PageLocked(page));
was_writable = pmd_write(pmd);
pmd = pmd_modify(pmd, vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
pmd = pmd_mkwrite(pmd);
set_pmd_at(mm, haddr, pmdp, pmd);
update_mmu_cache_pmd(vma, addr, pmdp);
page_unlock_anon_vma_read(anon_vma);
task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr)
if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
* For architectures like ppc64 we look at the deposited pgtable
* when calling pmdp_huge_get_and_clear. So do the
* pgtable_trans_huge_withdraw after finishing pmdp related
* operations.
orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_dax(vma)) {
if (is_huge_zero_pmd(orig_pmd))
put_huge_zero_page();
} else if (is_huge_zero_pmd(orig_pmd)) {
pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
atomic_long_dec(&tlb->mm->nr_ptes);
put_huge_zero_page();
struct page *page = pmd_page(orig_pmd);
page_remove_rmap(page, true);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON_PAGE(!PageHead(page), page);
pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
atomic_long_dec(&tlb->mm->nr_ptes);
tlb_remove_page(tlb, page);
int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd)
spinlock_t *old_ptl, *new_ptl;
struct mm_struct *mm = vma->vm_mm;
if ((old_addr & ~HPAGE_PMD_MASK) ||
(new_addr & ~HPAGE_PMD_MASK) ||
old_end - old_addr < HPAGE_PMD_SIZE ||
(new_vma->vm_flags & VM_NOHUGEPAGE))
* The destination pmd shouldn't be established; free_pgtables()
* should have released it.
if (WARN_ON(!pmd_none(*new_pmd))) {
VM_BUG_ON(pmd_trans_huge(*new_pmd));
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_sem prevents deadlock.
ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
* - 0 if PMD could not be locked
* - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
* - HPAGE_PMD_NR if protections changed and TLB flush is necessary
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot, int prot_numa)
struct mm_struct *mm = vma->vm_mm;
if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
bool preserve_write = prot_numa && pmd_write(*pmd);
* Avoid trapping faults against the zero page. The read-only
* data is likely to be read-cached on the local CPU and
* local/remote hits to the zero page are not interesting.
if (prot_numa && is_huge_zero_pmd(*pmd)) {
if (!prot_numa || !pmd_protnone(*pmd)) {
entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
entry = pmd_mkwrite(entry);
set_pmd_at(mm, addr, pmd, entry);
BUG_ON(!preserve_write && pmd_write(entry));
* Returns 1 if a given pmd maps a stable (not under splitting) thp.
* Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
* Note that if it returns 1, this routine returns without unlocking page
* table locks. So callers must unlock them.
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
*ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
wait_split_huge_page(vma->anon_vma, pmd);
/* Thp mapped by 'pmd' is stable, so we can
* handle it as it is. */
* This function returns whether a given @page is mapped onto the @address
* in the virtual space of @mm.
* When it's true, this function returns *pmd with holding the page table lock
* and passing it back to the caller via @ptl.
* If it's false, returns NULL without holding the page table lock.
pmd_t *page_check_address_pmd(struct page *page,
struct mm_struct *mm,
unsigned long address,
enum page_check_address_pmd_flag flag,
if (address & ~HPAGE_PMD_MASK)
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
pmd = pmd_offset(pud, address);
*ptl = pmd_lock(mm, pmd);
if (!pmd_present(*pmd))
if (pmd_page(*pmd) != page)
* split_vma() may create temporary aliased mappings. There is
* no risk as long as all huge pmd are found and have their
* splitting bit set before __split_huge_page_refcount
* runs. Finding the same huge pmd more than once during the
* same rmap walk is not a problem.
if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
pmd_trans_splitting(*pmd))
if (pmd_trans_huge(*pmd)) {
VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
!pmd_trans_splitting(*pmd));
static int __split_huge_page_splitting(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
struct mm_struct *mm = vma->vm_mm;
/* For mmu_notifiers */
const unsigned long mmun_start = address;
const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
* We can't temporarily set the pmd to null in order
* to split it, the pmd must remain marked huge at all
* times or the VM won't take the pmd_trans_huge paths
* and it won't wait on the anon_vma->root->rwsem to
* serialize against split_huge_page*.
pmdp_splitting_flush(vma, address, pmd);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
static void __split_huge_page_refcount(struct page *page,
struct list_head *list)
struct zone *zone = page_zone(page);
struct lruvec *lruvec;
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
compound_lock(page);
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(page);
for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
struct page *page_tail = page + i;
/* tail_page->_mapcount cannot change */
BUG_ON(page_mapcount(page_tail) < 0);
tail_count += page_mapcount(page_tail);
/* check for overflow */
BUG_ON(tail_count < 0);
BUG_ON(atomic_read(&page_tail->_count) != 0);
* tail_page->_count is zero and not changing from
* under us. But get_page_unless_zero() may be running
* from under us on the tail_page. If we used
* atomic_set() below instead of atomic_add(), we
* would then run atomic_set() concurrently with
* get_page_unless_zero(), and atomic_set() is
* implemented in C, not using locked ops. spin_unlock
* on x86 sometimes uses locked ops because of PPro
* errata 66, 92, so unless somebody can guarantee
* atomic_set() here would be safe on all archs (and
* not only on x86), it's safer to use atomic_add().
atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
&page_tail->_count);
/* after clearing PageTail the gup refcount can be released */
smp_mb__after_atomic();
page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
(1L << PG_uptodate) |
(1L << PG_unevictable)));
page_tail->flags |= (1L << PG_dirty);
clear_compound_head(page_tail);
if (page_is_young(page))
set_page_young(page_tail);
if (page_is_idle(page))
set_page_idle(page_tail);
* __split_huge_page_splitting() already set the
* splitting bit in all pmds that could map this
* hugepage; that will ensure no CPU can alter the
* mapcount on the head page. The mapcount is only
* accounted in the head page and it has to be
* transferred to all tail pages in the below code. So
* for this code to be safe, during the split the
* mapcount can't change. But that doesn't mean userland can't
* keep changing and reading the page contents while
* we transfer the mapcount, so the pmd splitting
* status is achieved by setting a reserved bit in the
* pmd, not by clearing the present bit.
page_tail->_mapcount = page->_mapcount;
BUG_ON(page_tail->mapping != TAIL_MAPPING);
page_tail->mapping = page->mapping;
page_tail->index = page->index + i;
page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
lru_add_page_tail(page, page_tail, lruvec, list);
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
ClearPageCompound(page);
compound_unlock(page);
spin_unlock_irq(&zone->lru_lock);
for (i = 1; i < HPAGE_PMD_NR; i++) {
struct page *page_tail = page + i;
BUG_ON(page_count(page_tail) <= 0);
* Tail pages may be freed if there wasn't any mapping
* like if add_to_swap() is running on a lru page that
* had its mapping zapped. And freeing these pages
* requires taking the lru_lock so we do the put_page
* of the tail pages after the split is complete.
put_page(page_tail);
* Only the head page (now a regular page) is required
* to be pinned by the caller.
BUG_ON(page_count(page) <= 0);
static int __split_huge_page_map(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
struct mm_struct *mm = vma->vm_mm;
unsigned long haddr;
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
if (pmd_write(*pmd))
BUG_ON(page_mapcount(page) != 1);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
BUG_ON(PageCompound(page+i));
* Note that NUMA hinting access restrictions are not
* transferred to avoid any possibility of altering
* permissions across VMAs.
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (!pmd_write(*pmd))
entry = pte_wrprotect(entry);
if (!pmd_young(*pmd))
entry = pte_mkold(entry);
pte = pte_offset_map(&_pmd, haddr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
smp_wmb(); /* make pte visible before pmd */
* Up to this point the pmd is present and huge and
* userland has had full access to the hugepage
* during the split (which happens in place). If we
* overwrite the pmd with the not-huge version
* pointing to the pte here (which of course we could
* if all CPUs were bug free), userland could trigger
* a small page size TLB miss on the small sized TLB
* while the hugepage TLB entry is still established
* in the huge TLB. Some CPUs don't like that. See
* http://support.amd.com/us/Processor_TechDocs/41322.pdf,
* Erratum 383 on page 93. Intel should be safe but
* also warns that it's only safe if the permission
* and cache attributes of the two entries loaded in
* the two TLBs are identical (which should be the case
* here). But it is generally safer to never allow
* small and huge TLB entries for the same virtual
* address to be loaded simultaneously. So instead of
* doing "pmd_populate(); flush_pmd_tlb_range();" we first
* mark the current pmd notpresent (atomically because
* here the pmd_trans_huge and pmd_trans_splitting
* must remain set on the pmd at all times until the
* split is complete for this pmd), then we flush the
* SMP TLB and finally we write the non-huge version
* of the pmd entry with pmd_populate.
pmdp_invalidate(vma, address, pmd);
pmd_populate(mm, pmd, pgtable);
/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
struct anon_vma *anon_vma,
struct list_head *list)
int mapcount, mapcount2;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct anon_vma_chain *avc;
BUG_ON(!PageHead(page));
BUG_ON(PageTail(page));
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
mapcount += __split_huge_page_splitting(page, vma, addr);
* It is critical that new vmas are added to the tail of the
* anon_vma list. This guarantees that if copy_huge_pmd() runs
* and establishes a child pmd before
* __split_huge_page_splitting() freezes the parent pmd (so if
* we fail to prevent copy_huge_pmd() from running until the
* whole __split_huge_page() is complete), we will still see
* the newly established pmd of the child later during the
* walk, to be able to set it as pmd_trans_splitting too.
if (mapcount != page_mapcount(page)) {
pr_err("mapcount %d page_mapcount %d\n",
mapcount, page_mapcount(page));
__split_huge_page_refcount(page, list);
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
mapcount2 += __split_huge_page_map(page, vma, addr);
if (mapcount != mapcount2) {
pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
mapcount, mapcount2, page_mapcount(page));
* Split a hugepage into normal pages. This doesn't change the position of the
* head page. If @list is null, tail pages will be added to the LRU list;
* otherwise, to @list. Both the head page and the tail pages will inherit
* mapping, flags, and so on from the hugepage.
* Return 0 if the hugepage was split successfully, otherwise return 1.
int split_huge_page_to_list(struct page *page, struct list_head *list)
struct anon_vma *anon_vma;
BUG_ON(is_huge_zero_page(page));
BUG_ON(!PageAnon(page));
* The caller does not necessarily hold an mmap_sem that would prevent
* the anon_vma from disappearing, so we first take a reference to it
* and then lock the anon_vma for write. This is similar to
* page_lock_anon_vma_read except the write lock is taken to serialise
* against parallel split or collapse operations.
anon_vma = page_get_anon_vma(page);
anon_vma_lock_write(anon_vma);
if (!PageCompound(page))
BUG_ON(!PageSwapBacked(page));
__split_huge_page(page, anon_vma, list);
count_vm_event(THP_SPLIT);
BUG_ON(PageCompound(page));
anon_vma_unlock_write(anon_vma);
put_anon_vma(anon_vma);
#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
* qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
* can't handle this properly after s390_enable_sie, so we simply
* ignore the madvise to prevent qemu from causing a SIGSEGV.
if (mm_has_pgste(vma->vm_mm))
* Be somewhat over-protective like KSM for now!
if (*vm_flags & VM_NO_THP)
*vm_flags &= ~VM_NOHUGEPAGE;
*vm_flags |= VM_HUGEPAGE;
* If the vma becomes good for khugepaged to scan,
* register it here without waiting for a page fault that
* may not happen any time soon.
if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
case MADV_NOHUGEPAGE:
* Be somewhat over-protective like KSM for now!
if (*vm_flags & VM_NO_THP)
*vm_flags &= ~VM_HUGEPAGE;
*vm_flags |= VM_NOHUGEPAGE;
* Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
* this vma even if we leave the mm registered in khugepaged if
* it got registered before VM_NOHUGEPAGE was set.
static int __init khugepaged_slab_init(void)
mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
sizeof(struct mm_slot),
__alignof__(struct mm_slot), 0, NULL);
static void __init khugepaged_slab_exit(void)
kmem_cache_destroy(mm_slot_cache);
static inline struct mm_slot *alloc_mm_slot(void)
if (!mm_slot_cache) /* initialization failed */
return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
static inline void free_mm_slot(struct mm_slot *mm_slot)
kmem_cache_free(mm_slot_cache, mm_slot);
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
struct mm_slot *mm_slot;
hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
if (mm == mm_slot->mm)
static void insert_to_mm_slots_hash(struct mm_struct *mm,
struct mm_slot *mm_slot)
hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
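/*
 * Lookup sketch (editor's addition, restating the two helpers above):
 * the hash is keyed on the mm_struct pointer value itself, so insertion
 * and lookup must hash the same pointer, e.g.:
 *
 *	insert_to_mm_slots_hash(mm, slot);
 *	...
 *	struct mm_slot *found = get_mm_slot(mm);	(yields "slot")
 */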
static inline int khugepaged_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
int __khugepaged_enter(struct mm_struct *mm)
struct mm_slot *mm_slot;
mm_slot = alloc_mm_slot();
/* __khugepaged_exit() must not run from under us */
VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
free_mm_slot(mm_slot);
spin_lock(&khugepaged_mm_lock);
insert_to_mm_slots_hash(mm, mm_slot);
* Insert just behind the scanning cursor, to let the area settle down.
wakeup = list_empty(&khugepaged_scan.mm_head);
list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
atomic_inc(&mm->mm_count);
wake_up_interruptible(&khugepaged_wait);
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
unsigned long vm_flags)
unsigned long hstart, hend;
* Not yet faulted in so we will register later in the
* page fault if needed.
/* khugepaged not yet working on file or special mappings */
VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
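/*
 * Worked example of the rounding above (editor's addition; assumes 2MiB
 * THPs, so HPAGE_PMD_SIZE == 0x200000): vm_start == 0x201000 rounds up
 * to hstart == 0x400000 and vm_end == 0x7ff000 rounds down to
 * hend == 0x600000; only [hstart, hend) is eligible for collapse.
 */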
return khugepaged_enter(vma, vm_flags);
void __khugepaged_exit(struct mm_struct *mm)
struct mm_slot *mm_slot;
spin_lock(&khugepaged_mm_lock);
mm_slot = get_mm_slot(mm);
if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
hash_del(&mm_slot->hash);
list_del(&mm_slot->mm_node);
spin_unlock(&khugepaged_mm_lock);
clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
free_mm_slot(mm_slot);
} else if (mm_slot) {
* This is required to serialize against
* khugepaged_test_exit() (which is guaranteed to run
* under mmap sem read mode). Stop here (after we
* return all pagetables will be destroyed) until
* khugepaged has finished working on the pagetables
* under the mmap_sem.
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
static void release_pte_page(struct page *page)
/* 0 stands for page_is_file_cache(page) == false */
dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
putback_lru_page(page);
static void release_pte_pages(pte_t *pte, pte_t *_pte)
while (--_pte >= pte) {
pte_t pteval = *_pte;
if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
release_pte_page(pte_page(pteval));
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
struct page *page = NULL;
int none_or_zero = 0, result = 0;
bool referenced = false, writable = false;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval) || (pte_present(pteval) &&
is_zero_pfn(pte_pfn(pteval)))) {
if (!userfaultfd_armed(vma) &&
++none_or_zero <= khugepaged_max_ptes_none) {
result = SCAN_EXCEED_NONE_PTE;
if (!pte_present(pteval)) {
result = SCAN_PTE_NON_PRESENT;
page = vm_normal_page(vma, address, pteval);
if (unlikely(!page)) {
result = SCAN_PAGE_NULL;
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
* We can do it before isolate_lru_page because the
* page can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
if (!trylock_page(page)) {
result = SCAN_PAGE_LOCK;
* cannot use mapcount: can't collapse if there's a gup pin.
* The page must only be referenced by the scanned process
* and page swap cache.
if (page_count(page) != 1 + !!PageSwapCache(page)) {
result = SCAN_PAGE_COUNT;
if (pte_write(pteval)) {
if (PageSwapCache(page) && !reuse_swap_page(page)) {
result = SCAN_SWAP_CACHE_PAGE;
2305 * Page is not in the swap cache. It can be collapsed
2311 * Isolate the page to avoid collapsing an hugepage
2312 * currently in use by the VM.
2314 if (isolate_lru_page(page)) {
2316 result = SCAN_DEL_PAGE_LRU;
2319 /* 0 stands for page_is_file_cache(page) == false */
2320 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2321 VM_BUG_ON_PAGE(!PageLocked(page), page);
2322 VM_BUG_ON_PAGE(PageLRU(page), page);
2324 /* If there is no mapped pte young don't collapse the page */
2325 if (pte_young(pteval) ||
2326 page_is_young(page) || PageReferenced(page) ||
2327 mmu_notifier_test_young(vma->vm_mm, address))
2330 if (likely(writable)) {
2331 if (likely(referenced)) {
2332 result = SCAN_SUCCEED;
2333 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2334 referenced, writable, result);
2338 result = SCAN_PAGE_RO;
2342 release_pte_pages(pte, _pte);
2343 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
2344 referenced, writable, result);
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}
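
/*
 * Note that the copy loop above takes ptl only around the individual
 * pte_clear()/page_remove_rmap() pairs: that is safe because every
 * source page is locked and LRU-isolated, and the caller has already
 * cleared and flushed the pmd, so no other CPU can fault on or modify
 * these ptes concurrently.
 */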
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If zone_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!zone_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}
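
/*
 * Example of the policy above: with zone_reclaim_mode enabled and
 * pages already counted on node 0, finding a page on a node whose
 * node_distance() from node 0 exceeds RECLAIM_DISTANCE aborts the
 * scan, so khugepaged never builds a hugepage whose source pages span
 * distant NUMA nodes.
 */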
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
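
/*
 * Tie-breaking example for the code above: if nodes 0 and 1 both hit
 * 256 ptes, the first loop picks node 0; when node 0 was also the
 * previous target, the second loop advances to node 1, so repeated
 * collapses alternate between the tied nodes instead of draining one
 * node's hugepage reserves.
 */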
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
		      unsigned long address, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}
static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
		      unsigned long address, int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif /* CONFIG_NUMA */
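
/*
 * Summary of the two configurations above: with CONFIG_NUMA each
 * collapse allocates a fresh hugepage on the node chosen by
 * khugepaged_find_target_node() (dropping mmap_sem first, since the
 * allocation may involve sleeping compaction); without NUMA a single
 * hugepage is preallocated via khugepaged_prealloc_page() and simply
 * handed over here.
 */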
static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;
	if (vma->vm_flags & VM_LOCKED)
		return false;
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
	return true;
}
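
/*
 * For reference, the typical way a vma comes to pass the check above
 * is an explicit opt-in from userspace (sketch; assumes THP is built
 * in and set to "always" or "madvise"):
 *
 *	buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
 *		   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_HUGEPAGE);	(sets VM_HUGEPAGE)
 *
 * mlock()ed ranges (VM_LOCKED) and file-backed vmas (vma->vm_ops set)
 * stay ineligible either way.
 */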
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   struct vm_area_struct *vma,
				   int node)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	unsigned long hstart, hend;
	struct mem_cgroup *memcg;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
		__GFP_THISNODE;

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm))) {
		result = SCAN_ANY_PROCESS;
		goto out;
	}

	vma = find_vma(mm, address);
	if (!vma) {
		result = SCAN_VMA_NULL;
		goto out;
	}
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
		result = SCAN_ADDRESS_RANGE;
		goto out;
	}
	if (!hugepage_vma_check(vma)) {
		result = SCAN_VMA_CHECK;
		goto out;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;

out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
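
/*
 * Collapse protocol in brief: allocate the hugepage, retake mmap_sem
 * for write, revalidate the vma, clear and flush the pmd (which stops
 * gup_fast), isolate and lock all subpages, copy them into the
 * hugepage, then install the huge pmd. Any failure after the pmd was
 * cleared repopulates the original page table, so the process never
 * observes a hole in its address space.
 */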
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE;
	bool writable = false, referenced = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = true;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_NO_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
				     none_or_zero, result);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
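
/*
 * Note on the accounting above: progress advances by HPAGE_PMD_NR for
 * every pmd range scanned, so the "pages" budget is consumed in
 * whole-pmd units rather than per base page actually examined.
 */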
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}
static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
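
/*
 * pass_through_head above caps one khugepaged_do_scan() invocation at
 * roughly two trips past the head of the mm list: finishing the mm the
 * cursor was parked on plus one full pass, after which the budget is
 * treated as spent even if "pages" was not reached.
 */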
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	put_huge_zero_page();
}
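
/*
 * The temporary _pmd above lets the withdrawn page table be filled
 * with zero-page ptes while the real pmd stays clear, so no CPU can
 * observe a half-populated table; only after smp_wmb() is the
 * completed table reconnected with pmd_populate().
 */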
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page = NULL;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
again:
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd)))
		goto unlock;
	if (vma_is_dax(vma)) {
		pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		if (is_huge_zero_pmd(_pmd))
			put_huge_zero_page();
	} else if (is_huge_zero_pmd(*pmd)) {
		__split_huge_zero_page_pmd(vma, haddr, pmd);
	} else {
		page = pmd_page(*pmd);
		VM_BUG_ON_PAGE(!page_count(page), page);
		get_page(page);
	}
unlock:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	if (!page)
		return;

	split_huge_page(page);
	put_page(page);

	/*
	 * We don't always have down_write of mmap_sem here: a racing
	 * do_huge_pmd_wp_page() might have copied-on-write to another
	 * huge page before our split_huge_page() got the anon_vma lock.
	 */
	if (unlikely(pmd_trans_huge(*pmd)))
		goto again;
}
static void split_huge_pmd_address(struct vm_area_struct *vma,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	__split_huge_page_pmd(vma, address, pmd);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart);
	}
}
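
/*
 * Concrete case for the checks above: an munmap() whose start falls in
 * the middle of a 2MB mapping leaves "start" unaligned
 * (start & ~HPAGE_PMD_MASK != 0) while the enclosing aligned range
 * still fits inside the vma, so the huge pmd is split first and the
 * partial range can then be unmapped at base-page granularity.
 */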