1 /*
2  * Memory Migration functionality - linux/mm/migrate.c
3  *
4  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5  *
6  * Page migration was first developed in the context of the memory hotplug
7  * project. The main authors of the migration code are:
8  *
9  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10  * Hirokazu Takahashi <taka@valinux.co.jp>
11  * Dave Hansen <haveblue@us.ibm.com>
12  * Christoph Lameter
13  */
14
15 #include <linux/migrate.h>
16 #include <linux/export.h>
17 #include <linux/swap.h>
18 #include <linux/swapops.h>
19 #include <linux/pagemap.h>
20 #include <linux/buffer_head.h>
21 #include <linux/mm_inline.h>
22 #include <linux/nsproxy.h>
23 #include <linux/pagevec.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/hugetlb.h>
37 #include <linux/hugetlb_cgroup.h>
38 #include <linux/gfp.h>
39 #include <linux/balloon_compaction.h>
40 #include <linux/mmu_notifier.h>
41 #include <linux/page_idle.h>
42 #include <linux/page_owner.h>
43 #include <linux/sched/mm.h>
44
45 #include <asm/tlbflush.h>
46
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/migrate.h>
49
50 #include "internal.h"
51
52 /*
53  * migrate_prep() needs to be called before we start compiling a list of pages
54  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
55  * undesirable, use migrate_prep_local()
56  */
57 int migrate_prep(void)
58 {
59         /*
60          * Clear the LRU lists so pages can be isolated.
61          * Note that pages may be moved off the LRU after we have
62          * drained them. Those pages will fail to migrate like other
63          * pages that may be busy.
64          */
65         lru_add_drain_all();
66
67         return 0;
68 }
69
70 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
71 int migrate_prep_local(void)
72 {
73         lru_add_drain();
74
75         return 0;
76 }
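
/*
 * A minimal usage sketch (illustrative only; the pagelist and the
 * allocation callback "alloc_target_page" are hypothetical, and the
 * NR_ISOLATED_* accounting done by real callers is omitted): drain the
 * LRU pagevecs, isolate the pages of interest onto a private list and
 * hand that list to migrate_pages(), putting the pages back on failure.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, alloc_target_page, NULL, private,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */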
77
78 int isolate_movable_page(struct page *page, isolate_mode_t mode)
79 {
80         struct address_space *mapping;
81
82         /*
83          * Avoid burning cycles with pages that are yet under __free_pages(),
84          * or just got freed under us.
85          *
86          * In case we 'win' a race for a movable page being freed under us and
87          * raise its refcount, preventing __free_pages() from doing its job,
88          * the put_page() at the end of this block will take care of
89          * releasing this page, thus avoiding a nasty leakage.
90          */
91         if (unlikely(!get_page_unless_zero(page)))
92                 goto out;
93
94         /*
95          * Check PageMovable before holding a PG_lock because the page's owner
96          * assumes that nobody touches the PG_lock of a newly allocated page,
97          * so unconditionally grabbing the lock would break that assumption.
98          */
99         if (unlikely(!__PageMovable(page)))
100                 goto out_putpage;
101         /*
102          * As movable pages are not isolated from LRU lists, concurrent
103          * compaction threads can race against page migration functions
104          * as well as race against a page being released.
105          *
106          * In order to avoid having an already isolated movable page
107          * being (wrongly) re-isolated while it is under migration,
108          * or to avoid attempting to isolate pages being released,
109          * let's be sure we have the page lock
110          * before proceeding with the movable page isolation steps.
111          */
112         if (unlikely(!trylock_page(page)))
113                 goto out_putpage;
114
115         if (!PageMovable(page) || PageIsolated(page))
116                 goto out_no_isolated;
117
118         mapping = page_mapping(page);
119         VM_BUG_ON_PAGE(!mapping, page);
120
121         if (!mapping->a_ops->isolate_page(page, mode))
122                 goto out_no_isolated;
123
124         /* Drivers shouldn't use the PG_isolated bit of page->flags */
125         WARN_ON_ONCE(PageIsolated(page));
126         __SetPageIsolated(page);
127         unlock_page(page);
128
129         return 0;
130
131 out_no_isolated:
132         unlock_page(page);
133 out_putpage:
134         put_page(page);
135 out:
136         return -EBUSY;
137 }
138
139 /* It should be called on a page which is PG_movable */
140 void putback_movable_page(struct page *page)
141 {
142         struct address_space *mapping;
143
144         VM_BUG_ON_PAGE(!PageLocked(page), page);
145         VM_BUG_ON_PAGE(!PageMovable(page), page);
146         VM_BUG_ON_PAGE(!PageIsolated(page), page);
147
148         mapping = page_mapping(page);
149         mapping->a_ops->putback_page(page);
150         __ClearPageIsolated(page);
151 }
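
/*
 * For reference, a driver that owns non-LRU movable pages provides the
 * callbacks used above through its address_space_operations and marks
 * each page with __SetPageMovable() under the page lock.  A rough sketch
 * with hypothetical names (the full contract is described in
 * Documentation/vm/page_migration):
 *
 *	static const struct address_space_operations example_movable_aops = {
 *		.isolate_page	= example_isolate,
 *		.migratepage	= example_migratepage,
 *		.putback_page	= example_putback,
 *	};
 */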
152
153 /*
154  * Put previously isolated pages back onto the appropriate lists
155  * from where they were once taken off for compaction/migration.
156  *
157  * This function shall be used whenever the isolated pageset has been
158  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
159  * and isolate_huge_page().
160  */
161 void putback_movable_pages(struct list_head *l)
162 {
163         struct page *page;
164         struct page *page2;
165
166         list_for_each_entry_safe(page, page2, l, lru) {
167                 if (unlikely(PageHuge(page))) {
168                         putback_active_hugepage(page);
169                         continue;
170                 }
171                 list_del(&page->lru);
172                 /*
173                  * We isolated a non-LRU movable page, so here we can use
174                  * __PageMovable because an LRU page's mapping cannot have
175                  * PAGE_MAPPING_MOVABLE.
176                  */
177                 if (unlikely(__PageMovable(page))) {
178                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
179                         lock_page(page);
180                         if (PageMovable(page))
181                                 putback_movable_page(page);
182                         else
183                                 __ClearPageIsolated(page);
184                         unlock_page(page);
185                         put_page(page);
186                 } else {
187                         dec_node_page_state(page, NR_ISOLATED_ANON +
188                                         page_is_file_cache(page));
189                         putback_lru_page(page);
190                 }
191         }
192 }
193
194 /*
195  * Restore a potential migration pte to a working pte entry
196  */
197 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
198                                  unsigned long addr, void *old)
199 {
200         struct page_vma_mapped_walk pvmw = {
201                 .page = old,
202                 .vma = vma,
203                 .address = addr,
204                 .flags = PVMW_SYNC | PVMW_MIGRATION,
205         };
206         struct page *new;
207         pte_t pte;
208         swp_entry_t entry;
209
210         VM_BUG_ON_PAGE(PageTail(page), page);
211         while (page_vma_mapped_walk(&pvmw)) {
212                 if (PageKsm(page))
213                         new = page;
214                 else
215                         new = page - pvmw.page->index +
216                                 linear_page_index(vma, pvmw.address);
217
218                 get_page(new);
219                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
220                 if (pte_swp_soft_dirty(*pvmw.pte))
221                         pte = pte_mksoft_dirty(pte);
222
223                 /*
224                  * Recheck VMA as permissions can change since migration started
225                  */
226                 entry = pte_to_swp_entry(*pvmw.pte);
227                 if (is_write_migration_entry(entry))
228                         pte = maybe_mkwrite(pte, vma);
229
230                 flush_dcache_page(new);
231 #ifdef CONFIG_HUGETLB_PAGE
232                 if (PageHuge(new)) {
233                         pte = pte_mkhuge(pte);
234                         pte = arch_make_huge_pte(pte, vma, new, 0);
235                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
236                         if (PageAnon(new))
237                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
238                         else
239                                 page_dup_rmap(new, true);
240                 } else
241 #endif
242                 {
243                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
244
245                         if (PageAnon(new))
246                                 page_add_anon_rmap(new, vma, pvmw.address, false);
247                         else
248                                 page_add_file_rmap(new, false);
249                 }
250                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
251                         mlock_vma_page(new);
252
253                 /* No need to invalidate - it was non-present before */
254                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
255         }
256
257         return true;
258 }
259
260 /*
261  * Get rid of all migration entries and replace them with
262  * references to the indicated page.
263  */
264 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
265 {
266         struct rmap_walk_control rwc = {
267                 .rmap_one = remove_migration_pte,
268                 .arg = old,
269         };
270
271         if (locked)
272                 rmap_walk_locked(new, &rwc);
273         else
274                 rmap_walk(new, &rwc);
275 }
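
/*
 * For context, the migration entries removed above are installed by
 * try_to_unmap() with TTU_MIGRATION.  In sketch form (simplified from
 * the rmap code; huge page and locking details omitted):
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	swp_pte = swp_entry_to_pte(entry);
 *	if (pte_soft_dirty(pteval))
 *		swp_pte = pte_swp_mksoft_dirty(swp_pte);
 *	set_pte_at(mm, address, ptep, swp_pte);
 */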
276
277 /*
278  * Something used the pte of a page under migration. We need to
279  * get to the page and wait until migration is finished.
280  * When we return from this function the fault will be retried.
281  */
282 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
283                                 spinlock_t *ptl)
284 {
285         pte_t pte;
286         swp_entry_t entry;
287         struct page *page;
288
289         spin_lock(ptl);
290         pte = *ptep;
291         if (!is_swap_pte(pte))
292                 goto out;
293
294         entry = pte_to_swp_entry(pte);
295         if (!is_migration_entry(entry))
296                 goto out;
297
298         page = migration_entry_to_page(entry);
299
300         /*
301          * Once the radix-tree replacement in page migration has started,
302          * page_count *must* be zero. And we don't want to call
303          * wait_on_page_locked() against a page without get_page().
304          * So we use get_page_unless_zero() here. Even if it fails, the
305          * page fault will simply occur again.
306          */
307         if (!get_page_unless_zero(page))
308                 goto out;
309         pte_unmap_unlock(ptep, ptl);
310         wait_on_page_locked(page);
311         put_page(page);
312         return;
313 out:
314         pte_unmap_unlock(ptep, ptl);
315 }
316
317 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
318                                 unsigned long address)
319 {
320         spinlock_t *ptl = pte_lockptr(mm, pmd);
321         pte_t *ptep = pte_offset_map(pmd, address);
322         __migration_entry_wait(mm, ptep, ptl);
323 }
324
325 void migration_entry_wait_huge(struct vm_area_struct *vma,
326                 struct mm_struct *mm, pte_t *pte)
327 {
328         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
329         __migration_entry_wait(mm, pte, ptl);
330 }
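
/*
 * A sketch of the typical caller, simplified from the fault path
 * (do_swap_page()): when a swap pte turns out to be a migration entry,
 * the fault handler waits for migration to finish and the fault is
 * then retried.
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(vma->vm_mm, vmf->pmd,
 *					     vmf->address);
 *		...
 *	}
 */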
331
332 #ifdef CONFIG_BLOCK
333 /* Returns true if all buffers are successfully locked */
334 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
335                                                         enum migrate_mode mode)
336 {
337         struct buffer_head *bh = head;
338
339         /* Simple case, sync compaction */
340         if (mode != MIGRATE_ASYNC) {
341                 do {
342                         get_bh(bh);
343                         lock_buffer(bh);
344                         bh = bh->b_this_page;
345
346                 } while (bh != head);
347
348                 return true;
349         }
350
351         /* async case, we cannot block on lock_buffer so use trylock_buffer */
352         do {
353                 get_bh(bh);
354                 if (!trylock_buffer(bh)) {
355                         /*
356                          * We failed to lock the buffer and cannot stall in
357                          * async migration. Release the taken locks
358                          */
359                         struct buffer_head *failed_bh = bh;
360                         put_bh(failed_bh);
361                         bh = head;
362                         while (bh != failed_bh) {
363                                 unlock_buffer(bh);
364                                 put_bh(bh);
365                                 bh = bh->b_this_page;
366                         }
367                         return false;
368                 }
369
370                 bh = bh->b_this_page;
371         } while (bh != head);
372         return true;
373 }
374 #else
375 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
376                                                         enum migrate_mode mode)
377 {
378         return true;
379 }
380 #endif /* CONFIG_BLOCK */
381
382 /*
383  * Replace the page in the mapping.
384  *
385  * The number of remaining references must be:
386  * 1 for anonymous pages without a mapping
387  * 2 for pages with a mapping
388  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
389  */
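/*
 * A worked example of the rule above (assuming extra_count == 0): a clean
 * pagecache page under migration is referenced once by the isolating
 * caller and once by the radix tree, so the expected count is 2; buffer
 * heads (PagePrivate) add one more reference, giving 3.  page_ref_freeze()
 * only succeeds when page_count() matches this exactly, which guarantees
 * that nobody else holds a transient reference while the radix tree slot
 * is replaced.
 */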
390 int migrate_page_move_mapping(struct address_space *mapping,
391                 struct page *newpage, struct page *page,
392                 struct buffer_head *head, enum migrate_mode mode,
393                 int extra_count)
394 {
395         struct zone *oldzone, *newzone;
396         int dirty;
397         int expected_count = 1 + extra_count;
398         void **pslot;
399
400         if (!mapping) {
401                 /* Anonymous page without mapping */
402                 if (page_count(page) != expected_count)
403                         return -EAGAIN;
404
405                 /* No turning back from here */
406                 newpage->index = page->index;
407                 newpage->mapping = page->mapping;
408                 if (PageSwapBacked(page))
409                         __SetPageSwapBacked(newpage);
410
411                 return MIGRATEPAGE_SUCCESS;
412         }
413
414         oldzone = page_zone(page);
415         newzone = page_zone(newpage);
416
417         spin_lock_irq(&mapping->tree_lock);
418
419         pslot = radix_tree_lookup_slot(&mapping->page_tree,
420                                         page_index(page));
421
422         expected_count += 1 + page_has_private(page);
423         if (page_count(page) != expected_count ||
424                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
425                 spin_unlock_irq(&mapping->tree_lock);
426                 return -EAGAIN;
427         }
428
429         if (!page_ref_freeze(page, expected_count)) {
430                 spin_unlock_irq(&mapping->tree_lock);
431                 return -EAGAIN;
432         }
433
434         /*
435          * In the async migration case of moving a page with buffers, lock the
436          * buffers using trylock before the mapping is moved. If the mapping
437          * were moved first and we then failed to lock the buffers, we could
438          * not move the mapping back due to the elevated page count and would
439          * have to block waiting on other references to be dropped.
440          */
441         if (mode == MIGRATE_ASYNC && head &&
442                         !buffer_migrate_lock_buffers(head, mode)) {
443                 page_ref_unfreeze(page, expected_count);
444                 spin_unlock_irq(&mapping->tree_lock);
445                 return -EAGAIN;
446         }
447
448         /*
449          * Now we know that no one else is looking at the page:
450          * no turning back from here.
451          */
452         newpage->index = page->index;
453         newpage->mapping = page->mapping;
454         get_page(newpage);      /* add cache reference */
455         if (PageSwapBacked(page)) {
456                 __SetPageSwapBacked(newpage);
457                 if (PageSwapCache(page)) {
458                         SetPageSwapCache(newpage);
459                         set_page_private(newpage, page_private(page));
460                 }
461         } else {
462                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
463         }
464
465         /* Move dirty while page refs frozen and newpage not yet exposed */
466         dirty = PageDirty(page);
467         if (dirty) {
468                 ClearPageDirty(page);
469                 SetPageDirty(newpage);
470         }
471
472         radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
473
474         /*
475          * Drop cache reference from old page by unfreezing
476          * to one less reference.
477          * We know this isn't the last reference.
478          */
479         page_ref_unfreeze(page, expected_count - 1);
480
481         spin_unlock(&mapping->tree_lock);
482         /* Leave irq disabled to prevent preemption while updating stats */
483
484         /*
485          * If moved to a different zone then also account
486          * the page for that zone. Other VM counters will be
487          * taken care of when we establish references to the
488          * new page and drop references to the old page.
489          *
490          * Note that anonymous pages are accounted for
491          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
492          * are mapped to swap space.
493          */
494         if (newzone != oldzone) {
495                 __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
496                 __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
497                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
498                         __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
499                         __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
500                 }
501                 if (dirty && mapping_cap_account_dirty(mapping)) {
502                         __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
503                         __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
504                         __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
505                         __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
506                 }
507         }
508         local_irq_enable();
509
510         return MIGRATEPAGE_SUCCESS;
511 }
512 EXPORT_SYMBOL(migrate_page_move_mapping);
513
514 /*
515  * The expected number of remaining references is the same as that
516  * of migrate_page_move_mapping().
517  */
518 int migrate_huge_page_move_mapping(struct address_space *mapping,
519                                    struct page *newpage, struct page *page)
520 {
521         int expected_count;
522         void **pslot;
523
524         spin_lock_irq(&mapping->tree_lock);
525
526         pslot = radix_tree_lookup_slot(&mapping->page_tree,
527                                         page_index(page));
528
529         expected_count = 2 + page_has_private(page);
530         if (page_count(page) != expected_count ||
531                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
532                 spin_unlock_irq(&mapping->tree_lock);
533                 return -EAGAIN;
534         }
535
536         if (!page_ref_freeze(page, expected_count)) {
537                 spin_unlock_irq(&mapping->tree_lock);
538                 return -EAGAIN;
539         }
540
541         newpage->index = page->index;
542         newpage->mapping = page->mapping;
543
544         get_page(newpage);
545
546         radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);
547
548         page_ref_unfreeze(page, expected_count - 1);
549
550         spin_unlock_irq(&mapping->tree_lock);
551
552         return MIGRATEPAGE_SUCCESS;
553 }
554
555 /*
556  * Gigantic pages are so large that we do not guarantee that page++ pointer
557  * arithmetic will work across the entire page.  We need something more
558  * specialized.
559  */
560 static void __copy_gigantic_page(struct page *dst, struct page *src,
561                                 int nr_pages)
562 {
563         int i;
564         struct page *dst_base = dst;
565         struct page *src_base = src;
566
567         for (i = 0; i < nr_pages; ) {
568                 cond_resched();
569                 copy_highpage(dst, src);
570
571                 i++;
572                 dst = mem_map_next(dst, dst_base, i);
573                 src = mem_map_next(src, src_base, i);
574         }
575 }
576
577 static void copy_huge_page(struct page *dst, struct page *src)
578 {
579         int i;
580         int nr_pages;
581
582         if (PageHuge(src)) {
583                 /* hugetlbfs page */
584                 struct hstate *h = page_hstate(src);
585                 nr_pages = pages_per_huge_page(h);
586
587                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
588                         __copy_gigantic_page(dst, src, nr_pages);
589                         return;
590                 }
591         } else {
592                 /* thp page */
593                 BUG_ON(!PageTransHuge(src));
594                 nr_pages = hpage_nr_pages(src);
595         }
596
597         for (i = 0; i < nr_pages; i++) {
598                 cond_resched();
599                 copy_highpage(dst + i, src + i);
600         }
601 }
602
603 /*
604  * Copy the page to its new location
605  */
606 void migrate_page_copy(struct page *newpage, struct page *page)
607 {
608         int cpupid;
609
610         if (PageHuge(page) || PageTransHuge(page))
611                 copy_huge_page(newpage, page);
612         else
613                 copy_highpage(newpage, page);
614
615         if (PageError(page))
616                 SetPageError(newpage);
617         if (PageReferenced(page))
618                 SetPageReferenced(newpage);
619         if (PageUptodate(page))
620                 SetPageUptodate(newpage);
621         if (TestClearPageActive(page)) {
622                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
623                 SetPageActive(newpage);
624         } else if (TestClearPageUnevictable(page))
625                 SetPageUnevictable(newpage);
626         if (PageChecked(page))
627                 SetPageChecked(newpage);
628         if (PageMappedToDisk(page))
629                 SetPageMappedToDisk(newpage);
630
631         /* Move dirty on pages not done by migrate_page_move_mapping() */
632         if (PageDirty(page))
633                 SetPageDirty(newpage);
634
635         if (page_is_young(page))
636                 set_page_young(newpage);
637         if (page_is_idle(page))
638                 set_page_idle(newpage);
639
640         /*
641          * Copy NUMA information to the new page, to prevent over-eager
642          * future migrations of this same page.
643          */
644         cpupid = page_cpupid_xchg_last(page, -1);
645         page_cpupid_xchg_last(newpage, cpupid);
646
647         ksm_migrate_page(newpage, page);
648         /*
649          * Please do not reorder this without considering how mm/ksm.c's
650          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
651          */
652         if (PageSwapCache(page))
653                 ClearPageSwapCache(page);
654         ClearPagePrivate(page);
655         set_page_private(page, 0);
656
657         /*
658          * If any waiters have accumulated on the new page then
659          * wake them up.
660          */
661         if (PageWriteback(newpage))
662                 end_page_writeback(newpage);
663
664         copy_page_owner(page, newpage);
665
666         mem_cgroup_migrate(page, newpage);
667 }
668 EXPORT_SYMBOL(migrate_page_copy);
669
670 /************************************************************
671  *                    Migration functions
672  ***********************************************************/
673
674 /*
675  * Common logic to directly migrate a single LRU page suitable for
676  * pages that do not use PagePrivate/PagePrivate2.
677  *
678  * Pages are locked upon entry and exit.
679  */
680 int migrate_page(struct address_space *mapping,
681                 struct page *newpage, struct page *page,
682                 enum migrate_mode mode)
683 {
684         int rc;
685
686         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
687
688         rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
689
690         if (rc != MIGRATEPAGE_SUCCESS)
691                 return rc;
692
693         migrate_page_copy(newpage, page);
694         return MIGRATEPAGE_SUCCESS;
695 }
696 EXPORT_SYMBOL(migrate_page);
697
698 #ifdef CONFIG_BLOCK
699 /*
700  * Migration function for pages with buffers. This function can only be used
701  * if the underlying filesystem guarantees that no other references to "page"
702  * exist.
703  */
704 int buffer_migrate_page(struct address_space *mapping,
705                 struct page *newpage, struct page *page, enum migrate_mode mode)
706 {
707         struct buffer_head *bh, *head;
708         int rc;
709
710         if (!page_has_buffers(page))
711                 return migrate_page(mapping, newpage, page, mode);
712
713         head = page_buffers(page);
714
715         rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
716
717         if (rc != MIGRATEPAGE_SUCCESS)
718                 return rc;
719
720         /*
721          * In the async case, migrate_page_move_mapping locked the buffers
722          * with an IRQ-safe spinlock held. In the sync case, the buffers
723          * need to be locked now
724          */
725         if (mode != MIGRATE_ASYNC)
726                 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
727
728         ClearPagePrivate(page);
729         set_page_private(newpage, page_private(page));
730         set_page_private(page, 0);
731         put_page(page);
732         get_page(newpage);
733
734         bh = head;
735         do {
736                 set_bh_page(bh, newpage, bh_offset(bh));
737                 bh = bh->b_this_page;
738
739         } while (bh != head);
740
741         SetPagePrivate(newpage);
742
743         migrate_page_copy(newpage, page);
744
745         bh = head;
746         do {
747                 unlock_buffer(bh);
748                 put_bh(bh);
749                 bh = bh->b_this_page;
750
751         } while (bh != head);
752
753         return MIGRATEPAGE_SUCCESS;
754 }
755 EXPORT_SYMBOL(buffer_migrate_page);
756 #endif
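
/*
 * For reference, filesystems opt into buffer-aware migration through the
 * migratepage callback in their address_space_operations.  A hypothetical
 * example (names are illustrative; block-based filesystems commonly point
 * the callback at buffer_migrate_page(), others at migrate_page()):
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * Mappings that provide no callback at all are handled by
 * fallback_migrate_page() below.
 */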
757
758 /*
759  * Writeback a page to clean the dirty state
760  */
761 static int writeout(struct address_space *mapping, struct page *page)
762 {
763         struct writeback_control wbc = {
764                 .sync_mode = WB_SYNC_NONE,
765                 .nr_to_write = 1,
766                 .range_start = 0,
767                 .range_end = LLONG_MAX,
768                 .for_reclaim = 1
769         };
770         int rc;
771
772         if (!mapping->a_ops->writepage)
773                 /* No write method for the address space */
774                 return -EINVAL;
775
776         if (!clear_page_dirty_for_io(page))
777                 /* Someone else already triggered a write */
778                 return -EAGAIN;
779
780         /*
781          * A dirty page may imply that the underlying filesystem has
782          * the page on some queue. So the page must be clean for
783          * migration. Writeout may mean we lose the lock and the
784          * page state is no longer what we checked for earlier.
785          * At this point we know that the migration attempt cannot
786          * be successful.
787          */
788         remove_migration_ptes(page, page, false);
789
790         rc = mapping->a_ops->writepage(page, &wbc);
791
792         if (rc != AOP_WRITEPAGE_ACTIVATE)
793                 /* unlocked. Relock */
794                 lock_page(page);
795
796         return (rc < 0) ? -EIO : -EAGAIN;
797 }
798
799 /*
800  * Default handling if a filesystem does not provide a migration function.
801  */
802 static int fallback_migrate_page(struct address_space *mapping,
803         struct page *newpage, struct page *page, enum migrate_mode mode)
804 {
805         if (PageDirty(page)) {
806                 /* Only writeback pages in full synchronous migration */
807                 if (mode != MIGRATE_SYNC)
808                         return -EBUSY;
809                 return writeout(mapping, page);
810         }
811
812         /*
813          * Buffers may be managed in a filesystem specific way.
814          * We must have no buffers or drop them.
815          */
816         if (page_has_private(page) &&
817             !try_to_release_page(page, GFP_KERNEL))
818                 return -EAGAIN;
819
820         return migrate_page(mapping, newpage, page, mode);
821 }
822
823 /*
824  * Move a page to a newly allocated page.
825  * The page is locked and all ptes have been successfully removed.
826  *
827  * The new page will have replaced the old page if this function
828  * is successful.
829  *
830  * Return value:
831  *   < 0 - error code
832  *  MIGRATEPAGE_SUCCESS - success
833  */
834 static int move_to_new_page(struct page *newpage, struct page *page,
835                                 enum migrate_mode mode)
836 {
837         struct address_space *mapping;
838         int rc = -EAGAIN;
839         bool is_lru = !__PageMovable(page);
840
841         VM_BUG_ON_PAGE(!PageLocked(page), page);
842         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
843
844         mapping = page_mapping(page);
845
846         if (likely(is_lru)) {
847                 if (!mapping)
848                         rc = migrate_page(mapping, newpage, page, mode);
849                 else if (mapping->a_ops->migratepage)
850                         /*
851                          * Most pages have a mapping and most filesystems
852                          * provide a migratepage callback. Anonymous pages
853                          * are part of swap space which also has its own
854                          * migratepage callback. This is the most common path
855                          * for page migration.
856                          */
857                         rc = mapping->a_ops->migratepage(mapping, newpage,
858                                                         page, mode);
859                 else
860                         rc = fallback_migrate_page(mapping, newpage,
861                                                         page, mode);
862         } else {
863                 /*
864                  * In the case of a non-LRU page, it could have been released
865                  * after the isolation step. In that case, we shouldn't try migration.
866                  */
867                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
868                 if (!PageMovable(page)) {
869                         rc = MIGRATEPAGE_SUCCESS;
870                         __ClearPageIsolated(page);
871                         goto out;
872                 }
873
874                 rc = mapping->a_ops->migratepage(mapping, newpage,
875                                                 page, mode);
876                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
877                         !PageIsolated(page));
878         }
879
880         /*
881          * When successful, old pagecache page->mapping must be cleared before
882          * page is freed; but stats require that PageAnon be left as PageAnon.
883          */
884         if (rc == MIGRATEPAGE_SUCCESS) {
885                 if (__PageMovable(page)) {
886                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
887
888                         /*
889                          * We clear PG_movable under page_lock so that no compactor
890                          * can try to migrate this page.
891                          */
892                         __ClearPageIsolated(page);
893                 }
894
895                 /*
896                  * An anonymous or movable page->mapping will be cleared by
897                  * free_pages_prepare(), so don't reset it here; keeping it
898                  * preserves the type so that e.g. PageAnon() still works.
899                  */
900                 if (!PageMappingFlags(page))
901                         page->mapping = NULL;
902         }
903 out:
904         return rc;
905 }
906
907 static int __unmap_and_move(struct page *page, struct page *newpage,
908                                 int force, enum migrate_mode mode)
909 {
910         int rc = -EAGAIN;
911         int page_was_mapped = 0;
912         struct anon_vma *anon_vma = NULL;
913         bool is_lru = !__PageMovable(page);
914
915         if (!trylock_page(page)) {
916                 if (!force || mode == MIGRATE_ASYNC)
917                         goto out;
918
919                 /*
920                  * It's not safe for direct compaction to call lock_page.
921                  * For example, during page readahead pages are added locked
922                  * to the LRU. Later, when the IO completes the pages are
923                  * marked uptodate and unlocked. However, the queueing
924                  * could be merging multiple pages for one bio (e.g.
925                  * mpage_readpages). If an allocation happens for the
926                  * second or third page, the process can end up locking
927                  * the same page twice and deadlocking. Rather than
928                  * trying to be clever about what pages can be locked,
929                  * avoid the use of lock_page for direct compaction
930                  * altogether.
931                  */
932                 if (current->flags & PF_MEMALLOC)
933                         goto out;
934
935                 lock_page(page);
936         }
937
938         if (PageWriteback(page)) {
939                 /*
940                  * Only in the case of a full synchronous migration is it
941                  * necessary to wait for PageWriteback. In the async case,
942                  * the retry loop is too short and in the sync-light case,
943                  * the overhead of stalling is too much
944                  */
945                 if (mode != MIGRATE_SYNC) {
946                         rc = -EBUSY;
947                         goto out_unlock;
948                 }
949                 if (!force)
950                         goto out_unlock;
951                 wait_on_page_writeback(page);
952         }
953
954         /*
955          * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
956          * we cannot notice that the anon_vma is freed while we migrate a page.
957          * This get_anon_vma() delays freeing the anon_vma pointer until the end
958          * of migration. File cache pages are no problem because of page_lock():
959          * file caches may use writepage() or lock_page() during migration, so
960          * we only need to take care of anon pages here.
961          *
962          * Only page_get_anon_vma() understands the subtleties of
963          * getting a hold on an anon_vma from outside one of its mms.
964          * But if we cannot get anon_vma, then we won't need it anyway,
965          * because that implies that the anon page is no longer mapped
966          * (and cannot be remapped so long as we hold the page lock).
967          */
968         if (PageAnon(page) && !PageKsm(page))
969                 anon_vma = page_get_anon_vma(page);
970
971         /*
972          * Block others from accessing the new page when we get around to
973          * establishing additional references. We are usually the only one
974          * holding a reference to newpage at this point. We used to have a BUG
975          * here if trylock_page(newpage) fails, but would like to allow for
976          * cases where there might be a race with the previous use of newpage.
977          * This is much like races on refcount of oldpage: just don't BUG().
978          */
979         if (unlikely(!trylock_page(newpage)))
980                 goto out_unlock;
981
982         if (unlikely(!is_lru)) {
983                 rc = move_to_new_page(newpage, page, mode);
984                 goto out_unlock_both;
985         }
986
987         /*
988          * Corner case handling:
989          * 1. When a new swap-cache page is first read in, it is added to the LRU
990          * and treated as swapcache but it has no rmap yet.
991          * Calling try_to_unmap() against a page->mapping==NULL page will
992          * trigger a BUG.  So handle it here.
993          * 2. An orphaned page (see truncate_complete_page) might have
994          * fs-private metadata. The page can be picked up due to memory
995          * offlining.  Everywhere else except page reclaim, the page is
996          * invisible to the vm, so the page can not be migrated.  So try to
997          * free the metadata, so the page can be freed.
998          */
999         if (!page->mapping) {
1000                 VM_BUG_ON_PAGE(PageAnon(page), page);
1001                 if (page_has_private(page)) {
1002                         try_to_free_buffers(page);
1003                         goto out_unlock_both;
1004                 }
1005         } else if (page_mapped(page)) {
1006                 /* Establish migration ptes */
1007                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1008                                 page);
1009                 try_to_unmap(page,
1010                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1011                 page_was_mapped = 1;
1012         }
1013
1014         if (!page_mapped(page))
1015                 rc = move_to_new_page(newpage, page, mode);
1016
1017         if (page_was_mapped)
1018                 remove_migration_ptes(page,
1019                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1020
1021 out_unlock_both:
1022         unlock_page(newpage);
1023 out_unlock:
1024         /* Drop an anon_vma reference if we took one */
1025         if (anon_vma)
1026                 put_anon_vma(anon_vma);
1027         unlock_page(page);
1028 out:
1029         /*
1030          * If migration is successful, drop our refcount on the newpage;
1031          * this will not free the page because the new page owner has taken
1032          * a reference. Also, if it is an LRU page, add it back to the LRU
1033          * list here.
1034          */
1035         if (rc == MIGRATEPAGE_SUCCESS) {
1036                 if (unlikely(__PageMovable(newpage)))
1037                         put_page(newpage);
1038                 else
1039                         putback_lru_page(newpage);
1040         }
1041
1042         return rc;
1043 }
1044
1045 /*
1046  * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work
1047  * around it.
1048  */
1049 #if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
1050 #define ICE_noinline noinline
1051 #else
1052 #define ICE_noinline
1053 #endif
1054
1055 /*
1056  * Obtain the lock on page, remove all ptes and migrate the page
1057  * to the newly allocated page in newpage.
1058  */
1059 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1060                                    free_page_t put_new_page,
1061                                    unsigned long private, struct page *page,
1062                                    int force, enum migrate_mode mode,
1063                                    enum migrate_reason reason)
1064 {
1065         int rc = MIGRATEPAGE_SUCCESS;
1066         int *result = NULL;
1067         struct page *newpage;
1068
1069         newpage = get_new_page(page, private, &result);
1070         if (!newpage)
1071                 return -ENOMEM;
1072
1073         if (page_count(page) == 1) {
1074                 /* page was freed from under us. So we are done. */
1075                 ClearPageActive(page);
1076                 ClearPageUnevictable(page);
1077                 if (unlikely(__PageMovable(page))) {
1078                         lock_page(page);
1079                         if (!PageMovable(page))
1080                                 __ClearPageIsolated(page);
1081                         unlock_page(page);
1082                 }
1083                 if (put_new_page)
1084                         put_new_page(newpage, private);
1085                 else
1086                         put_page(newpage);
1087                 goto out;
1088         }
1089
1090         if (unlikely(PageTransHuge(page))) {
1091                 lock_page(page);
1092                 rc = split_huge_page(page);
1093                 unlock_page(page);
1094                 if (rc)
1095                         goto out;
1096         }
1097
1098         rc = __unmap_and_move(page, newpage, force, mode);
1099         if (rc == MIGRATEPAGE_SUCCESS)
1100                 set_page_owner_migrate_reason(newpage, reason);
1101
1102 out:
1103         if (rc != -EAGAIN) {
1104                 /*
1105                  * A page that has been migrated has all references
1106                  * removed and will be freed. A page that has not been
1107                  * migrated will have kept its references and be
1108                  * restored.
1109                  */
1110                 list_del(&page->lru);
1111
1112                 /*
1113                  * Compaction can migrate also non-LRU pages which are
1114                  * not accounted to NR_ISOLATED_*. They can be recognized
1115                  * as __PageMovable
1116                  */
1117                 if (likely(!__PageMovable(page)))
1118                         dec_node_page_state(page, NR_ISOLATED_ANON +
1119                                         page_is_file_cache(page));
1120         }
1121
1122         /*
1123          * If migration is successful, release the reference grabbed during
1124          * isolation. Otherwise, restore the page to the right list unless
1125          * we want to retry.
1126          */
1127         if (rc == MIGRATEPAGE_SUCCESS) {
1128                 put_page(page);
1129                 if (reason == MR_MEMORY_FAILURE) {
1130                         /*
1131                          * Set PG_HWPoison on the just-freed page
1132                          * intentionally. Although it's rather weird,
1133                          * it's how HWPoison flag works at the moment.
1134                          */
1135                         if (!test_set_page_hwpoison(page))
1136                                 num_poisoned_pages_inc();
1137                 }
1138         } else {
1139                 if (rc != -EAGAIN) {
1140                         if (likely(!__PageMovable(page))) {
1141                                 putback_lru_page(page);
1142                                 goto put_new;
1143                         }
1144
1145                         lock_page(page);
1146                         if (PageMovable(page))
1147                                 putback_movable_page(page);
1148                         else
1149                                 __ClearPageIsolated(page);
1150                         unlock_page(page);
1151                         put_page(page);
1152                 }
1153 put_new:
1154                 if (put_new_page)
1155                         put_new_page(newpage, private);
1156                 else
1157                         put_page(newpage);
1158         }
1159
1160         if (result) {
1161                 if (rc)
1162                         *result = rc;
1163                 else
1164                         *result = page_to_nid(newpage);
1165         }
1166         return rc;
1167 }
1168
1169 /*
1170  * Counterpart of unmap_and_move() for hugepage migration.
1171  *
1172  * This function doesn't wait for the completion of hugepage I/O
1173  * because there is no race between I/O and migration for hugepages.
1174  * Note that currently hugepage I/O occurs only in direct I/O
1175  * where no lock is held and PG_writeback is irrelevant,
1176  * and the writeback status of all subpages is counted in the reference
1177  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1178  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1179  * This means that when we try to migrate a hugepage whose subpages are
1180  * doing direct I/O, some references remain after try_to_unmap() and
1181  * hugepage migration fails without data corruption.
1182  *
1183  * There is also no race when direct I/O is issued on the page under migration,
1184  * because then pte is replaced with migration swap entry and direct I/O code
1185  * will wait in the page fault for migration to complete.
1186  */
1187 static int unmap_and_move_huge_page(new_page_t get_new_page,
1188                                 free_page_t put_new_page, unsigned long private,
1189                                 struct page *hpage, int force,
1190                                 enum migrate_mode mode, int reason)
1191 {
1192         int rc = -EAGAIN;
1193         int *result = NULL;
1194         int page_was_mapped = 0;
1195         struct page *new_hpage;
1196         struct anon_vma *anon_vma = NULL;
1197
1198         /*
1199          * Movability of hugepages depends on architectures and hugepage size.
1200          * This check is necessary because some callers of hugepage migration
1201          * like soft offline and memory hotremove don't walk through page
1202          * tables or check whether the hugepage is pmd-based or not before
1203          * kicking migration.
1204          */
1205         if (!hugepage_migration_supported(page_hstate(hpage))) {
1206                 putback_active_hugepage(hpage);
1207                 return -ENOSYS;
1208         }
1209
1210         new_hpage = get_new_page(hpage, private, &result);
1211         if (!new_hpage)
1212                 return -ENOMEM;
1213
1214         if (!trylock_page(hpage)) {
1215                 if (!force || mode != MIGRATE_SYNC)
1216                         goto out;
1217                 lock_page(hpage);
1218         }
1219
1220         if (PageAnon(hpage))
1221                 anon_vma = page_get_anon_vma(hpage);
1222
1223         if (unlikely(!trylock_page(new_hpage)))
1224                 goto put_anon;
1225
1226         if (page_mapped(hpage)) {
1227                 try_to_unmap(hpage,
1228                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1229                 page_was_mapped = 1;
1230         }
1231
1232         if (!page_mapped(hpage))
1233                 rc = move_to_new_page(new_hpage, hpage, mode);
1234
1235         if (page_was_mapped)
1236                 remove_migration_ptes(hpage,
1237                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1238
1239         unlock_page(new_hpage);
1240
1241 put_anon:
1242         if (anon_vma)
1243                 put_anon_vma(anon_vma);
1244
1245         if (rc == MIGRATEPAGE_SUCCESS) {
1246                 hugetlb_cgroup_migrate(hpage, new_hpage);
1247                 put_new_page = NULL;
1248                 set_page_owner_migrate_reason(new_hpage, reason);
1249         }
1250
1251         unlock_page(hpage);
1252 out:
1253         if (rc != -EAGAIN)
1254                 putback_active_hugepage(hpage);
1255         if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
1256                 num_poisoned_pages_inc();
1257
1258         /*
1259          * If migration was not successful and there's a freeing callback, use
1260          * it.  Otherwise, put_page() will drop the reference grabbed during
1261          * isolation.
1262          */
1263         if (put_new_page)
1264                 put_new_page(new_hpage, private);
1265         else
1266                 putback_active_hugepage(new_hpage);
1267
1268         if (result) {
1269                 if (rc)
1270                         *result = rc;
1271                 else
1272                         *result = page_to_nid(new_hpage);
1273         }
1274         return rc;
1275 }
1276
1277 /*
1278  * migrate_pages - migrate the pages specified in a list, to the free pages
1279  *                 supplied as the target for the page migration
1280  *
1281  * @from:               The list of pages to be migrated.
1282  * @get_new_page:       The function used to allocate free pages to be used
1283  *                      as the target of the page migration.
1284  * @put_new_page:       The function used to free target pages if migration
1285  *                      fails, or NULL if no special handling is necessary.
1286  * @private:            Private data to be passed on to get_new_page()
1287  * @mode:               The migration mode that specifies the constraints for
1288  *                      page migration, if any.
1289  * @reason:             The reason for page migration.
1290  *
1291  * The function returns after 10 attempts or when no pages are left to move,
1292  * either because the list has become empty or because no retryable pages remain.
1293  * The caller should call putback_movable_pages() to return pages to the LRU
1294  * or free list only if ret != 0.
1295  *
1296  * Returns the number of pages that were not migrated, or an error code.
1297  */
1298 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1299                 free_page_t put_new_page, unsigned long private,
1300                 enum migrate_mode mode, int reason)
1301 {
1302         int retry = 1;
1303         int nr_failed = 0;
1304         int nr_succeeded = 0;
1305         int pass = 0;
1306         struct page *page;
1307         struct page *page2;
1308         int swapwrite = current->flags & PF_SWAPWRITE;
1309         int rc;
1310
1311         if (!swapwrite)
1312                 current->flags |= PF_SWAPWRITE;
1313
1314         for(pass = 0; pass < 10 && retry; pass++) {
1315                 retry = 0;
1316
1317                 list_for_each_entry_safe(page, page2, from, lru) {
1318                         cond_resched();
1319
1320                         if (PageHuge(page))
1321                                 rc = unmap_and_move_huge_page(get_new_page,
1322                                                 put_new_page, private, page,
1323                                                 pass > 2, mode, reason);
1324                         else
1325                                 rc = unmap_and_move(get_new_page, put_new_page,
1326                                                 private, page, pass > 2, mode,
1327                                                 reason);
1328
1329                         switch(rc) {
1330                         case -ENOMEM:
1331                                 nr_failed++;
1332                                 goto out;
1333                         case -EAGAIN:
1334                                 retry++;
1335                                 break;
1336                         case MIGRATEPAGE_SUCCESS:
1337                                 nr_succeeded++;
1338                                 break;
1339                         default:
1340                                 /*
1341                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1342                                  * unlike -EAGAIN case, the failed page is
1343                                  * removed from migration page list and not
1344                                  * retried in the next outer loop.
1345                                  */
1346                                 nr_failed++;
1347                                 break;
1348                         }
1349                 }
1350         }
1351         nr_failed += retry;
1352         rc = nr_failed;
1353 out:
1354         if (nr_succeeded)
1355                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1356         if (nr_failed)
1357                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1358         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1359
1360         if (!swapwrite)
1361                 current->flags &= ~PF_SWAPWRITE;
1362
1363         return rc;
1364 }
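
/*
 * A sketch of the callback pair expected by migrate_pages() (hypothetical
 * helpers, shown only to illustrate the new_page_t/free_page_t signatures;
 * new_page_node() below is an in-tree example):
 *
 *	static struct page *alloc_on_node(struct page *page,
 *					  unsigned long private, int **result)
 *	{
 *		return __alloc_pages_node((int)private,
 *					  GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 *	}
 *
 *	static void free_target_page(struct page *page, unsigned long private)
 *	{
 *		put_page(page);
 *	}
 */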
1365
1366 #ifdef CONFIG_NUMA
1367 /*
1368  * Move a list of individual pages
1369  */
1370 struct page_to_node {
1371         unsigned long addr;
1372         struct page *page;
1373         int node;
1374         int status;
1375 };
1376
1377 static struct page *new_page_node(struct page *p, unsigned long private,
1378                 int **result)
1379 {
1380         struct page_to_node *pm = (struct page_to_node *)private;
1381
1382         while (pm->node != MAX_NUMNODES && pm->page != p)
1383                 pm++;
1384
1385         if (pm->node == MAX_NUMNODES)
1386                 return NULL;
1387
1388         *result = &pm->status;
1389
1390         if (PageHuge(p))
1391                 return alloc_huge_page_node(page_hstate(compound_head(p)),
1392                                         pm->node);
1393         else
1394                 return __alloc_pages_node(pm->node,
1395                                 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1396 }
1397
1398 /*
1399  * Move a set of pages as indicated in the pm array. The addr
1400  * field must be set to the virtual address of the page to be moved
1401  * and the node number must contain a valid target node.
1402  * The pm array ends with node = MAX_NUMNODES.
1403  */
1404 static int do_move_page_to_node_array(struct mm_struct *mm,
1405                                       struct page_to_node *pm,
1406                                       int migrate_all)
1407 {
1408         int err;
1409         struct page_to_node *pp;
1410         LIST_HEAD(pagelist);
1411
1412         down_read(&mm->mmap_sem);
1413
1414         /*
1415          * Build a list of pages to migrate
1416          */
1417         for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1418                 struct vm_area_struct *vma;
1419                 struct page *page;
1420
1421                 err = -EFAULT;
1422                 vma = find_vma(mm, pp->addr);
1423                 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1424                         goto set_status;
1425
1426                 /* FOLL_DUMP to ignore special (like zero) pages */
1427                 page = follow_page(vma, pp->addr,
1428                                 FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
1429
1430                 err = PTR_ERR(page);
1431                 if (IS_ERR(page))
1432                         goto set_status;
1433
1434                 err = -ENOENT;
1435                 if (!page)
1436                         goto set_status;
1437
1438                 pp->page = page;
1439                 err = page_to_nid(page);
1440
1441                 if (err == pp->node)
1442                         /*
1443                          * Node already in the right place
1444                          */
1445                         goto put_and_set;
1446
1447                 err = -EACCES;
1448                 if (page_mapcount(page) > 1 &&
1449                                 !migrate_all)
1450                         goto put_and_set;
1451
1452                 if (PageHuge(page)) {
1453                         if (PageHead(page))
1454                                 isolate_huge_page(page, &pagelist);
1455                         goto put_and_set;
1456                 }
1457
1458                 err = isolate_lru_page(page);
1459                 if (!err) {
1460                         list_add_tail(&page->lru, &pagelist);
1461                         inc_node_page_state(page, NR_ISOLATED_ANON +
1462                                             page_is_file_cache(page));
1463                 }
1464 put_and_set:
1465                 /*
1466                  * Either remove the duplicate refcount from
1467                  * isolate_lru_page() or drop the page ref if it was
1468                  * not isolated.
1469                  */
1470                 put_page(page);
1471 set_status:
1472                 pp->status = err;
1473         }
1474
1475         err = 0;
1476         if (!list_empty(&pagelist)) {
1477                 err = migrate_pages(&pagelist, new_page_node, NULL,
1478                                 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1479                 if (err)
1480                         putback_movable_pages(&pagelist);
1481         }
1482
1483         up_read(&mm->mmap_sem);
1484         return err;
1485 }
1486
1487 /*
1488  * Migrate an array of page addresses onto an array of nodes and fill
1489  * the corresponding array of status.
1490  */
1491 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1492                          unsigned long nr_pages,
1493                          const void __user * __user *pages,
1494                          const int __user *nodes,
1495                          int __user *status, int flags)
1496 {
1497         struct page_to_node *pm;
1498         unsigned long chunk_nr_pages;
1499         unsigned long chunk_start;
1500         int err;
1501
1502         err = -ENOMEM;
1503         pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1504         if (!pm)
1505                 goto out;
1506
1507         migrate_prep();
1508
1509         /*
1510          * Store a chunk of the page_to_node array in a page,
1511          * but keep the last entry as an end marker
1512          */
1513         chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1514
1515         for (chunk_start = 0;
1516              chunk_start < nr_pages;
1517              chunk_start += chunk_nr_pages) {
1518                 int j;
1519
1520                 if (chunk_start + chunk_nr_pages > nr_pages)
1521                         chunk_nr_pages = nr_pages - chunk_start;
1522
1523                 /* fill the chunk pm with addrs and nodes from user-space */
1524                 for (j = 0; j < chunk_nr_pages; j++) {
1525                         const void __user *p;
1526                         int node;
1527
1528                         err = -EFAULT;
1529                         if (get_user(p, pages + j + chunk_start))
1530                                 goto out_pm;
1531                         pm[j].addr = (unsigned long) p;
1532
1533                         if (get_user(node, nodes + j + chunk_start))
1534                                 goto out_pm;
1535
1536                         err = -ENODEV;
1537                         if (node < 0 || node >= MAX_NUMNODES)
1538                                 goto out_pm;
1539
1540                         if (!node_state(node, N_MEMORY))
1541                                 goto out_pm;
1542
1543                         err = -EACCES;
1544                         if (!node_isset(node, task_nodes))
1545                                 goto out_pm;
1546
1547                         pm[j].node = node;
1548                 }
1549
1550                 /* End marker for this chunk */
1551                 pm[chunk_nr_pages].node = MAX_NUMNODES;
1552
1553                 /* Migrate this chunk */
1554                 err = do_move_page_to_node_array(mm, pm,
1555                                                  flags & MPOL_MF_MOVE_ALL);
1556                 if (err < 0)
1557                         goto out_pm;
1558
1559                 /* Return status information */
1560                 for (j = 0; j < chunk_nr_pages; j++)
1561                         if (put_user(pm[j].status, status + j + chunk_start)) {
1562                                 err = -EFAULT;
1563                                 goto out_pm;
1564                         }
1565         }
1566         err = 0;
1567
1568 out_pm:
1569         free_page((unsigned long)pm);
1570 out:
1571         return err;
1572 }
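/*
 * Illustrative arithmetic (not part of the kernel source), assuming a
 * 64-bit build with 4 KiB pages: struct page_to_node is 24 bytes
 * (an unsigned long, a pointer and two ints), so the scratch page used
 * above holds 4096 / 24 = 170 entries. Reserving the last entry for the
 * MAX_NUMNODES end marker leaves chunk_nr_pages = 169 user requests
 * handled per iteration of the chunk loop.
 */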
1573
1574 /*
1575  * Determine the nodes of an array of pages and store them in an array of status.
1576  */
1577 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1578                                 const void __user **pages, int *status)
1579 {
1580         unsigned long i;
1581
1582         down_read(&mm->mmap_sem);
1583
1584         for (i = 0; i < nr_pages; i++) {
1585                 unsigned long addr = (unsigned long)(*pages);
1586                 struct vm_area_struct *vma;
1587                 struct page *page;
1588                 int err = -EFAULT;
1589
1590                 vma = find_vma(mm, addr);
1591                 if (!vma || addr < vma->vm_start)
1592                         goto set_status;
1593
1594                 /* FOLL_DUMP to ignore special (like zero) pages */
1595                 page = follow_page(vma, addr, FOLL_DUMP);
1596
1597                 err = PTR_ERR(page);
1598                 if (IS_ERR(page))
1599                         goto set_status;
1600
1601                 err = page ? page_to_nid(page) : -ENOENT;
1602 set_status:
1603                 *status = err;
1604
1605                 pages++;
1606                 status++;
1607         }
1608
1609         up_read(&mm->mmap_sem);
1610 }
1611
1612 /*
1613  * Determine the nodes of a user array of pages and store them in
1614  * a user array of status.
1615  */
1616 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1617                          const void __user * __user *pages,
1618                          int __user *status)
1619 {
1620 #define DO_PAGES_STAT_CHUNK_NR 16
1621         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1622         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1623
1624         while (nr_pages) {
1625                 unsigned long chunk_nr;
1626
1627                 chunk_nr = nr_pages;
1628                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1629                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1630
1631                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1632                         break;
1633
1634                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1635
1636                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1637                         break;
1638
1639                 pages += chunk_nr;
1640                 status += chunk_nr;
1641                 nr_pages -= chunk_nr;
1642         }
1643         return nr_pages ? -EFAULT : 0;
1644 }
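/*
 * Illustrative userspace sketch (not part of the kernel source): with a
 * NULL "nodes" argument, the move_pages() syscall takes the
 * do_pages_stat() path above and only reports where each page currently
 * resides. Assumes libnuma's <numaif.h> wrapper is available;
 * example_query_node() is a made-up name for the example.
 */
#if 0	/* example only, never built with the kernel */
#include <numaif.h>
#include <stdio.h>

static void example_query_node(void *addr)
{
	void *pages[1] = { addr };
	int status[1];

	if (move_pages(0, 1, pages, NULL, status, 0) < 0)
		perror("move_pages");
	else
		printf("%p: status %d (node id, or -errno)\n", addr, status[0]);
}
#endif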
1645
1646 /*
1647  * Move a list of pages in the address space of the currently executing
1648  * process.
1649  */
1650 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1651                 const void __user * __user *, pages,
1652                 const int __user *, nodes,
1653                 int __user *, status, int, flags)
1654 {
1655         const struct cred *cred = current_cred(), *tcred;
1656         struct task_struct *task;
1657         struct mm_struct *mm;
1658         int err;
1659         nodemask_t task_nodes;
1660
1661         /* Check flags */
1662         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1663                 return -EINVAL;
1664
1665         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1666                 return -EPERM;
1667
1668         /* Find the mm_struct */
1669         rcu_read_lock();
1670         task = pid ? find_task_by_vpid(pid) : current;
1671         if (!task) {
1672                 rcu_read_unlock();
1673                 return -ESRCH;
1674         }
1675         get_task_struct(task);
1676
1677         /*
1678          * Check if this process has the right to modify the specified
1679          * process. The right exists if the process has administrative
1680          * capabilities, superuser privileges or the same
1681          * userid as the target process.
1682          */
1683         tcred = __task_cred(task);
1684         if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1685             !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1686             !capable(CAP_SYS_NICE)) {
1687                 rcu_read_unlock();
1688                 err = -EPERM;
1689                 goto out;
1690         }
1691         rcu_read_unlock();
1692
1693         err = security_task_movememory(task);
1694         if (err)
1695                 goto out;
1696
1697         task_nodes = cpuset_mems_allowed(task);
1698         mm = get_task_mm(task);
1699         put_task_struct(task);
1700
1701         if (!mm)
1702                 return -EINVAL;
1703
1704         if (nodes)
1705                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1706                                     nodes, status, flags);
1707         else
1708                 err = do_pages_stat(mm, nr_pages, pages, status);
1709
1710         mmput(mm);
1711         return err;
1712
1713 out:
1714         put_task_struct(task);
1715         return err;
1716 }
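/*
 * Illustrative userspace sketch (not part of the kernel source) of
 * driving this syscall through libnuma's <numaif.h> wrapper. The buffer,
 * the target node 0 and the example_move_one_page() name are assumptions
 * made for the example only. Passing pid == 0 targets the calling
 * process, matching the "pid ? find_task_by_vpid(pid) : current" lookup
 * above.
 */
#if 0	/* example only, never built with the kernel */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE; link with -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int example_move_one_page(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	void *pages[1];
	int nodes[1] = { 0 };	/* assumed target: node 0 */
	int status[1];

	if (posix_memalign(&buf, page_size, page_size))
		return -1;
	((char *)buf)[0] = 1;	/* touch the buffer so a page is mapped */

	pages[0] = buf;
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
		perror("move_pages");
	} else {
		/* status[0] is the node the page now lives on, or -errno */
		printf("page at %p: status %d\n", buf, status[0]);
	}

	free(buf);
	return 0;
}
#endif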
1717
1718 #ifdef CONFIG_NUMA_BALANCING
1719 /*
1720  * Returns true if this is a safe migration target node for misplaced NUMA
1721  * pages. Currently it only checks the watermarks, which is crude.
1722  */
1723 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1724                                    unsigned long nr_migrate_pages)
1725 {
1726         int z;
1727
1728         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1729                 struct zone *zone = pgdat->node_zones + z;
1730
1731                 if (!populated_zone(zone))
1732                         continue;
1733
1734                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1735                 if (!zone_watermark_ok(zone, 0,
1736                                        high_wmark_pages(zone) +
1737                                        nr_migrate_pages,
1738                                        0, 0))
1739                         continue;
1740                 return true;
1741         }
1742         return false;
1743 }
1744
1745 static struct page *alloc_misplaced_dst_page(struct page *page,
1746                                            unsigned long data,
1747                                            int **result)
1748 {
1749         int nid = (int) data;
1750         struct page *newpage;
1751
1752         newpage = __alloc_pages_node(nid,
1753                                          (GFP_HIGHUSER_MOVABLE |
1754                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1755                                           __GFP_NORETRY | __GFP_NOWARN) &
1756                                          ~__GFP_RECLAIM, 0);
1757
1758         return newpage;
1759 }
1760
1761 /*
1762  * Page migration rate limiting control.
1763  * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
1764  * window of time. The defaults below say do not migrate more than 1280M per second.
1765  */
1766 static unsigned int migrate_interval_millisecs __read_mostly = 100;
1767 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
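/*
 * Illustrative arithmetic (not part of the kernel source), assuming
 * PAGE_SHIFT == 12, i.e. 4 KiB pages: ratelimit_pages =
 * 128 << (20 - 12) = 32768 pages, which is 128 MiB allowed per 100 ms
 * window and matches the "1280M per second" ceiling quoted above.
 */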
1768
1769 /* Returns true if the node is migrate rate-limited after the update */
1770 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1771                                         unsigned long nr_pages)
1772 {
1773         /*
1774          * Rate-limit the amount of data that is being migrated to a node.
1775          * Optimal placement is no good if the memory bus is saturated and
1776          * all the time is being spent migrating!
1777          */
1778         if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1779                 spin_lock(&pgdat->numabalancing_migrate_lock);
1780                 pgdat->numabalancing_migrate_nr_pages = 0;
1781                 pgdat->numabalancing_migrate_next_window = jiffies +
1782                         msecs_to_jiffies(migrate_interval_millisecs);
1783                 spin_unlock(&pgdat->numabalancing_migrate_lock);
1784         }
1785         if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1786                 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1787                                                                 nr_pages);
1788                 return true;
1789         }
1790
1791         /*
1792          * This is an unlocked non-atomic update so errors are possible.
1793          * The consequence is failing to migrate when we potentially should
1794          * have, which is not severe enough to warrant locking. If it is ever
1795          * a problem, it can be converted to a per-cpu counter.
1796          */
1797         pgdat->numabalancing_migrate_nr_pages += nr_pages;
1798         return false;
1799 }
1800
1801 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1802 {
1803         int page_lru;
1804
1805         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1806
1807         /* Avoid migrating to a node that is nearly full */
1808         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1809                 return 0;
1810
1811         if (isolate_lru_page(page))
1812                 return 0;
1813
1814         /*
1815          * migrate_misplaced_transhuge_page() skips page migration's usual
1816          * check on page_count(), so we must do it here, now that the page
1817          * has been isolated: a GUP pin, or any other pin, prevents migration.
1818          * The expected page count is 3: 1 for the page's mapcount, 1 for the
1819          * caller's pin and 1 for the reference taken by isolate_lru_page().
1820          */
1821         if (PageTransHuge(page) && page_count(page) != 3) {
1822                 putback_lru_page(page);
1823                 return 0;
1824         }
1825
1826         page_lru = page_is_file_cache(page);
1827         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1828                                 hpage_nr_pages(page));
1829
1830         /*
1831          * Isolating the page has taken another reference, so the
1832          * caller's reference can be safely dropped without the page
1833          * disappearing underneath us during migration.
1834          */
1835         put_page(page);
1836         return 1;
1837 }
1838
1839 bool pmd_trans_migrating(pmd_t pmd)
1840 {
1841         struct page *page = pmd_page(pmd);
1842         return PageLocked(page);
1843 }
1844
1845 /*
1846  * Attempt to migrate a misplaced page to the specified destination
1847  * node. Caller is expected to have an elevated reference count on
1848  * the page that will be dropped by this function before returning.
1849  */
1850 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1851                            int node)
1852 {
1853         pg_data_t *pgdat = NODE_DATA(node);
1854         int isolated;
1855         int nr_remaining;
1856         LIST_HEAD(migratepages);
1857
1858         /*
1859          * Don't migrate file pages that are mapped in multiple processes
1860          * with execute permissions as they are probably shared libraries.
1861          */
1862         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1863             (vma->vm_flags & VM_EXEC))
1864                 goto out;
1865
1866         /*
1867          * Rate-limit the amount of data that is being migrated to a node.
1868          * Optimal placement is no good if the memory bus is saturated and
1869          * all the time is being spent migrating!
1870          */
1871         if (numamigrate_update_ratelimit(pgdat, 1))
1872                 goto out;
1873
1874         isolated = numamigrate_isolate_page(pgdat, page);
1875         if (!isolated)
1876                 goto out;
1877
1878         list_add(&page->lru, &migratepages);
1879         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1880                                      NULL, node, MIGRATE_ASYNC,
1881                                      MR_NUMA_MISPLACED);
1882         if (nr_remaining) {
1883                 if (!list_empty(&migratepages)) {
1884                         list_del(&page->lru);
1885                         dec_node_page_state(page, NR_ISOLATED_ANON +
1886                                         page_is_file_cache(page));
1887                         putback_lru_page(page);
1888                 }
1889                 isolated = 0;
1890         } else
1891                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1892         BUG_ON(!list_empty(&migratepages));
1893         return isolated;
1894
1895 out:
1896         put_page(page);
1897         return 0;
1898 }
1899 #endif /* CONFIG_NUMA_BALANCING */
1900
1901 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1902 /*
1903  * Migrates a THP to a given target node. page must be locked and is unlocked
1904  * before returning.
1905  */
1906 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1907                                 struct vm_area_struct *vma,
1908                                 pmd_t *pmd, pmd_t entry,
1909                                 unsigned long address,
1910                                 struct page *page, int node)
1911 {
1912         spinlock_t *ptl;
1913         pg_data_t *pgdat = NODE_DATA(node);
1914         int isolated = 0;
1915         struct page *new_page = NULL;
1916         int page_lru = page_is_file_cache(page);
1917         unsigned long mmun_start = address & HPAGE_PMD_MASK;
1918         unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1919
1920         /*
1921          * Rate-limit the amount of data that is being migrated to a node.
1922          * Optimal placement is no good if the memory bus is saturated and
1923          * all the time is being spent migrating!
1924          */
1925         if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
1926                 goto out_dropref;
1927
1928         new_page = alloc_pages_node(node,
1929                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1930                 HPAGE_PMD_ORDER);
1931         if (!new_page)
1932                 goto out_fail;
1933         prep_transhuge_page(new_page);
1934
1935         isolated = numamigrate_isolate_page(pgdat, page);
1936         if (!isolated) {
1937                 put_page(new_page);
1938                 goto out_fail;
1939         }
1940
1941         /* Prepare a page as a migration target */
1942         __SetPageLocked(new_page);
1943         if (PageSwapBacked(page))
1944                 __SetPageSwapBacked(new_page);
1945
1946         /* anon mapping, we can simply copy page->mapping to the new page: */
1947         new_page->mapping = page->mapping;
1948         new_page->index = page->index;
1949         migrate_page_copy(new_page, page);
1950         WARN_ON(PageLRU(new_page));
1951
1952         /* Recheck the target PMD */
1953         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1954         ptl = pmd_lock(mm, pmd);
1955         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
1956                 spin_unlock(ptl);
1957                 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1958
1959                 /* Reverse changes made by migrate_page_copy() */
1960                 if (TestClearPageActive(new_page))
1961                         SetPageActive(page);
1962                 if (TestClearPageUnevictable(new_page))
1963                         SetPageUnevictable(page);
1964
1965                 unlock_page(new_page);
1966                 put_page(new_page);             /* Free it */
1967
1968                 /* Retake the caller's reference and put back on the LRU */
1969                 get_page(page);
1970                 putback_lru_page(page);
1971                 mod_node_page_state(page_pgdat(page),
1972                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1973
1974                 goto out_unlock;
1975         }
1976
1977         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1978         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1979
1980         /*
1981          * Clear the old entry under pagetable lock and establish the new PTE.
1982          * Any parallel GUP will either observe the old page blocking on the
1983          * page lock, block on the page table lock or observe the new page.
1984          * The SetPageUptodate on the new page and page_add_new_anon_rmap
1985          * guarantee the copy is visible before the pagetable update.
1986          */
1987         flush_cache_range(vma, mmun_start, mmun_end);
1988         page_add_anon_rmap(new_page, vma, mmun_start, true);
1989         pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
1990         set_pmd_at(mm, mmun_start, pmd, entry);
1991         update_mmu_cache_pmd(vma, address, &entry);
1992
1993         page_ref_unfreeze(page, 2);
1994         mlock_migrate_page(new_page, page);
1995         page_remove_rmap(page, true);
1996         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
1997
1998         spin_unlock(ptl);
1999         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2000
2001         /* Take an "isolate" reference and put new page on the LRU. */
2002         get_page(new_page);
2003         putback_lru_page(new_page);
2004
2005         unlock_page(new_page);
2006         unlock_page(page);
2007         put_page(page);                 /* Drop the rmap reference */
2008         put_page(page);                 /* Drop the LRU isolation reference */
2009
2010         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2011         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2012
2013         mod_node_page_state(page_pgdat(page),
2014                         NR_ISOLATED_ANON + page_lru,
2015                         -HPAGE_PMD_NR);
2016         return isolated;
2017
2018 out_fail:
2019         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2020 out_dropref:
2021         ptl = pmd_lock(mm, pmd);
2022         if (pmd_same(*pmd, entry)) {
2023                 entry = pmd_modify(entry, vma->vm_page_prot);
2024                 set_pmd_at(mm, mmun_start, pmd, entry);
2025                 update_mmu_cache_pmd(vma, address, &entry);
2026         }
2027         spin_unlock(ptl);
2028
2029 out_unlock:
2030         unlock_page(page);
2031         put_page(page);
2032         return 0;
2033 }
2034 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2035
2036 #endif /* CONFIG_NUMA */