Merge branch 'akpm-current/current'
diff --git a/mm/migrate.c b/mm/migrate.c
index 4612bb2e3677d015c839e48d6b9d96f9fb353f7e..734704f6f29b33dc8256463fcb9de1c23621d057 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -71,29 +71,13 @@ int migrate_prep_local(void)
        return 0;
 }
 
-/*
- * Add isolated pages on the list back to the LRU under page lock
- * to avoid leaking evictable pages back onto unevictable list.
- */
-void putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               list_del(&page->lru);
-               dec_zone_page_state(page, NR_ISOLATED_ANON +
-                               page_is_file_cache(page));
-                       putback_lru_page(page);
-       }
-}
-
 /*
  * Put previously isolated pages back onto the appropriate lists
  * from where they were once taken off for compaction/migration.
  *
- * This function shall be used instead of putback_lru_pages(),
- * whenever the isolated pageset has been built by isolate_migratepages_range()
+ * This function shall be used whenever the isolated pageset has been
+ * built from lru, balloon or hugetlbfs pages. See isolate_migratepages_range()
+ * and isolate_huge_page().
  */
 void putback_movable_pages(struct list_head *l)
 {
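
Review note: with putback_lru_pages() removed above, callers that isolate
pages for migration are expected to drain any leftovers through
putback_movable_pages(), which copes with LRU, balloon and hugetlb pages
alike. A minimal caller sketch under that assumption; alloc_dst_page() is
a placeholder allocator and the reason code is illustrative, neither is
part of this patch:

	LIST_HEAD(pagelist);

	/* ... isolate candidate pages onto &pagelist ... */

	if (migrate_pages(&pagelist, alloc_dst_page, 0,
			  MIGRATE_SYNC, MR_MEMORY_HOTPLUG))
		/* Return whatever could not be migrated to its old list. */
		putback_movable_pages(&pagelist);
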
@@ -515,7 +499,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
-               VM_BUG_ON(PageUnevictable(page));
+               VM_BUG_ON_PAGE(PageUnevictable(page), page);
                SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
                SetPageUnevictable(newpage);
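
Review note: this patch converts several assertions from VM_BUG_ON() to
VM_BUG_ON_PAGE() so that a trip dumps the offending page's state before
calling BUG(). Roughly how that macro reads in include/linux/mmdebug.h
with CONFIG_DEBUG_VM enabled, quoted from memory:

	#define VM_BUG_ON_PAGE(cond, page)					\
		do {								\
			if (unlikely(cond)) {					\
				dump_page(page,					\
					"VM_BUG_ON_PAGE(" __stringify(cond)")"); \
				BUG();						\
			}							\
		} while (0)
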
@@ -568,14 +552,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
  *                    Migration functions
  ***********************************************************/
 
-/* Always fail migration. Used for mappings that are not movable */
-int fail_migrate_page(struct address_space *mapping,
-                       struct page *newpage, struct page *page)
-{
-       return -EIO;
-}
-EXPORT_SYMBOL(fail_migrate_page);
-
 /*
  * Common logic to directly migrate a single page suitable for
  * pages that do not use PagePrivate/PagePrivate2.
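
Review note: fail_migrate_page() is removed because nothing referenced it
any longer. As I read move_to_new_page(), a mapping that cannot migrate
its pages now simply leaves .migratepage unset and gets the generic
fallback path; a hypothetical aops for illustration:

	static const struct address_space_operations example_aops = {
		.readpage	= example_readpage,	/* hypothetical */
		/*
		 * No .migratepage: move_to_new_page() uses
		 * fallback_migrate_page(), which writes out dirty pages
		 * and otherwise migrates via migrate_page().
		 */
	};
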
@@ -895,7 +871,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
-               VM_BUG_ON(PageAnon(page));
+               VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
                        goto uncharge;
@@ -1013,7 +989,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 {
        int rc = 0;
        int *result = NULL;
-       struct page *new_hpage = get_new_page(hpage, private, &result);
+       struct page *new_hpage;
        struct anon_vma *anon_vma = NULL;
 
        /*
@@ -1023,9 +999,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
         * tables or check whether the hugepage is pmd-based or not before
         * kicking migration.
         */
-       if (!hugepage_migration_support(page_hstate(hpage)))
+       if (!hugepage_migration_support(page_hstate(hpage))) {
+               putback_active_hugepage(hpage);
                return -ENOSYS;
+       }
 
+       new_hpage = get_new_page(hpage, private, &result);
        if (!new_hpage)
                return -ENOMEM;
 
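
Review note: two things change in this hunk. First, a hugepage whose
hstate cannot be migrated is now handed back via putback_active_hugepage()
(which also drops the reference taken at isolation time) before the
permanent -ENOSYS failure is reported; migrate_pages() assumes a
permanently failed page has already been dealt with (see the comment
added in the next hunk), so the old early return left the isolated
hugepage dangling. Second, calling get_new_page() only after the check
avoids allocating a destination page just to throw it away. The failure
path, before and after, as I read it:

	/* Before: hpage stays isolated; nobody ever puts it back. */
	if (!hugepage_migration_support(page_hstate(hpage)))
		return -ENOSYS;

	/* After: return it to the active list, then fail permanently. */
	if (!hugepage_migration_support(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}
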
@@ -1125,7 +1104,12 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                                nr_succeeded++;
                                break;
                        default:
-                               /* Permanent failure */
+                               /*
+                                * Permanent failure (-EBUSY, -ENOSYS, etc.):
+                                * unlike the -EAGAIN case, the failed page is
+                                * removed from the migration page list and is
+                                * not retried in the next outer loop.
+                                */
                                nr_failed++;
                                break;
                        }
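
Review note: for orientation, the switch carrying this comment sits in
migrate_pages()'s retry loop, where only -EAGAIN keeps a page in play for
another pass. An abbreviated sketch of the surrounding flow, from my
reading of the source at this point (the hugepage branch is elided):

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			rc = unmap_and_move(get_new_page, private,
					    page, pass > 2, mode);
			switch (rc) {
			case -ENOMEM:
				goto out;	/* stop migrating entirely */
			case -EAGAIN:
				retry++;	/* still listed; retried next pass */
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				nr_failed++;	/* permanent; already off the list */
				break;
			}
		}
	}
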
@@ -1614,8 +1598,11 @@ static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
                        msecs_to_jiffies(migrate_interval_millisecs);
                spin_unlock(&pgdat->numabalancing_migrate_lock);
        }
-       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
+       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
+               trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
+                                                               nr_pages);
                return true;
+       }
 
        /*
         * This is an unlocked non-atomic update so errors are possible.
@@ -1631,7 +1618,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
 
-       VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+       VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
        /* Avoid migrating to a node that is nearly full */
        if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
@@ -1714,7 +1701,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
                                     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
        if (nr_remaining) {
-               putback_lru_pages(&migratepages);
+               if (!list_empty(&migratepages)) {
+                       list_del(&page->lru);
+                       dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                       page_is_file_cache(page));
+                       putback_lru_page(page);
+               }
                isolated = 0;
        } else
                count_vm_numa_event(NUMA_PAGE_MIGRATE);
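
Review note: with putback_lru_pages() gone (first hunk), its body is
open-coded here for the single base page that migrate_misplaced_page()
isolates. The list_empty() check matters because unmap_and_move() itself
unlists and puts back any page that fails for a reason other than
-EAGAIN, so nr_remaining can be nonzero while the list is already empty.
If this path ever handled more than one page, the loop form of the
removed helper (from the first hunk) would be needed instead:

	struct page *page2;

	list_for_each_entry_safe(page, page2, &migratepages, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
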
@@ -1761,8 +1753,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        if (!new_page)
                goto out_fail;
 
-       page_cpupid_xchg_last(new_page, page_cpupid_last(page));
-
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated) {
                put_page(new_page);