mm: soft-offline: use migrate_pages() instead of migrate_huge_page()
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Wed, 11 Sep 2013 21:22:01 +0000 (14:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Sep 2013 22:57:47 +0000 (15:57 -0700)
Currently migrate_huge_page() takes a pointer to a single hugepage to be
migrated as an argument, instead of a pointer to a list of hugepages to
be migrated.  This behavior was introduced in commit 189ebff28
("hugetlb: simplify migrate_huge_page()"), and was acceptable because,
until now, hugepage migration has been enabled only for soft-offlining,
which migrates only one hugepage per call.
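
For reference, a minimal sketch of the two call shapes, using the
prototypes visible in the include/linux/migrate.h hunk below ('private'
and 'pagelist' are placeholder caller-side names):

	/* old interface: migrates exactly one hugepage per call */
	ret = migrate_huge_page(hpage, new_page, private, MIGRATE_SYNC);

	/*
	 * list-based interface: migrates whatever the caller collected,
	 * with an extra 'reason' argument (MR_MEMORY_FAILURE here)
	 */
	ret = migrate_pages(&pagelist, new_page, private,
			    MIGRATE_SYNC, MR_MEMORY_FAILURE);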

But the situation will change with later patches in this series, which
enable other users of page migration to support hugepage migration.
They can kick off migration of both normal pages and hugepages in a
single call, so we need to go back to the original implementation,
which uses linked lists to collect the hugepages to be migrated.
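
A minimal sketch of the caller-side pattern, following the
soft_offline_huge_page() hunk below (error handling beyond the putback
omitted):

	LIST_HEAD(pagelist);

	/* move the isolated hugepage onto a private list ... */
	list_move(&hpage->lru, &pagelist);

	/* ... and hand the whole list to migrate_pages() */
	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
			    MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret)
		/* migration failed: return the hugepage to the active list */
		putback_active_hugepage(hpage);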

With this patch, soft_offline_huge_page() switches to using
migrate_pages(), and migrate_huge_page() is no longer used.  So let's
remove it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/migrate.h
mm/memory-failure.c
mm/migrate.c

index a405d3dc0f61f0bd9609a580e17c5bd14127e39a..6fe521420631e7533d76b22e04dae0f24ccfc7e0 100644 (file)
@@ -41,8 +41,6 @@ extern int migrate_page(struct address_space *,
                        struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
                unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
-               unsigned long private, enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
                        struct page *, struct page *);
@@ -62,9 +60,6 @@ static inline void putback_movable_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
                unsigned long private, enum migrate_mode mode, int reason)
        { return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
-               unsigned long private, enum migrate_mode mode)
-       { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
index d84c5e5331bb5199632f46fda6d3ca3fef9dbaf4..e05ed31c0f61b9bf14a4cc8ce2baa747025fc4ba 100644 (file)
@@ -1470,6 +1470,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
        int ret;
        unsigned long pfn = page_to_pfn(page);
        struct page *hpage = compound_head(page);
+       LIST_HEAD(pagelist);
 
        /*
         * This double-check of PageHWPoison is to avoid the race with
@@ -1485,12 +1486,20 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        /* Keep page count to indicate a given hugepage is isolated. */
-       ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL,
-                               MIGRATE_SYNC);
-       put_page(hpage);
+       list_move(&hpage->lru, &pagelist);
+       ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+                               MIGRATE_SYNC, MR_MEMORY_FAILURE);
        if (ret) {
                pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                        pfn, ret, page->flags);
+               /*
+                * We know that soft_offline_huge_page() tries to migrate
+                * only one hugepage pointed to by hpage, so we need not
+                * run through the pagelist here.
+                */
+               putback_active_hugepage(hpage);
+               if (ret > 0)
+                       ret = -EIO;
        } else {
                set_page_hwpoison_huge_page(hpage);
                dequeue_hwpoisoned_huge_page(hpage);
index b44a067fee10098af0d0eb15de02807db8b5945f..3ec47d3394c82f60455b26206155fa735c239c6a 100644 (file)
@@ -979,6 +979,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        unlock_page(hpage);
 out:
+       if (rc != -EAGAIN)
+               putback_active_hugepage(hpage);
        put_page(new_hpage);
        if (result) {
                if (rc)
@@ -1066,32 +1068,6 @@ out:
        return rc;
 }
 
-int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
-                     unsigned long private, enum migrate_mode mode)
-{
-       int pass, rc;
-
-       for (pass = 0; pass < 10; pass++) {
-               rc = unmap_and_move_huge_page(get_new_page, private,
-                                               hpage, pass > 2, mode);
-               switch (rc) {
-               case -ENOMEM:
-                       goto out;
-               case -EAGAIN:
-                       /* try again */
-                       cond_resched();
-                       break;
-               case MIGRATEPAGE_SUCCESS:
-                       goto out;
-               default:
-                       rc = -EIO;
-                       goto out;
-               }
-       }
-out:
-       return rc;
-}
-
 #ifdef CONFIG_NUMA
 /*
  * Move a list of individual pages