mm: migrate: check movability of hugepage in unmap_and_move_huge_page()
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Wed, 11 Sep 2013 21:22:11 +0000 (14:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Sep 2013 22:57:49 +0000 (15:57 -0700)
Currently hugepage migration works well only for pmd-based hugepages
(mainly due to lack of testing), so we had better not enable migration of
other levels of hugepages until we are ready for it.

Some users of hugepage migration (mbind, move_pages, and migrate_pages) do
a page table walk and check pud/pmd_huge() there, so they are safe.  But
the other users (soft offline and memory hot-remove) don't do this, so
without this patch they can try to migrate unexpected types of hugepages.
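
For illustration, a minimal sketch of the walk those safe callers perform;
the helper name addr_is_pmd_hugepage() is hypothetical (the real walks live
in the mbind/move_pages paths) and assumes the 3.12-era page table API:

	/*
	 * Illustrative only: descend to the pmd level and treat only
	 * pmd-based hugepages as migration candidates, as the
	 * mbind()/move_pages() walkers effectively do via
	 * pud_huge()/pmd_huge().
	 */
	static int addr_is_pmd_hugepage(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return 0;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return 0;
		if (pud_huge(*pud))
			return 0;	/* pud-based hugepage: not handled */
		pmd = pmd_offset(pud, addr);
		return pmd_huge(*pmd);	/* pmd-based hugepage: safe */
	}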

To prevent this, we introduce hugepage_migration_support() as an
architecture-dependent check of whether hugepages are implemented on a pmd
basis or not.  On some architectures multiple sizes of hugepages are
available, so hugepage_migration_support() also checks the hugepage size.
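For example, on x86_64 the 2MB hstate satisfies huge_page_shift(h) ==
PMD_SHIFT and stays migratable, while the 1GB hstate (huge_page_shift(h) ==
PUD_SHIFT) is rejected by the new check.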

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
13 files changed:
arch/arm/mm/hugetlbpage.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/metag/mm/hugetlbpage.c
arch/mips/mm/hugetlbpage.c
arch/powerpc/mm/hugetlbpage.c
arch/s390/mm/hugetlbpage.c
arch/sh/mm/hugetlbpage.c
arch/sparc/mm/hugetlbpage.c
arch/tile/mm/hugetlbpage.c
arch/x86/mm/hugetlbpage.c
include/linux/hugetlb.h
mm/migrate.c

diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 66781bf34077cb540a67f845eeeb919c2decfc4e..54ee6163c1814453298272fdb182341e3a5ffcdf 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -56,3 +56,8 @@ int pmd_huge(pmd_t pmd)
 {
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
+
+int pmd_huge_support(void)
+{
+       return 1;
+}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2fc8258bab2df614ab51a0bf1ffb4a36a236ba41..5e9aec358306f0c13bdd0bb70758dde88be9e961 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,6 +54,11 @@ int pud_huge(pud_t pud)
        return !(pud_val(pud) & PUD_TABLE_BIT);
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
+
 static __init int setup_hugepagesz(char *opt)
 {
        unsigned long ps = memparse(opt, &opt);
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 76069c18ee42c186edf37c680ee78249d553b4fc..68232db98baa74856752a86fc63cc12678e76787 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -114,6 +114,11 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 0;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 3c52fa6d0f8e24030294fecacc26498f5de9ffe5..042431509b5664bca9bc6c930b296c77a064079a 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -110,6 +110,11 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index a7fee0dfb7a9daaeb67616804f00cac7365d778f..01fda4419ed09de2e5adbfcd0b318023f3cc7664 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -85,6 +85,11 @@ int pud_huge(pud_t pud)
        return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 834ca8eb38f202e01c5151fdb56b13197ed6acc8..d67db4bd672dd4223065c9b5470ee368a9456d1d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -86,6 +86,11 @@ int pgd_huge(pgd_t pgd)
         */
        return ((pgd_val(pgd) & 0x3) != 0x0);
 }
+
+int pmd_huge_support(void)
+{
+       return 1;
+}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -101,6 +106,11 @@ int pgd_huge(pgd_t pgd)
 {
        return 0;
 }
+
+int pmd_huge_support(void)
+{
+       return 0;
+}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 248445f92604efff09a5188352c3e6fba1248bb0..d261c62e40a68f8c885d6019e6a0ef27d9497c4c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -223,6 +223,11 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
 {
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index d7762349ea4869be1b40898a5ae7e6ff8e79c6aa..0d676a41081e873582a68cf3c1d179ab51dd529d 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -83,6 +83,11 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d2b59441ebddfb84080da0fcadff6921e71342f6..96399646570a780e1b38a6324994c5f71470bb0b 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -234,6 +234,11 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
+int pmd_huge_support(void)
+{
+       return 0;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index e514899e1100319dc83fe69530f1aad67b17ceea..0cb3bbaa580c5dbc5ed00417ffd4b6e002c03813 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,6 +166,11 @@ int pud_huge(pud_t pud)
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
+
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 7e73e8c690966dccbd8a7ef9f3d5397a367828f1..9d980d88b7477a82f757e75d0efda0c867c76d43 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -59,6 +59,10 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        return NULL;
 }
 
+int pmd_huge_support(void)
+{
+       return 0;
+}
 #else
 
 struct page *
@@ -77,6 +81,10 @@ int pud_huge(pud_t pud)
        return !!(pud_val(pud) & _PAGE_PSE);
 }
 
+int pmd_huge_support(void)
+{
+       return 1;
+}
 #endif
 
 /* x86_64 also uses this file */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2e02c4ed1035713e7491ce837a6a62ebcf02fb18..0393270466c3fd8a59d24781cf54ab5ffd0c5a57 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -381,6 +381,16 @@ static inline pgoff_t basepage_index(struct page *page)
 
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
+int pmd_huge_support(void);
+/*
+ * Currently hugepage migration is enabled only for pmd-based hugepage.
+ * This function will be updated when hugepage migration is more widely
+ * supported.
+ */
+static inline int hugepage_migration_support(struct hstate *h)
+{
+       return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+}
 
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
@@ -409,6 +419,8 @@ static inline pgoff_t basepage_index(struct page *page)
        return page->index;
 }
 #define dissolve_free_huge_pages(s, e) do {} while (0)
+#define pmd_huge_support()     0
+#define hugepage_migration_support(h)  0
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index d3137375fa80e401c4ead5db5c60152cfe3f8f7a..61f14a1923fd6044f7435498fd7cc9d0a0eebc9f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -949,6 +949,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        struct page *new_hpage = get_new_page(hpage, private, &result);
        struct anon_vma *anon_vma = NULL;
 
+       /*
+        * Movability of hugepages depends on architectures and hugepage size.
+        * This check is necessary because some callers of hugepage migration
+        * like soft offline and memory hotremove don't walk through page
+        * tables or check whether the hugepage is pmd-based or not before
+        * kicking migration.
+        */
+       if (!hugepage_migration_support(page_hstate(hpage)))
+               return -ENOSYS;
+
        if (!new_hpage)
                return -ENOMEM;
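
For context, a hedged sketch of how a hot-remove-style caller observes the
new failure mode; demo_new_page() and demo_migrate_huge_list() are
illustrative names, not part of this patch, and assume the 3.12-era
migrate_pages() API:

	/* Illustrative callback: allocate a destination page from the same hstate. */
	static struct page *demo_new_page(struct page *page, unsigned long private,
					  int **result)
	{
		return alloc_huge_page_node(page_hstate(page), numa_node_id());
	}

	/*
	 * With this patch, migrate_pages() fails fast (unmap_and_move_huge_page()
	 * returns -ENOSYS) when an isolated hugepage's hstate is not pmd-based,
	 * instead of attempting an unsupported migration.
	 */
	static int demo_migrate_huge_list(struct list_head *pagelist)
	{
		int ret = migrate_pages(pagelist, demo_new_page, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(pagelist);	/* return leftovers */
		return ret;
	}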