git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
hugetlb: restrict hugepage_migration_support() to x86_64
authorNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Wed, 4 Jun 2014 23:05:35 +0000 (16:05 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Jul 2014 03:11:53 +0000 (20:11 -0700)
commit c177c81e09e517bbf75b67762cdab1b83aba6976 upstream.

Currently hugepage migration is available for all archs which support
pmd-level hugepage, but testing is done only for x86_64 and there are
bugs for other archs.  So to avoid breaking such archs, this patch
limits the availability strictly to x86_64 until developers of other
archs get interested in enabling this feature.

Simply disabling hugepage migration on non-x86_64 archs is not enough to
fix the reported problem where sys_move_pages() hits the BUG_ON() in
follow_page(FOLL_GET), so let's fix this by checking if hugepage
migration is supported in vma_migratable().

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
15 files changed:
arch/arm/mm/hugetlbpage.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/metag/mm/hugetlbpage.c
arch/mips/mm/hugetlbpage.c
arch/powerpc/mm/hugetlbpage.c
arch/s390/mm/hugetlbpage.c
arch/sh/mm/hugetlbpage.c
arch/sparc/mm/hugetlbpage.c
arch/tile/mm/hugetlbpage.c
arch/x86/Kconfig
arch/x86/mm/hugetlbpage.c
include/linux/hugetlb.h
include/linux/mempolicy.h
mm/Kconfig

index 54ee6163c1814453298272fdb182341e3a5ffcdf..66781bf34077cb540a67f845eeeb919c2decfc4e 100644 (file)
@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
 {
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
-
-int pmd_huge_support(void)
-{
-       return 1;
-}
index 31eb959e9aa81d05f16269e3bb500a965cb7cd48..023747bf4dd7dd2025a17c3fc44244aa1b0f7b00 100644 (file)
@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
 #endif
 }
 
-int pmd_huge_support(void)
-{
-       return 1;
-}
-
 static __init int setup_hugepagesz(char *opt)
 {
        unsigned long ps = memparse(opt, &opt);
index 68232db98baa74856752a86fc63cc12678e76787..76069c18ee42c186edf37c680ee78249d553b4fc 100644 (file)
@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 0;
-}
-
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
 {
index 042431509b5664bca9bc6c930b296c77a064079a..3c52fa6d0f8e24030294fecacc26498f5de9ffe5 100644 (file)
@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
index 77e0ae036e7c9edf75e9ffc64369802a54ee3a1e..4ec8ee10d3718f1ebfcee78e34fb838f72dc087c 100644 (file)
@@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
        return (pud_val(pud) & _PAGE_HUGE) != 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 1;
-}
-
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
index eb923654ba80e7412760ce67e6429486576fb044..7e70ae968e5f9df04cdf372e06ebea433be34d5b 100644 (file)
@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
         */
        return ((pgd_val(pgd) & 0x3) != 0x0);
 }
-
-int pmd_huge_support(void)
-{
-       return 1;
-}
 #else
 int pmd_huge(pmd_t pmd)
 {
@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
 {
        return 0;
 }
-
-int pmd_huge_support(void)
-{
-       return 0;
-}
 #endif
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
index d261c62e40a68f8c885d6019e6a0ef27d9497c4c..248445f92604efff09a5188352c3e6fba1248bb0 100644 (file)
@@ -223,11 +223,6 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
 {
index 0d676a41081e873582a68cf3c1d179ab51dd529d..d7762349ea4869be1b40898a5ae7e6ff8e79c6aa 100644 (file)
@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 0;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
index 9bd9ce80bf77eb46605942278dd4c1fe72fa4da0..d329537739c6f39bb1aa8fa0b7b21d3d4c7ed52c 100644 (file)
@@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
        return 0;
 }
 
-int pmd_huge_support(void)
-{
-       return 0;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
index 0cb3bbaa580c5dbc5ed00417ffd4b6e002c03813..e514899e1100319dc83fe69530f1aad67b17ceea 100644 (file)
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
-int pmd_huge_support(void)
-{
-       return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
 {
index 0af5250d914fd7b52d01873571dd6facb7bba8d4..1981dd9b8a11e37458f9e9395301c9aabcdb6d67 100644 (file)
@@ -1909,6 +1909,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
        def_bool y
        depends on X86_64 || X86_PAE
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+       def_bool y
+       depends on X86_64 && HUGETLB_PAGE && MIGRATION
+
 menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
index 8c9f647ff9e111203af86f894a13b450e4513013..8b977ebf9388c4c064bc344dcb16533bd36dafa0 100644 (file)
@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
        return NULL;
 }
-
-int pmd_huge_support(void)
-{
-       return 0;
-}
 #else
 
 struct page *
@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
 {
        return !!(pud_val(pud) & _PAGE_PSE);
 }
-
-int pmd_huge_support(void)
-{
-       return 1;
-}
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
index 8c43cc469d78259b6028dfb30be8899b9a95e3d0..bd1e9bcec5478e553c47b3302e29ccddb3a126c6 100644 (file)
@@ -382,15 +382,13 @@ static inline pgoff_t basepage_index(struct page *page)
 
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
 static inline int hugepage_migration_support(struct hstate *h)
 {
-       return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+       return huge_page_shift(h) == PMD_SHIFT;
+#else
+       return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -430,7 +428,6 @@ static inline pgoff_t basepage_index(struct page *page)
        return page->index;
 }
 #define dissolve_free_huge_pages(s, e) do {} while (0)
-#define pmd_huge_support()     0
 #define hugepage_migration_support(h)  0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
index 5f1ea756aaceee191eaccb1e5c73ab7455636de8..5bba088bd23976918fc39e6e80b9832a38de4b7d 100644 (file)
@@ -176,6 +176,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 {
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+       if (vma->vm_flags & VM_HUGETLB)
+               return 0;
+#endif
+
        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
index 2888024e0b0abea6b6f37f4531ad0f8dfec5c7b3..9b63c1584a4226c38ca96ebd6aabf4a6cd085c2c 100644 (file)
@@ -263,6 +263,9 @@ config MIGRATION
          pages as migration can relocate pages to satisfy a huge page
          allocation instead of reclaiming.
 
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+       boolean
+
 config PHYS_ADDR_T_64BIT
        def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT