mm/follow_page_mask: add support for hugetlb pgd entries
author Anshuman Khandual <khandual@linux.vnet.ibm.com>
Thu, 6 Jul 2017 22:38:50 +0000 (15:38 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jul 2017 23:24:33 +0000 (16:24 -0700)
ppc64 supports pgd hugetlb entries.  Add code to follow_page_mask() to
handle hugetlb pgd entries so that ppc64 can switch to it for handling
hugetlb entries.

Link: http://lkml.kernel.org/r/1494926612-23928-5-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/hugetlb.h
mm/gup.c
mm/hugetlb.c
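
Before the diffs, a quick illustration of the arithmetic used by the new
generic helper added to mm/hugetlb.c below: for a mapping backed by a single
pgd-level huge page, the target subpage is found by masking the address down
to its offset inside the pgd region and shifting by the base page size.  The
standalone program below is only a sketch of that calculation; the 4K base
page and 16G pgd-entry sizes are assumptions, not values taken from this
commit.

/*
 * Standalone userspace sketch (not kernel code): reproduces the subpage
 * index computation used by follow_huge_pgd() below,
 *   pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT)
 * The shift values are assumptions (4K base pages, 16G pgd-level pages).
 */
#include <stdio.h>

#define PAGE_SHIFT   12UL       /* assumed 4K base pages */
#define PGDIR_SHIFT  34UL       /* assumed 16G pgd-level huge pages */
#define PGDIR_MASK   (~((1UL << PGDIR_SHIFT) - 1))

int main(void)
{
        unsigned long address = 0x123456789000UL;  /* arbitrary example */

        /* Offset of the address inside its pgd region, in base pages. */
        unsigned long subpage = (address & ~PGDIR_MASK) >> PAGE_SHIFT;

        printf("subpage index inside the pgd-level huge page: %lu\n", subpage);
        return 0;
}

In the kernel helper, pte_page() on the pgd entry yields the struct page of
the huge page's head, and this index then selects the struct page for the
base-size subpage containing the faulting address.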

index aa1df49b9a1432c263bc99f826c81aadf3905528..3656ce605dc9cf2c6f59bdde877399638bc47b4b 100644 (file)
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -121,6 +121,9 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+                            pgd_t *pgd, int flags);
+
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -150,6 +153,7 @@ static inline void hugetlb_show_meminfo(void)
 }
 #define follow_huge_pmd(mm, addr, pmd, flags)  NULL
 #define follow_huge_pud(mm, addr, pud, flags)  NULL
+#define follow_huge_pgd(mm, addr, pgd, flags)  NULL
 #define prepare_hugepage_range(file, addr, len)        (-EINVAL)
 #define pmd_huge(x)    0
 #define pud_huge(x)    0
index bf68e21d7a3a646a068efc1255983e7c7a532c96..fe95a37a417256be4463105832614fbd892106ec 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -357,6 +357,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);
 
+       if (pgd_huge(*pgd)) {
+               page = follow_huge_pgd(mm, address, pgd, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+
        return follow_p4d_mask(vma, address, pgd, flags, page_mask);
 }
 
index 65c84414a6b760e505104429ef5f3d9a7fc8b58d..a446869aa7f10158f0ae75e7a6e48075c203b547 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4715,6 +4715,15 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
        return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
 }
 
+struct page * __weak
+follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
+{
+       if (flags & FOLL_GET)
+               return NULL;
+
+       return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+}
+
 #ifdef CONFIG_MEMORY_FAILURE
 
 /*
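
The new follow_huge_pgd() in mm/hugetlb.c is declared __weak, so an
architecture that maps hugetlb pages at the pgd level (ppc64 in this series)
can supply its own definition.  That arch-side code is not part of this
commit; the fragment below is only a hypothetical sketch of what such an
override could look like, mirroring the generic copy and, like it, refusing
FOLL_GET because taking a stable page reference would need the page-table
lock.

/* Hypothetical arch override (illustration only, not from this commit). */
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags)
{
        /*
         * Refuse FOLL_GET, as the generic helper does: a stable page
         * reference would require holding the page-table lock.
         */
        if (flags & FOLL_GET)
                return NULL;

        if (!pgd_present(*pgd))
                return NULL;

        /*
         * The pgd entry maps one huge page; index to the subpage that
         * contains 'address'.
         */
        return pte_page(*(pte_t *)pgd) +
               ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}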