diff --git a/mm/gup.c b/mm/gup.c
index 3ab78dc3db7df4fd3ac303080eb1fcaa456eb7ea..23f01c40c88f63cc88ff62548a4d120b0c406a8e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -208,72 +208,28 @@ no_page:
        return no_page_table(vma, flags);
 }
 
-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
-                             unsigned long address, unsigned int flags,
-                             unsigned int *page_mask)
+static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+                                   unsigned long address, pud_t *pudp,
+                                   unsigned int flags, unsigned int *page_mask)
 {
-       pgd_t *pgd;
-       p4d_t *p4d;
-       pud_t *pud;
        pmd_t *pmd;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;
 
-       *page_mask = 0;
-
-       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
-       if (!IS_ERR(page)) {
-               BUG_ON(flags & FOLL_GET);
-               return page;
-       }
-
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return no_page_table(vma, flags);
-       p4d = p4d_offset(pgd, address);
-       if (p4d_none(*p4d))
-               return no_page_table(vma, flags);
-       BUILD_BUG_ON(p4d_huge(*p4d));
-       if (unlikely(p4d_bad(*p4d)))
-               return no_page_table(vma, flags);
-       pud = pud_offset(p4d, address);
-       if (pud_none(*pud))
+       pmd = pmd_offset(pudp, address);
+       if (pmd_none(*pmd))
                return no_page_table(vma, flags);
-       if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-               page = follow_huge_pud(mm, address, pud, flags);
+       if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+               page = follow_huge_pmd(mm, address, pmd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
-       if (pud_devmap(*pud)) {
-               ptl = pud_lock(mm, pud);
-               page = follow_devmap_pud(vma, address, pud, flags);
-               spin_unlock(ptl);
-               if (page)
-                       return page;
-       }
-       if (unlikely(pud_bad(*pud)))
-               return no_page_table(vma, flags);
-
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd))
-               return no_page_table(vma, flags);
-       if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
-               page = follow_huge_pmd(mm, address, pmd, flags);
+       if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+               page = follow_huge_pd(vma, address,
+                                     __hugepd(pmd_val(*pmd)), flags,
+                                     PMD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
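
The hunk above is the core of the refactor: the single pgd/p4d/pud/pmd walk in
follow_page_mask() is split so that each helper owns exactly one table level,
handles that level's huge-entry cases, and hands the address one level down. A
toy sketch of the same shape outside the kernel (illustrative names and a fake
4-level radix table, not kernel code):

#include <stdint.h>
#include <stddef.h>

#define LEVELS		4
#define BITS_PER_LEVEL	9
#define PAGE_SHIFT	12

typedef uint64_t entry_t;

/* Index of @addr within the table at @level (0 = top, like the pgd). */
static size_t level_index(uint64_t addr, int level)
{
	int shift = PAGE_SHIFT + BITS_PER_LEVEL * (LEVELS - 1 - level);

	return (addr >> shift) & ((1UL << BITS_PER_LEVEL) - 1);
}

/*
 * One helper per level, mirroring follow_p4d/pud/pmd_mask: validate
 * this level's entry, then descend.  A real walk would also test for
 * huge entries at each level, exactly as the kernel helpers do.
 */
static entry_t *walk_one_level(entry_t *table, uint64_t addr, int level)
{
	entry_t *slot = &table[level_index(addr, level)];

	if (*slot == 0)
		return NULL;			/* like pXd_none() */
	if (level == LEVELS - 1)
		return slot;			/* leaf entry, like the pte */
	return walk_one_level((entry_t *)(uintptr_t)*slot,
			      addr, level + 1);
}

The kernel version performs the same descent through a chain of named helpers
(follow_page_mask -> follow_p4d_mask -> follow_pud_mask -> follow_pmd_mask)
rather than recursion, which keeps each level's special cases in one place.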
@@ -319,13 +275,131 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                return ret ? ERR_PTR(ret) :
                        follow_page_pte(vma, address, pmd, flags);
        }
-
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        *page_mask = HPAGE_PMD_NR - 1;
        return page;
 }
 
+
+static struct page *follow_pud_mask(struct vm_area_struct *vma,
+                                   unsigned long address, p4d_t *p4dp,
+                                   unsigned int flags, unsigned int *page_mask)
+{
+       pud_t *pud;
+       spinlock_t *ptl;
+       struct page *page;
+       struct mm_struct *mm = vma->vm_mm;
+
+       pud = pud_offset(p4dp, address);
+       if (pud_none(*pud))
+               return no_page_table(vma, flags);
+       if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+               page = follow_huge_pud(mm, address, pud, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+       if (is_hugepd(__hugepd(pud_val(*pud)))) {
+               page = follow_huge_pd(vma, address,
+                                     __hugepd(pud_val(*pud)), flags,
+                                     PUD_SHIFT);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+       if (pud_devmap(*pud)) {
+               ptl = pud_lock(mm, pud);
+               page = follow_devmap_pud(vma, address, pud, flags);
+               spin_unlock(ptl);
+               if (page)
+                       return page;
+       }
+       if (unlikely(pud_bad(*pud)))
+               return no_page_table(vma, flags);
+
+       return follow_pmd_mask(vma, address, pud, flags, page_mask);
+}
+
+
+static struct page *follow_p4d_mask(struct vm_area_struct *vma,
+                                   unsigned long address, pgd_t *pgdp,
+                                   unsigned int flags, unsigned int *page_mask)
+{
+       p4d_t *p4d;
+       struct page *page;
+
+       p4d = p4d_offset(pgdp, address);
+       if (p4d_none(*p4d))
+               return no_page_table(vma, flags);
+       BUILD_BUG_ON(p4d_huge(*p4d));
+       if (unlikely(p4d_bad(*p4d)))
+               return no_page_table(vma, flags);
+
+       if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+               page = follow_huge_pd(vma, address,
+                                     __hugepd(p4d_val(*p4d)), flags,
+                                     P4D_SHIFT);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+       return follow_pud_mask(vma, address, p4d, flags, page_mask);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+                             unsigned long address, unsigned int flags,
+                             unsigned int *page_mask)
+{
+       pgd_t *pgd;
+       struct page *page;
+       struct mm_struct *mm = vma->vm_mm;
+
+       *page_mask = 0;
+
+       /* make this handle hugepd */
+       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+       if (!IS_ERR(page)) {
+               BUG_ON(flags & FOLL_GET);
+               return page;
+       }
+
+       pgd = pgd_offset(mm, address);
+
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               return no_page_table(vma, flags);
+
+       if (pgd_huge(*pgd)) {
+               page = follow_huge_pgd(mm, address, pgd, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+       if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+               page = follow_huge_pd(vma, address,
+                                     __hugepd(pgd_val(*pgd)), flags,
+                                     PGDIR_SHIFT);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+       }
+
+       return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+}
+
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
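
Per the kernel-doc above, callers of follow_page_mask() see one of three
outcomes: a valid struct page, NULL when nothing is mapped, or an ERR_PTR for
a mapping without a page descriptor. A minimal caller sketch, kernel context
assumed; lookup_user_page() is a hypothetical name, and the in-tree
follow_page() wrapper in <linux/mm.h> does essentially this while discarding
the mask:

/*
 * Hypothetical wrapper showing the contract documented above:
 * NULL for no mapping, ERR_PTR for special mappings, otherwise
 * the struct page.  On a THP hit, page_mask comes back as
 * HPAGE_PMD_NR - 1.
 */
static struct page *lookup_user_page(struct vm_area_struct *vma,
				     unsigned long address)
{
	unsigned int page_mask;
	struct page *page;

	page = follow_page_mask(vma, address, 0, &page_mask);
	if (IS_ERR_OR_NULL(page))
		return NULL;		/* no usable page descriptor */
	return page;
}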
@@ -1349,16 +1423,15 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                return __gup_device_huge_pmd(orig, addr, end, pages, nr);
 
        refs = 0;
-       head = pmd_page(orig);
-       page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
-               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
+       head = compound_head(pmd_page(orig));
        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
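
This hunk and the matching gup_huge_pud()/gup_huge_pgd() hunks below make the
same change: the per-page VM_BUG_ON_PAGE() goes away, and the speculative
reference is taken on compound_head() of the mapped page, computed after the
loop, instead of assuming pmd_page(orig) is already the compound head. A
sketch of the resulting pattern (grab_huge_range() is a hypothetical name;
kernel context assumed):

/*
 * Shape shared by the three gup_huge_* fast paths after this change:
 * record the subpages with no references taken, then take one batched
 * speculative reference on the compound head, rolling back *nr if
 * that fails.
 */
static int grab_huge_range(struct page *first, int nr_pages,
			   struct page **pages, int *nr)
{
	struct page *head;
	int i;

	for (i = 0; i < nr_pages; i++)
		pages[(*nr)++] = first + i;	/* refs not yet taken */

	head = compound_head(first);		/* refcount lives on the head */
	if (!page_cache_add_speculative(head, nr_pages)) {
		*nr -= nr_pages;		/* undo on failure */
		return 0;
	}
	return 1;
}

Taking the reference on the head matters because refcounts of a compound page
are maintained on its head page; if the table entry points at a tail page,
referencing that page directly would be wrong.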
@@ -1388,16 +1461,15 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                return __gup_device_huge_pud(orig, addr, end, pages, nr);
 
        refs = 0;
-       head = pud_page(orig);
-       page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+       page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        do {
-               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
+       head = compound_head(pud_page(orig));
        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
@@ -1426,16 +1498,15 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 
        BUILD_BUG_ON(pgd_devmap(orig));
        refs = 0;
-       head = pgd_page(orig);
-       page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+       page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
        do {
-               VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
+       head = compound_head(pgd_page(orig));
        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;