mm: add page_check_address_transhuge() helper
author Vladimir Davydov <vdavydov@virtuozzo.com>
Sat, 16 Jan 2016 00:54:45 +0000 (16:54 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Jan 2016 01:56:32 +0000 (17:56 -0800)
page_referenced_one() and page_idle_clear_pte_refs_one() duplicate the
code for looking up the pte of a (possibly transhuge) page.  Move this
code to a new helper function, page_check_address_transhuge(), and make
the above-mentioned functions use it.

This is just a cleanup; no functional changes are intended.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/page_idle.c
mm/rmap.c

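Before the per-file diffs, a minimal caller-side sketch of the contract the
new helper establishes (the function example_rmap_walker() is hypothetical;
the pattern is lifted directly from the converted
page_idle_clear_pte_refs_one() below): on success the helper returns true
with the page table lock held in *ptl, and *pte is NULL exactly when the
page is PMD-mapped.

	/*
	 * Hypothetical example_rmap_walker(): a sketch of the calling
	 * convention, following the two call sites converted by this patch.
	 */
	static int example_rmap_walker(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long addr, void *arg)
	{
		struct mm_struct *mm = vma->vm_mm;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;

		/* false means @page is not mapped at @addr in @mm */
		if (!page_check_address_transhuge(page, mm, addr,
						  &pmd, &pte, &ptl))
			return SWAP_AGAIN;

		if (pte) {
			/* PTE-mapped: operate on pte, e.g.
			 * ptep_clear_young_notify(), then unmap it */
			pte_unmap(pte);
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			/* PMD-mapped THP: pte is NULL, operate on pmd */
		} else {
			/* with THP disabled the helper never reports
			 * a pmd mapping */
			WARN_ON_ONCE(1);
		}

		spin_unlock(ptl);	/* helper returned with *ptl held */
		return SWAP_AGAIN;
	}

Note the asymmetry in teardown: a PTE mapping must be pte_unmap()ed by the
caller, while both cases share the final spin_unlock(ptl).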
index ebf3750e42b2c399db2ea034deef752da1235bdf..77d1ba57d495fc5dffb3873c6f03fb955b435d10 100644 (file)
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -215,6 +215,25 @@ static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
        return ptep;
 }
 
+/*
+ * Used by idle page tracking to check if a page was referenced via page
+ * tables.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+                                 unsigned long address, pmd_t **pmdp,
+                                 pte_t **ptep, spinlock_t **ptlp);
+#else
+static inline bool page_check_address_transhuge(struct page *page,
+                               struct mm_struct *mm, unsigned long address,
+                               pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
+{
+       *ptep = page_check_address(page, mm, address, ptlp, 0);
+       *pmdp = NULL;
+       return !!*ptep;
+}
+#endif
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
index 2c553ba969f862aa24c08c177b3d1c2cf81ba3df..4ea9c4ef5146b8b784848a6710b70e5fc0dfcd41 100644 (file)
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -55,71 +55,26 @@ static int page_idle_clear_pte_refs_one(struct page *page,
                                        unsigned long addr, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
-       spinlock_t *ptl;
-       pgd_t *pgd;
-       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
+       spinlock_t *ptl;
        bool referenced = false;
 
-       pgd = pgd_offset(mm, addr);
-       if (!pgd_present(*pgd))
-               return SWAP_AGAIN;
-       pud = pud_offset(pgd, addr);
-       if (!pud_present(*pud))
+       if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
                return SWAP_AGAIN;
-       pmd = pmd_offset(pud, addr);
-
-       if (pmd_trans_huge(*pmd)) {
-               ptl = pmd_lock(mm, pmd);
-               if (!pmd_present(*pmd))
-                       goto unlock_pmd;
-               if (unlikely(!pmd_trans_huge(*pmd))) {
-                       spin_unlock(ptl);
-                       goto map_pte;
-               }
-
-               if (pmd_page(*pmd) != page)
-                       goto unlock_pmd;
 
+       if (pte) {
+               referenced = ptep_clear_young_notify(vma, addr, pte);
+               pte_unmap(pte);
+       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                referenced = pmdp_clear_young_notify(vma, addr, pmd);
-               spin_unlock(ptl);
-               goto found;
-unlock_pmd:
-               spin_unlock(ptl);
-               return SWAP_AGAIN;
        } else {
-               pmd_t pmde = *pmd;
-
-               barrier();
-               if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-                       return SWAP_AGAIN;
-
-       }
-map_pte:
-       pte = pte_offset_map(pmd, addr);
-       if (!pte_present(*pte)) {
-               pte_unmap(pte);
-               return SWAP_AGAIN;
+               /* unexpected pmd-mapped page? */
+               WARN_ON_ONCE(1);
        }
 
-       ptl = pte_lockptr(mm, pmd);
-       spin_lock(ptl);
-
-       if (!pte_present(*pte)) {
-               pte_unmap_unlock(pte, ptl);
-               return SWAP_AGAIN;
-       }
-
-       /* THP can be referenced by any subpage */
-       if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
-               pte_unmap_unlock(pte, ptl);
-               return SWAP_AGAIN;
-       }
+       spin_unlock(ptl);
 
-       referenced = ptep_clear_young_notify(vma, addr, pte);
-       pte_unmap_unlock(pte, ptl);
-found:
        if (referenced) {
                clear_page_idle(page);
                /*
index 6127c00b2262d9f7a37e5f7908dcdb0eb496e666..cdc2a885a4cde7d3f8312a4fbb97df4df0b2a2c4 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -798,48 +798,44 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
        return 1;
 }
 
-struct page_referenced_arg {
-       int mapcount;
-       int referenced;
-       unsigned long vm_flags;
-       struct mem_cgroup *memcg;
-};
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * arg: page_referenced_arg will be passed
+ * Check that @page is mapped at @address into @mm. In contrast to
+ * page_check_address(), this function can handle transparent huge pages.
+ *
+ * On success returns true with pte mapped and locked. For PMD-mapped
+ * transparent huge pages *@ptep is set to NULL.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-                       unsigned long address, void *arg)
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+                                 unsigned long address, pmd_t **pmdp,
+                                 pte_t **ptep, spinlock_t **ptlp)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       spinlock_t *ptl;
-       int referenced = 0;
-       struct page_referenced_arg *pra = arg;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
+       spinlock_t *ptl;
 
        if (unlikely(PageHuge(page))) {
                /* when pud is not present, pte will be NULL */
                pte = huge_pte_offset(mm, address);
                if (!pte)
-                       return SWAP_AGAIN;
+                       return false;
 
                ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
+               pmd = NULL;
                goto check_pte;
        }
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
-               return SWAP_AGAIN;
+               return false;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
-               return SWAP_AGAIN;
+               return false;
        pmd = pmd_offset(pud, address);
 
        if (pmd_trans_huge(*pmd)) {
-               int ret = SWAP_AGAIN;
-
                ptl = pmd_lock(mm, pmd);
                if (!pmd_present(*pmd))
                        goto unlock_pmd;
@@ -851,31 +847,23 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                if (pmd_page(*pmd) != page)
                        goto unlock_pmd;
 
-               if (vma->vm_flags & VM_LOCKED) {
-                       pra->vm_flags |= VM_LOCKED;
-                       ret = SWAP_FAIL; /* To break the loop */
-                       goto unlock_pmd;
-               }
-
-               if (pmdp_clear_flush_young_notify(vma, address, pmd))
-                       referenced++;
-               spin_unlock(ptl);
+               pte = NULL;
                goto found;
 unlock_pmd:
                spin_unlock(ptl);
-               return ret;
+               return false;
        } else {
                pmd_t pmde = *pmd;
 
                barrier();
                if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-                       return SWAP_AGAIN;
+                       return false;
        }
 map_pte:
        pte = pte_offset_map(pmd, address);
        if (!pte_present(*pte)) {
                pte_unmap(pte);
-               return SWAP_AGAIN;
+               return false;
        }
 
        ptl = pte_lockptr(mm, pmd);
@@ -884,35 +872,74 @@ check_pte:
 
        if (!pte_present(*pte)) {
                pte_unmap_unlock(pte, ptl);
-               return SWAP_AGAIN;
+               return false;
        }
 
        /* THP can be referenced by any subpage */
        if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
                pte_unmap_unlock(pte, ptl);
-               return SWAP_AGAIN;
+               return false;
        }
+found:
+       *ptep = pte;
+       *pmdp = pmd;
+       *ptlp = ptl;
+       return true;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+struct page_referenced_arg {
+       int mapcount;
+       int referenced;
+       unsigned long vm_flags;
+       struct mem_cgroup *memcg;
+};
+/*
+ * arg: page_referenced_arg will be passed
+ */
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+                       unsigned long address, void *arg)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct page_referenced_arg *pra = arg;
+       pmd_t *pmd;
+       pte_t *pte;
+       spinlock_t *ptl;
+       int referenced = 0;
+
+       if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
+               return SWAP_AGAIN;
 
        if (vma->vm_flags & VM_LOCKED) {
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap(pte);
+               spin_unlock(ptl);
                pra->vm_flags |= VM_LOCKED;
                return SWAP_FAIL; /* To break the loop */
        }
 
-       if (ptep_clear_flush_young_notify(vma, address, pte)) {
-               /*
-                * Don't treat a reference through a sequentially read
-                * mapping as such.  If the page has been used in
-                * another mapping, we will catch it; if this other
-                * mapping is already gone, the unmap path will have
-                * set PG_referenced or activated the page.
-                */
-               if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+       if (pte) {
+               if (ptep_clear_flush_young_notify(vma, address, pte)) {
+                       /*
+                        * Don't treat a reference through a sequentially read
+                        * mapping as such.  If the page has been used in
+                        * another mapping, we will catch it; if this other
+                        * mapping is already gone, the unmap path will have
+                        * set PG_referenced or activated the page.
+                        */
+                       if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                               referenced++;
+               }
+               pte_unmap(pte);
+       } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+               if (pmdp_clear_flush_young_notify(vma, address, pmd))
                        referenced++;
+       } else {
+               /* unexpected pmd-mapped page? */
+               WARN_ON_ONCE(1);
        }
-       pte_unmap_unlock(pte, ptl);
+       spin_unlock(ptl);
 
-found:
        if (referenced)
                clear_page_idle(page);
        if (test_and_clear_page_young(page))