git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
powerpc/mm: Add devmap support for ppc64
author: Oliver O'Halloran <oohall@gmail.com>
Wed, 28 Jun 2017 01:32:34 +0000 (11:32 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Sun, 2 Jul 2017 10:40:28 +0000 (20:40 +1000)
Add support for the devmap bit on PTEs and PMDs for PPC64 Book3S.  This
is used to differentiate device backed memory from transparent huge
pages since they are handled in more or less the same manner by the core
mm code.

Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-hash64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_64.c

index 85bc9875c3be5f66c1a7339e769413ac7e870391..c0737c86a36272b1df213a41c4c75426c16d9178 100644 (file)
@@ -5,6 +5,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
+#include <linux/bug.h>
 #endif
 
 /*
@@ -79,6 +80,9 @@
 
 #define _PAGE_SOFT_DIRTY       _RPAGE_SW3 /* software: software dirty tracking */
 #define _PAGE_SPECIAL          _RPAGE_SW2 /* software: special page */
+#define _PAGE_DEVMAP           _RPAGE_SW1 /* software: ZONE_DEVICE page */
+#define __HAVE_ARCH_PTE_DEVMAP
+
 /*
  * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
  * Instead of fixing all of them, add an alternate define which
@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
        return pte;
 }
 
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
+static inline int pte_devmap(pte_t pte)
+{
+       return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        /* FIXME!! check whether this need to be a conditional */
@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void)
        return true;
 }
 
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
+static inline int pmd_devmap(pmd_t pmd)
+{
+       return pte_devmap(pmd_pte(pmd));
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+       return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+       return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline const int pud_pfn(pud_t pud)
+{
+       /*
+        * Currently all calls to pud_pfn() are gated around a pud_devmap()
+        * check so this should never be used. If it grows another user we
+        * want to know about it.
+        */
+       BUILD_BUG();
+       return 0;
+}
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
index ac16d1943022a83f86e898fb7d070436eafcdbe5..ba43754e96d2eff428c07ddbf1e0be18efa3c1c6 100644 (file)
@@ -252,7 +252,7 @@ static inline int radix__pgd_bad(pgd_t pgd)
 
 static inline int radix__pmd_trans_huge(pmd_t pmd)
 {
-       return !!(pmd_val(pmd) & _PAGE_PTE);
+       return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
 }
 
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
index 19659081da72d7833f237ef8f2970483df16d485..1ca196c00b2a676385a555e48b1322c8ea4260bb 100644 (file)
@@ -964,7 +964,7 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
                        if (pmd_none(pmd))
                                return NULL;
 
-                       if (pmd_trans_huge(pmd)) {
+                       if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                                if (is_thp)
                                        *is_thp = true;
                                ret_pte = (pte_t *) pmdp;
index 5fcb3dd74c139bf7ca997834cde3253192820616..31eed8fa8e991c0322e6b88112ce2ed974518f21 100644 (file)
@@ -32,7 +32,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 {
        int changed;
 #ifdef CONFIG_DEBUG_VM
-       WARN_ON(!pmd_trans_huge(*pmdp));
+       WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
        changed = !pmd_same(*(pmdp), entry);
@@ -59,7 +59,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 #ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
-       WARN_ON(!pmd_trans_huge(pmd));
+       WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
 #endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
index 16877f60fb87a8f32e91932d83bced22afb1c413..a0facee5881141cdcef4eb88588d9f0e1cc0d37a 100644 (file)
@@ -184,7 +184,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
        unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-       WARN_ON(!pmd_trans_huge(*pmdp));
+       WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
 #endif
 
@@ -216,6 +216,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
+       VM_BUG_ON(pmd_devmap(*pmdp));
 
        pmd = *pmdp;
        pmd_clear(pmdp);
@@ -296,6 +297,7 @@ void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
 {
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+       VM_BUG_ON(pmd_devmap(*pmdp));
 
        /*
         * We can't mark the pmd none here, because that will cause a race
index 419199d689280318a46a193b396ad948ff5a5cbc..f6af90371b1efa88cd571aa320e166468c96d124 100644 (file)
@@ -696,7 +696,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
        unsigned long old;
 
 #ifdef CONFIG_DEBUG_VM
-       WARN_ON(!radix__pmd_trans_huge(*pmdp));
+       WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
 #endif
 
@@ -714,6 +714,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
+       VM_BUG_ON(pmd_devmap(*pmdp));
        /*
         * khugepaged calls this for normal pmd
         */
index 1feb36e58a45c3bf6503b364c469f387f165ce3a..bce0ed50789ce6cc77f2978dfcd6b5f5e844a86e 100644 (file)
@@ -324,7 +324,7 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-       if (pmd_trans_huge(pmd) || pmd_huge(pmd))
+       if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
                return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
 }