powerpc/THP: Add code to handle HPTE faults for hugepages
Author:     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
AuthorDate: Thu, 20 Jun 2013 09:00:21 +0000 (14:30 +0530)
Commit:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
CommitDate: Fri, 21 Jun 2013 06:01:56 +0000 (16:01 +1000)
The deposited PTE page in the second half of the PMD table is used to
track the state of the hash PTEs. After updating the HPTE, we mark the
corresponding slot in the deposited PTE page as valid.
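
The deposited page is treated as a plain byte array, one status byte per
base-page-sized slice of the 16M hugepage. Here is a minimal sketch of the
encoding the fault handler below relies on; the names (hpte_valid(),
hpte_hash_index(), mark_hpte_slot_valid()) match the callers in the diff,
but these bodies are illustrative, the real definitions live in the
page-table headers added earlier in this series:

    /* Bit 0 of each byte is the valid bit; the remaining bits carry
     * the hash slot index (secondary-group bit plus the index within
     * the 8-entry HPTE group). */
    static inline unsigned int hpte_valid(unsigned char *hpte_slot_array,
                                          int index)
    {
            return hpte_slot_array[index] & 0x1;
    }

    static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
                                               int index)
    {
            return hpte_slot_array[index] >> 1;
    }

    static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
                                            unsigned int index,
                                            unsigned int hidx)
    {
            hpte_slot_array[index] = hidx << 1 | 0x1;
    }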

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/mm/Makefile
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugepage-hash64.c [new file with mode: 0644]

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2accc9611248ff9a36357b9bcc9078f00743af4c..3d6fbb00d20b4e1bacd39566f6ea2ec6cfecd105 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -340,6 +340,20 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __hash_page_thp(unsigned long ea, unsigned long access,
+                          unsigned long vsid, pmd_t *pmdp, unsigned long trap,
+                          int local, int ssize, unsigned int psize);
+#else
+static inline int __hash_page_thp(unsigned long ea, unsigned long access,
+                                 unsigned long vsid, pmd_t *pmdp,
+                                 unsigned long trap, int local,
+                                 int ssize, unsigned int psize)
+{
+       BUG();
+       return -1;
+}
+#endif
 extern void hash_failure_debug(unsigned long ea, unsigned long access,
                               unsigned long vsid, unsigned long trap,
                               int ssize, int psize, int lpsize,
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index ff0379cdeecac0fde9221301bcba8bb2a8f69577..51230ee6a4075170d9efcc52038a1c3919d4b495 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -32,6 +32,7 @@ ifeq ($(CONFIG_HUGETLB_PAGE),y)
 obj-$(CONFIG_PPC_STD_MMU_64)   += hugetlbpage-hash64.o
 obj-$(CONFIG_PPC_BOOK3E_MMU)   += hugetlbpage-book3e.o
 endif
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index e8434ca6efd4d67358d9a36f1785dd9b3d337c8f..7a81e866e7b1b5cc5ac284a4fb08ffabf410e02b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1050,13 +1050,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                goto bail;
        }
 
-#ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift) {
-               rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
-                                       ssize, hugeshift, psize);
+               if (pmd_trans_huge(*(pmd_t *)ptep))
+                       rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
+                                            trap, local, ssize, psize);
+#ifdef CONFIG_HUGETLB_PAGE
+               else
+                       rc = __hash_page_huge(ea, access, vsid, ptep, trap,
+                                             local, ssize, hugeshift, psize);
+#else
+               else {
+                       /*
+                        * If we have hugeshift and it is not transhuge
+                        * while hugetlb is disabled, something is really wrong.
+                        */
+                       rc = 1;
+                       WARN_ON(1);
+               }
+#endif
                goto bail;
        }
-#endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
new file mode 100644
index 0000000..3c22fa3
--- /dev/null
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+/*
+ * PPC64 THP Support for hash based MMUs
+ */
+#include <linux/mm.h>
+#include <asm/machdep.h>
+
+int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+                   pmd_t *pmdp, unsigned long trap, int local, int ssize,
+                   unsigned int psize)
+{
+       unsigned int index, valid;
+       unsigned char *hpte_slot_array;
+       unsigned long rflags, pa, hidx;
+       unsigned long old_pmd, new_pmd;
+       int ret, lpsize = MMU_PAGE_16M;
+       unsigned long vpn, hash, shift, slot;
+
+       /*
+        * atomically mark the linux large page PMD busy and dirty
+        */
+       do {
+               old_pmd = pmd_val(*pmdp);
+               /* If PMD busy, retry the access */
+               if (unlikely(old_pmd & _PAGE_BUSY))
+                       return 0;
+               /* If PMD permissions don't match, take page fault */
+               if (unlikely(access & ~old_pmd))
+                       return 1;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access
+                */
+               new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pmd |= _PAGE_DIRTY;
+       } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
+                                         old_pmd, new_pmd));
+       /*
+        * PP bits. _PAGE_USER is already PP bit 0x2, so we only
+        * need to add in 0x1 if it's a read-only user page
+        */
+       rflags = new_pmd & _PAGE_USER;
+       if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
+                                          (new_pmd & _PAGE_DIRTY)))
+               rflags |= 0x1;
+       /*
+        * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
+        */
+       rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+
+#if 0
+       if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+
+               /*
+                * No CPU has hugepages but lacks no execute, so we
+                * don't need to worry about that case
+                */
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
+       }
+#endif
+       /*
+        * Find the slot index details for this ea, using base page size.
+        */
+       shift = mmu_psize_defs[psize].shift;
+       index = (ea & ~HPAGE_PMD_MASK) >> shift;
+       BUG_ON(index >= 4096);
+
+       vpn = hpt_vpn(ea, vsid, ssize);
+       hash = hpt_hash(vpn, shift, ssize);
+       hpte_slot_array = get_hpte_slot_array(pmdp);
+
+       valid = hpte_valid(hpte_slot_array, index);
+       if (valid) {
+               /* update the hpte bits */
+               hidx =  hpte_hash_index(hpte_slot_array, index);
+               if (hidx & _PTEIDX_SECONDARY)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += hidx & _PTEIDX_GROUP_IX;
+
+               ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
+                                          psize, lpsize, ssize, local);
+               /*
+                * We failed to update, try to insert a new entry.
+                */
+               if (ret == -1) {
+                       /*
+                        * large pte is marked busy, so we can be sure
+                        * nobody is looking at hpte_slot_array. hence we can
+                        * safely update this here.
+                        */
+                       valid = 0;
+                       new_pmd &= ~_PAGE_HPTEFLAGS;
+                       hpte_slot_array[index] = 0;
+               } else
+                       /* clear the busy bits and set the hash pte bits */
+                       new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+       }
+
+       if (!valid) {
+               unsigned long hpte_group;
+
+               /* insert new entry */
+               pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+repeat:
+               hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+               /* clear the busy bits and set the hash pte bits */
+               new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+
+               /* Add in WIMG bits */
+               rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+                                     _PAGE_COHERENT | _PAGE_GUARDED));
+
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                         psize, lpsize, ssize);
+               /*
+                * Primary is full, try the secondary
+                */
+               if (unlikely(slot == -1)) {
+                       hpte_group = ((~hash & htab_hash_mask) *
+                                     HPTES_PER_GROUP) & ~0x7UL;
+                       slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                                 rflags, HPTE_V_SECONDARY,
+                                                 psize, lpsize, ssize);
+                       if (slot == -1) {
+                               if (mftb() & 0x1)
+                                       hpte_group = ((hash & htab_hash_mask) *
+                                                     HPTES_PER_GROUP) & ~0x7UL;
+
+                               ppc_md.hpte_remove(hpte_group);
+                               goto repeat;
+                       }
+               }
+               /*
+                * Hypervisor failure. Restore old pmd and return -1
+                * similar to __hash_page_*
+                */
+               if (unlikely(slot == -2)) {
+                       *pmdp = __pmd(old_pmd);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          psize, lpsize, old_pmd);
+                       return -1;
+               }
+               /*
+                * large pte is marked busy, so we can be sure
+                * nobody is looking at hpte_slot_array. hence we can
+                * safely update this here.
+                */
+               mark_hpte_slot_valid(hpte_slot_array, index, slot);
+       }
+       /*
+        * No need to use ldarx/stdcx here
+        */
+       *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+       return 0;
+}
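
A note on the slot arithmetic used in both the update and insert paths
above: the hash table is organised as groups of eight HPTEs, and the
stored hidx byte records the secondary-group bit plus the index within
the group. A self-contained sketch of the recovery step, where
thp_hpte_slot() is a hypothetical helper name and htab_hash_mask,
HPTES_PER_GROUP, _PTEIDX_SECONDARY and _PTEIDX_GROUP_IX come from the
hash MMU headers:

    /* Rebuild the global HPTE slot number from the hash value and a
     * per-subpage hidx byte, mirroring the valid-entry path above. */
    static unsigned long thp_hpte_slot(unsigned long hash, unsigned int hidx)
    {
            if (hidx & _PTEIDX_SECONDARY)
                    hash = ~hash;   /* entry lives in the secondary group */
            return (hash & htab_hash_mask) * HPTES_PER_GROUP +
                    (hidx & _PTEIDX_GROUP_IX);
    }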