diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 2a30c91f7977fdc0e27024cae6f8b28527071ada..0ee7398468476f57b301bde2fa7c7e13735bb3fb 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -109,6 +109,10 @@ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 static inline void __tlb_entry_erase(void)
 {
        write_aux_reg(ARC_REG_TLBPD1, 0);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
        write_aux_reg(ARC_REG_TLBPD0, 0);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
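
The new writes are gated on is_pae40_enabled(), which is defined outside this diff. A minimal sketch of how such a gate could look, assuming it simply reflects the Kconfig option (the real helper lives in an ARC MMU header and may differ):

/* Sketch only: a compile-time PAE40 gate; the kernel's actual helper is
 * defined in an ARC MMU header, not in this file.
 */
static inline int is_pae40_enabled(void)
{
	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
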
@@ -182,7 +186,7 @@ static void utlb_invalidate(void)
 
 }
 
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 {
        unsigned int idx;
 
@@ -225,10 +229,14 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
 }
 
-static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 {
        write_aux_reg(ARC_REG_TLBPD0, pd0);
        write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
 }
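
With PAE40 configured, pd1 is a 64-bit pte_t: the low word keeps the existing PD1 layout, and the shift by 32 moves the upper physical-address bits (bits [39:32] of a 40-bit address) into TLBPD1HI. A stand-alone sketch of that split; pd1_words and pae40_split() are made up for illustration and are not kernel API:

#include <stdint.h>

/* Illustrative only: how a 64-bit PTE value maps onto the two PD1 writes
 * made in tlb_entry_insert() above.
 */
struct pd1_words {
	uint32_t lo;	/* written to ARC_REG_TLBPD1 (low PFN bits + flags) */
	uint32_t hi;	/* written to ARC_REG_TLBPD1HI (paddr bits [39:32]) */
};

static struct pd1_words pae40_split(uint64_t pd1)
{
	struct pd1_words w = {
		.lo = (uint32_t)pd1,
		.hi = (uint32_t)(pd1 >> 32),
	};
	return w;
}
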
 
@@ -249,6 +257,10 @@ noinline void local_flush_tlb_all(void)
 
        /* Load PD0 and PD1 with template for a Blank Entry */
        write_aux_reg(ARC_REG_TLBPD1, 0);
+
+       if (is_pae40_enabled())
+               write_aux_reg(ARC_REG_TLBPD1HI, 0);
+
        write_aux_reg(ARC_REG_TLBPD0, 0);
 
        for (entry = 0; entry < num_tlb; entry++) {
@@ -499,11 +511,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 /*
  * Routine to create a TLB entry
  */
-void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
 {
        unsigned long flags;
        unsigned int asid_or_sasid, rwx;
-       unsigned long pd0, pd1;
+       unsigned long pd0;
+       pte_t pd1;
 
        /*
         * create_tlb() assumes that current->mm == vma->mm, since
@@ -535,9 +548,9 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
        local_irq_save(flags);
 
-       tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
+       tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
 
-       address &= PAGE_MASK;
+       vaddr &= PAGE_MASK;
 
        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
@@ -547,7 +560,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-       pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
+       pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
        /*
         * ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -583,7 +596,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-       unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
        struct page *page = pfn_to_page(pte_pfn(*ptep));
 
        create_tlb(vma, vaddr, ptep);
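
The switch from unsigned long to phys_addr_t matters because, with PAE40, the physical address extracted from the PTE can exceed 32 bits while unsigned long stays 32-bit on ARC. A self-contained illustration of the truncation this avoids (the address value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A made-up 40-bit physical address such as PAE40 permits. */
	uint64_t pte_paddr = 0x1234567000ULL;

	uint32_t as_ulong32 = (uint32_t)pte_paddr;	/* what a 32-bit unsigned long keeps */
	uint64_t as_phys    = pte_paddr;		/* what a 64-bit phys_addr_t keeps */

	printf("32-bit: 0x%08x (bits 39:32 lost)\n", as_ulong32);
	printf("64-bit: 0x%010llx\n", (unsigned long long)as_phys);
	return 0;
}
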
@@ -785,10 +798,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
                          IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
 
        n += scnprintf(buf + n, len - n,
-                     "MMU [v%x]\t: %dK PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d\n",
+                     "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
                       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
                       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
-                      p_mmu->u_dtlb, p_mmu->u_itlb);
+                      p_mmu->u_dtlb, p_mmu->u_itlb,
+                      IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
 
        return buf;
 }
@@ -821,6 +835,9 @@ void arc_mmu_init(void)
                panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
                      (unsigned long)TO_MB(HPAGE_PMD_SIZE));
 
+       if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
+               panic("Hardware doesn't support PAE40\n");
+
        /* Enable the MMU */
        write_aux_reg(ARC_REG_PID, MMU_ENABLE);
 
@@ -856,15 +873,15 @@ void arc_mmu_init(void)
  *      the duplicate one.
  * -Knob to be verbose abt it.(TODO: hook them up to debugfs)
  */
-volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */
-volatile int dup_pd_silent; /* Be silent about it or complain (default) */
 
 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                          struct pt_regs *regs)
 {
-       int set, way, n;
-       unsigned long flags, is_valid;
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
-       unsigned int pd0[mmu->ways], pd1[mmu->ways];
+       unsigned int pd0[mmu->ways];
+       unsigned long flags;
+       int set;
 
        local_irq_save(flags);
 
@@ -874,14 +891,16 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
        /* loop thru all sets of TLB */
        for (set = 0; set < mmu->sets; set++) {
 
+               int is_valid, way;
+
                /* read out all the ways of current set */
                for (way = 0, is_valid = 0; way < mmu->ways; way++) {
                        write_aux_reg(ARC_REG_TLBINDEX,
                                          SET_WAY_TO_IDX(mmu, set, way));
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
                        pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
-                       pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
                        is_valid |= pd0[way] & _PAGE_PRESENT;
+                       pd0[way] &= PAGE_MASK;
                }
 
                /* If all the WAYS in SET are empty, skip to next SET */
@@ -890,30 +909,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 
                /* Scan the set for duplicate ways: needs a nested loop */
                for (way = 0; way < mmu->ways - 1; way++) {
+
+                       int n;
+
                        if (!pd0[way])
                                continue;
 
                        for (n = way + 1; n < mmu->ways; n++) {
-                               if ((pd0[way] & PAGE_MASK) ==
-                                   (pd0[n] & PAGE_MASK)) {
-
-                                       if (dup_pd_verbose) {
-                                               pr_info("Duplicate PD's @"
-                                                       "[%d:%d]/[%d:%d]\n",
-                                                    set, way, set, n);
-                                               pr_info("TLBPD0[%u]: %08x\n",
-                                                    way, pd0[way]);
-                                       }
-
-                                       /*
-                                        * clear entry @way and not @n. This is
-                                        * critical to our optimised loop
-                                        */
-                                       pd0[way] = pd1[way] = 0;
-                                       write_aux_reg(ARC_REG_TLBINDEX,
+                               if (pd0[way] != pd0[n])
+                                       continue;
+
+                               if (!dup_pd_silent)
+                                       pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
+                                               pd0[way], set, way, n);
+
+                               /*
+                                * clear entry @way and not @n.
+                                * This is critical to our optimised loop
+                                */
+                               pd0[way] = 0;
+                               write_aux_reg(ARC_REG_TLBINDEX,
                                                SET_WAY_TO_IDX(mmu, set, way));
-                                       __tlb_entry_erase();
-                               }
+                               __tlb_entry_erase();
                        }
                }
        }
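
The rewritten duplicate scan can be exercised outside the kernel. A small user-space model of the same loop (the pd0 values and set number are made up) shows that the earlier way of each duplicate pair is cleared and the later copy survives for subsequent comparisons:

#include <stdio.h>

/* Stand-alone model of the duplicate-PD0 scan above: skip already-cleared
 * ways, compare each way against the later ways, and clear the earlier way
 * of each duplicate pair (mirroring "clear entry @way and not @n").
 */
int main(void)
{
	unsigned int pd0[4] = { 0x1000, 0x2000, 0x1000, 0x1000 };
	int ways = 4, set = 0, way, n;

	for (way = 0; way < ways - 1; way++) {
		if (!pd0[way])
			continue;

		for (n = way + 1; n < ways; n++) {
			if (pd0[way] != pd0[n])
				continue;

			printf("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
			       pd0[way], set, way, n);
			pd0[way] = 0;	/* clear @way, keep @n */
		}
	}
	/* Only way 3 still holds 0x1000 at this point. */
	return 0;
}
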