]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
powerpc/64/kexec: Fix MMU cleanup on radix
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>
Fri, 19 Aug 2016 08:52:37 +0000 (14:22 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Thu, 22 Sep 2016 21:54:17 +0000 (07:54 +1000)
Just using the hash ops won't work anymore since radix will have
NULL in there. Instead create an mmu_cleanup_all() function which
will do the right thing based on the MMU mode.

For Radix, for now I clear UPRT and the PTCR, effectively switching
back to Radix with no partition table setup.

Currently I set it to NULL on BookE, though it might be a good idea
to wipe the TLB there (Scott?)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/mmu-book3e.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c

index cd4f04a7480299eff334d69eed9229e6544f8276..b62a8d43a06c203202020a78d960b71408ab79d0 100644 (file)
@@ -313,6 +313,9 @@ extern int book3e_htw_mode;
  * return 1, indicating that the tlb requires preloading.
  */
 #define HUGETLB_NEED_PRELOAD
+
+#define mmu_cleanup_all NULL
+
 #endif
 
 #endif /* !__ASSEMBLY__ */
index e2fb408f83983617591e3fe3511f9691b76f0b3d..79c989a05aa1c2c513d832fbd9aef7ddc4b00fc0 100644 (file)
@@ -204,6 +204,10 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
  * make it match the size our of bolted TLB area
  */
 extern u64 ppc64_rma_size;
+
+/* Cleanup function used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
 #endif /* CONFIG_PPC64 */
 
 struct mm_struct;
index 4c780a3422821a83887f4433038f51769f05c881..7a7793211ae73c3ffe0c56fdbc1d4c006bd041f8 100644 (file)
@@ -55,9 +55,6 @@ int default_machine_kexec_prepare(struct kimage *image)
        const unsigned long *basep;
        const unsigned int *sizep;
 
-       if (!mmu_hash_ops.hpte_clear_all)
-               return -ENOENT;
-
        /*
         * Since we use the kernel fault handlers and paging code to
         * handle the virtual mode, we must make sure no destination
@@ -379,13 +376,8 @@ void default_machine_kexec(struct kimage *image)
         * a toc is easier in C, so pass in what we can.
         */
        kexec_sequence(&kexec_stack, image->start, image,
-                       page_address(image->control_code_page),
-#ifdef CONFIG_PPC_STD_MMU
-                       mmu_hash_ops.hpte_clear_all
-#else
-                       NULL
-#endif
-       );
+                      page_address(image->control_code_page),
+                      mmu_cleanup_all);
        /* NOTREACHED */
 }
 
index 7328886bca4c32c1c11d8dcbdc70800e1e291191..f4f437cbabf1d44447094e40da48fec9e14eaadc 100644 (file)
@@ -116,3 +116,12 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
        return;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/* For use by kexec */
+void mmu_cleanup_all(void)
+{
+       if (radix_enabled())
+               radix__mmu_cleanup_all();
+       else if (mmu_hash_ops.hpte_clear_all)
+               mmu_hash_ops.hpte_clear_all();
+}
index 8f086352e421c3ae4e3a2879f0e166c64c8364b3..ed7bddc456b72b5a7ce1b647438cd2271306302a 100644 (file)
@@ -396,6 +396,18 @@ void radix__early_init_mmu_secondary(void)
        }
 }
 
+void radix__mmu_cleanup_all(void)
+{
+       unsigned long lpcr;
+
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
+               mtspr(SPRN_PTCR, 0);
+               radix__flush_tlb_all();
+       }
+}
+
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
 {