git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
powerpc/mm: Abstraction for vmemmap and map_kernel_page()
author: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Fri, 29 Apr 2016 13:25:59 +0000 (23:25 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Sun, 1 May 2016 08:33:02 +0000 (18:33 +1000)
For hash we create the vmemmap mapping using bolted hash page table entries.
For radix we fill the radix page table. The next patch will add the
radix details for creating vmemmap mappings.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mmu_decl.h
arch/powerpc/mm/pgtable-hash64.c

index 62714399a1a0afad17eca1cc97e16f12404b2bcd..cd3e91583c8103742dc199b3e9c99aa1b20fe6cb 100644 (file)
@@ -193,6 +193,14 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+
+extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
+                            unsigned long flags);
+extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
+                                             unsigned long page_size,
+                                             unsigned long phys);
+extern void hash__vmemmap_remove_mapping(unsigned long start,
+                                    unsigned long page_size);
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
index a22b47844f2fee59d608bcfa6e4adf08595bf87a..c1b787dcfbb65e12b89d471e0fc3acd0c58d2ca0 100644 (file)
@@ -722,6 +722,26 @@ extern struct page *pgd_page(pgd_t pgd);
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 
+static inline int map_kernel_page(unsigned long ea, unsigned long pa,
+                                 unsigned long flags)
+{
+       return hash__map_kernel_page(ea, pa, flags);
+}
+
+static inline int __meminit vmemmap_create_mapping(unsigned long start,
+                                                  unsigned long page_size,
+                                                  unsigned long phys)
+{
+       return hash__vmemmap_create_mapping(start, page_size, phys);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline void vmemmap_remove_mapping(unsigned long start,
+                                         unsigned long page_size)
+{
+       return hash__vmemmap_remove_mapping(start, page_size);
+}
+#endif
 struct page *realmode_pfn_to_page(unsigned long pfn);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 10debb93c4a4835a8ccaa758cb850237d7a6f200..f143d6fb35766075eb428b05cc291afdfe010381 100644 (file)
@@ -362,6 +362,13 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
+extern int map_kernel_page(unsigned long ea, unsigned long pa,
+                          unsigned long flags);
+extern int __meminit vmemmap_create_mapping(unsigned long start,
+                                           unsigned long page_size,
+                                           unsigned long phys);
+extern void vmemmap_remove_mapping(unsigned long start,
+                                  unsigned long page_size);
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
index 09ca65e55b58b94bc0326babe17ea7fe15111f92..33709bdb04196ae3dfac4cb3f7463db2210a3449 100644 (file)
@@ -240,9 +240,6 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
        vmemmap_list = vmem_back;
 }
 
-extern int __meminit vmemmap_create_mapping(unsigned long start,
-                                           unsigned long page_size,
-                                           unsigned long phys);
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
@@ -281,8 +278,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-extern void vmemmap_remove_mapping(unsigned long start,
-                                  unsigned long page_size);
 static unsigned long vmemmap_list_free(unsigned long start)
 {
        struct vmemmap_backing *vmem_back, *vmem_back_prev;
index bfb7c0bcabd57a6dd12f87ca1e41f7bacdfe4f28..6af65327c99322b564bacdf66657f62f4e35a05b 100644 (file)
@@ -108,11 +108,6 @@ extern unsigned long Hash_size, Hash_mask;
 
 #endif /* CONFIG_PPC32 */
 
-#ifdef CONFIG_PPC64
-extern int map_kernel_page(unsigned long ea, unsigned long pa,
-                          unsigned long flags);
-#endif /* CONFIG_PPC64 */
-
 extern unsigned long ioremap_bot;
 extern unsigned long __max_low_memory;
 extern phys_addr_t __initial_memory_limit_addr;
index 6f5fa68c784e868f2f6014bff52c4c8b497cf089..64975ebfc50b33728eebd15ec1bf9eb4505bd8b1 100644 (file)
@@ -20,9 +20,9 @@
  * On hash-based CPUs, the vmemmap is bolted in the hash table.
  *
  */
-int __meminit vmemmap_create_mapping(unsigned long start,
-                                    unsigned long page_size,
-                                    unsigned long phys)
+int __meminit hash__vmemmap_create_mapping(unsigned long start,
+                                      unsigned long page_size,
+                                      unsigned long phys)
 {
        int rc = htab_bolt_mapping(start, start + page_size, phys,
                                   pgprot_val(PAGE_KERNEL),
@@ -37,8 +37,8 @@ int __meminit vmemmap_create_mapping(unsigned long start,
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-void vmemmap_remove_mapping(unsigned long start,
-                           unsigned long page_size)
+void hash__vmemmap_remove_mapping(unsigned long start,
+                             unsigned long page_size)
 {
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
@@ -54,7 +54,7 @@ void vmemmap_remove_mapping(unsigned long start,
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
 {
        pgd_t *pgdp;
        pud_t *pudp;