/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>

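/*
 * Note (added for clarity): the check below warns when the range covered
 * by the user page tables exceeds the range addressable with the
 * available VSIDs, since the excess page-table space can then never be
 * mapped.
 */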
#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

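/*
 * Note (added for clarity): memstart_addr is initialised to ~0 as a
 * "not yet known" sentinel; it is set to the real start of memory during
 * early boot, once the memory layout has been read from the device tree.
 */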
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within. Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

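/*
 * Worked example (illustrative, added for clarity; assumes 64K pages and
 * the ppc64 SECTION_SIZE_BITS of 24, giving PAGES_PER_SECTION = 256):
 * for the vmemmap address of pfn 0x105, offset / sizeof(struct page)
 * recovers 0x105, and masking with PAGE_SECTION_MASK (~0xff) rounds it
 * down to 0x100, the first pfn of that section.
 */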
/*
 * Check if this vmemmap page is already initialised. If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

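/*
 * Overview (descriptive comment, added for clarity): vmemmap_list records
 * the (physical, virtual) pair backing each populated vmemmap block so
 * that memory hot-unplug and real-mode pfn-to-page lookup can find the
 * backing storage later. Entries are carved out of whole pages by
 * vmemmap_list_alloc() below; 'next' points either at the head of the
 * freed-entry freelist (when num_freed > 0) or at the next unused entry
 * in the current page.
 */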
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;
		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;
	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next points to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * section still maps into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * This shouldn't happen, but if it
					 * is the case, leave the memory there.
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else {
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));
			}

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	/* Currently a no-op on powerpc. */
}

/*
 * We do not have access to the sparsemem vmemmap, so we fallback to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in the real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Entries may have been freed from the list, so check every entry */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* Probably the page struct is split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

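/*
 * Note (added for context): the returned pointer is computed from the
 * recorded physical address, so it is usable by real-mode callers (for
 * example KVM's real-mode hypercall handlers) that cannot dereference
 * the virtual vmemmap addresses.
 */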
#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64

static bool disable_radix;

static int __init parse_disable_radix(char *p)
{
	disable_radix = true;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

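/*
 * Usage note (added for clarity): booting with "disable_radix" on the
 * kernel command line clears MMU_FTR_TYPE_RADIX in
 * mmu_early_init_devtree() below, forcing the hash MMU, subject to the
 * hypervisor constraints checked in early_check_vec5().
 */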
/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix. If not, we clear the radix feature bit so we fall back to hash.
 */
static void early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled())
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");

		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE)))
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");

		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

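/*
 * Background (added for context): ibm,architecture-vec-5 holds the option
 * vectors negotiated with the hypervisor through the
 * ibm,client-architecture-support call, so the OV5_MMU_SUPPORT field
 * checked above reflects which translation modes (hash, radix, or either)
 * the hypervisor agreed to.
 */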
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */