/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif
static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
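/*
 * Example (hypothetical values, not from this file): booting with
 * "initrd=0x60d00000,8M" on the kernel command line leaves
 * phys_initrd_start = 0x60d00000 and phys_initrd_size = 0x800000
 * for arm_initrd_init() to validate and reserve below.
 */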
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
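/*
 * Illustration (hypothetical board, 4K pages): with DRAM spanning
 * 0x80000000-0xc0000000 and a lowmem limit of 768MB, this yields
 * *min = 0x80000, *max_low = 0xb0000 and *max_high = 0xc0000,
 * all expressed as page frame numbers.
 */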
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
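/*
 * Worked example (hypothetical machine): with PHYS_OFFSET at 0x80000000
 * and mdesc->dma_zone_size = SZ_64M, arm_dma_limit becomes 0x83ffffff
 * and arm_dma_pfn_limit 0x83fff (4K pages), confining GFP_DMA
 * allocations to the first 64MB of DRAM.
 */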
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
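/*
 * Worked example for the rounding above (hypothetical values):
 * phys_initrd_start = 0x60d00400, phys_initrd_size = 0x800000.
 * start rounds down to 0x60d00000, size grows to 0x800400 and rounds
 * up to 0x801000, so every page touched by the initrd is reserved.
 */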
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
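/*
 * Illustration (hypothetical banks, MAX_ORDER_NR_PAGES = 0x400):
 * bank 0 ends at pfn 0x8000 and bank 1 starts at pfn 0x10000.
 * prev_end is already aligned at 0x8000, bank 1's start rounds down
 * to 0x10000, and the struct pages covering pfns [0x8000, 0x10000)
 * are returned to memblock via free_memmap().
 */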
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
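	/*
	 * For example, MLM(PAGE_OFFSET, (unsigned long)high_memory)
	 * expands to the two addresses plus their distance shifted
	 * right by 20, feeding one "0x%08lx - 0x%08lx (%4ld MB)" line
	 * of the layout dump below.
	 */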
	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];
static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~L_PMD_SECT_RDONLY,
		.prot	= L_PMD_SECT_RDONLY,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};
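/*
 * How an entry is applied (sketch): section_update() below computes
 * (pmd & entry.mask) | prot, where prot is entry.prot when setting
 * permissions and entry.clear when reverting them, so e.g. the
 * non-LPAE "text/rodata RO" entry first strips the old AP bits and
 * then installs either the read-only or the writable encoding.
 */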
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
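/*
 * Example (non-LPAE, illustrative): SECTION_SIZE is 1MB and pmd
 * entries are kept in pairs covering 2MB, so an addr with bit 20 set
 * selects pmd[1]. ORing PMD_SECT_XN into such an entry makes that
 * 1MB section non-executable for the given mm once the entry is
 * flushed and the kernel TLB range invalidated.
 */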
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	read_unlock(&tasklist_lock);
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}
static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}
void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif