/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

struct screen_info screen_info;

/*
 * Despite its name, this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

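/*
 * detect_memory_region() sizes RAM by probing for address-space wrap-around:
 * detect_magic holds a known value (the address of the function itself), and
 * the probe looks for the offset at which that value aliases back.
 */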
static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check: reject a region whose size wraps the address space. */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		phys_addr_t top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;
		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

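/* Dump the boot_mem_map to the kernel log, one line per region. */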
static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

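/*
 * "rd_start=" and "rd_size=" let the bootloader describe an initrd image that
 * it loaded into memory before handing control to the kernel.
 */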
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);

disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;

disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and rounding down the end of the usable range.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 *  At this stage the memory configuration of the system is known to the
 *  kernel, but the generic memory management system is still entirely
 *  uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 *  At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get by without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
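/* Non-zero once the user has overridden the firmware memory map with "mem=". */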
static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);

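/*
 * "elfcorehdr=" tells a kdump capture kernel where the crashed kernel left
 * its ELF core header, so the region can be reserved later in arch_mem_init.
 */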
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;

static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

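/*
 * Add a kernel image section (text/data or init) to boot_mem_map unless the
 * firmware-provided map already covers its start address.
 */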
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

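/*
 * crashkernel handling: parse "crashkernel=" against the total amount of
 * memory and stake out the requested region for a kexec crash kernel.
 */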
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

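/* Claim the parsed crashkernel range under the parent RAM resource. */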
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
	}
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
#else
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();

#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}

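/*
 * Describe the firmware memory map as /proc/iomem resources and nest the
 * kernel code/data and crashkernel regions inside the matching RAM entries.
 */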
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
			break;
		}

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

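/*
 * Mark CPUs 0..possible-1 as possible and clamp nr_cpu_ids so per-CPU data is
 * only allocated for CPUs that can actually appear.
 */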
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

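/*
 * setup_arch() is the MIPS entry point called from start_kernel(): it probes
 * the CPU, takes over the firmware command line and brings up boot memory.
 */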
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}

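/*
 * Per-CPU kernel stack pointers used by the exception handlers, and the raw
 * argument registers handed over by the bootloader/firmware.
 */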
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

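/* Create /sys/kernel/debug/mips as the parent for MIPS-specific debugfs files. */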
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;

static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif