/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/system.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

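/*
 * Small per-CPU stacks for the IRQ, ABT and UND exception modes.  The
 * exception entry paths only need a few words in these modes before
 * switching to SVC mode; cpu_init() below points each mode's sp here.
 */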
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
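
/*
 * Endianness probe: the union stores the bytes 'l', '?', '?', 'b' into a
 * long, so reading back the least significant byte yields 'l' on a
 * little-endian kernel and 'b' on a big-endian one.  The character is
 * appended to the utsname machine and ELF platform strings below.
 */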
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

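/*
 * Work out the CPU architecture version from the main ID register.
 * Pre-ARM7 and ARM7 parts use older ID layouts; later parts encode the
 * architecture in bits [19:16], where the value 0xf means the new CPUID
 * scheme is in use and MMFR0 has to be read to tell ARMv6 from ARMv7.
 */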
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

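/*
 * Decide whether the instruction cache can alias.  On ARMv7 this means
 * selecting the L1 I-cache in CSSELR, reading its geometry from CCSIDR
 * and checking whether one cache way (line size * number of sets) is
 * larger than a page; on ARMv6 an aliasing-restriction bit in the cache
 * type register is checked.  PIPT I-caches can never alias.
 */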
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

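/*
 * Classify the data and instruction caches (VIVT, VIPT aliasing or
 * non-aliasing, ASID-tagged, PIPT) from the cache type register and
 * record the result in the global 'cacheid' used by the cache_is_*()
 * and icache_is_*() helpers.
 */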
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

int __cpu_logical_map[NR_CPUS];

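/*
 * Read the boot CPU's physical ID from MPIDR (0 on uniprocessor parts)
 * and build the logical CPU map so that the booting CPU is always
 * logical CPU 0; the logical slot matching its physical ID is swapped
 * with physical CPU 0.
 */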
void __init smp_setup_processor_id(void)
{
        int i;
        u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

        cpu_logical_map(0) = cpu;
        for (i = 1; i < NR_CPUS; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

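/*
 * Register a physical memory bank with the meminfo array.  Start and
 * size are trimmed to whole pages, and banks that shrink to nothing
 * are rejected.
 */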
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region still has a non-zero size
         * after alignment; empty banks are not registered.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

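/*
 * Claim a "System RAM" resource for every memblock region and nest the
 * kernel code/data ranges and any machine video RAM underneath them.
 * The legacy parallel port ranges are only reserved when the machine
 * descriptor asks for them.
 */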
static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have lp0, lp1 or lp2, so only
         * reserve these legacy port ranges when the machine
         * descriptor requests it.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
        strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
        strlcat(default_command_line, tag->u.cmdline.cmdline,
                COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
        pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
        strlcpy(default_command_line, tag->u.cmdline.cmdline,
                COMMAND_LINE_SIZE);
#endif
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE },
        { 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve a memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}

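/*
 * Legacy (non-DT) boot: match the machine number passed by the boot
 * loader against the compiled-in machine descriptors, locate the ATAG
 * list (from the boot loader's ATAG pointer, the machine's atag_offset,
 * or the built-in defaults), apply any machine fixup, and parse it to
 * fill in meminfo and the boot command line.
 */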
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc = NULL, *p;
        char *from = default_command_line;

        init_tags.mem.start = PHYS_OFFSET;

        /*
         * locate machine in the list of supported machines.
         */
        for_each_machine_desc(p)
                if (nr == p->nr) {
                        printk("Machine: %s\n", p->name);
                        mdesc = p;
                        break;
                }

        if (!mdesc) {
                early_print("\nError: unrecognized/unsupported machine ID"
                        " (r1 = 0x%08x).\n\n", nr);
                dump_machine_table(); /* does not return */
        }

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->atag_offset)
                tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif

        if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
                /*
                 * If CONFIG_OF is set, then assume this is a reasonably
                 * modern system that should pass boot parameters
                 */
                early_print("Warning: Neither atags nor dtb found\n");
#endif
                tags = (struct tag *)&init_tags;
        }

        if (mdesc->fixup)
                mdesc->fixup(tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        return mdesc;
}

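/* Comparison helper used below to sort memory banks by start pfn. */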
static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

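/*
 * Main architecture-specific setup, called from start_kernel().  It
 * identifies the CPU and machine (preferring a flattened device tree
 * over ATAGs), parses the early command line, sorts and sanity-checks
 * the memory banks, then brings up paging, the standard resources and
 * the machine's early hooks.
 */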
void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                extern unsigned long arm_dma_zone_size;
                arm_dma_zone_size = mdesc->dma_zone_size;
        }
#endif
        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

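/*
 * Feature names printed in /proc/cpuinfo; the array is indexed by HWCAP
 * bit number, so the order must match the HWCAP_* definitions.
 */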
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

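/*
 * Show routine for /proc/cpuinfo.  BogoMIPS is reconstructed from
 * loops_per_jiffy: lpj / (500000 / HZ) gives the integer part and
 * lpj / (5000 / HZ) modulo 100 the two decimal places.
 */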
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};