arch/arm64/kernel/setup.c
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>
#include <linux/psci.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT        \
                                (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
                                 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
                                 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
                                 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
                                 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
                                 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

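/*
 * Bitmap of CPU capabilities (features and errata workarounds) detected
 * at boot; queried via cpus_have_cap() in <asm/cpufeature.h>.
 */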
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

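/*
 * Physical address of the FDT, as passed in x0 by the bootloader and
 * recorded by the early boot code in head.S.
 */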
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        cpu_logical_map(0) = mpidr;

        /*
         * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
         * early use of percpu variables; lockdep, for example, accesses
         * percpu variables inside lock_release().
         */
        set_my_cpu_offset(0);
        pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

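/*
 * Match a logical CPU against the physical id (MPIDR) reported by
 * firmware; used when matching firmware CPU descriptions (e.g. DT
 * nodes) to logical CPUs.
 */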
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
        return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity, fs[4], bits[4], ls;
        u64 mask = 0;
        /*
         * Pre-scan the list of MPIDRs and filter out bits that do not
         * contribute to affinity levels, i.e. they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits %#llx\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 4; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the positions of the most and least significant
                 * set bits to determine how many bits are required to
                 * express this affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR_EL1 by isolating the
         * significant bits at each affinity level and by shifting them in
         * order to compress the 32-bit value space into a dense set of
         * values. This is equivalent to hashing the MPIDR_EL1 through
         * shifting and ORing. It is a collision-free hash, though not
         * minimal, since some levels might contain a number of CPUs that
         * is not an exact power of 2 and their bit representation might
         * contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
                                  fs[3] - (bits[2] + bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
                mpidr_hash.shift_aff[0],
                mpidr_hash.shift_aff[1],
                mpidr_hash.shift_aff[2],
                mpidr_hash.shift_aff[3],
                mpidr_hash.mask,
                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}

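/*
 * Identify the boot CPU, stash its feature registers and translate them
 * into the ELF hwcaps advertised to userspace.
 */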
static void __init setup_processor(void)
{
        u64 features;
        s64 block;
        u32 cwg;
        int cls;

        printk("CPU: AArch64 Processor [%08x] revision %d\n",
               read_cpuid_id(), read_cpuid_id() & 15);

        sprintf(init_utsname()->machine, ELF_PLATFORM);
        elf_hwcap = 0;

        cpuinfo_store_boot_cpu();

        /*
         * Check for sane CTR_EL0.CWG value.
         */
        cwg = cache_type_cwg();
        cls = cache_line_size();
        if (!cwg)
                pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
                        cls);
        if (L1_CACHE_BYTES < cls)
                pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
                        L1_CACHE_BYTES, cls);

        /*
         * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
         * The blocks we test below represent incremental functionality
         * for non-negative values. Negative values are reserved.
         */
        features = read_cpuid(ID_AA64ISAR0_EL1);
        block = cpuid_feature_extract_field(features, 4);
        if (block > 0) {
                switch (block) {
                default:
                case 2:
                        elf_hwcap |= HWCAP_PMULL;
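                        /* fall through: each level implies those below it */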
                case 1:
                        elf_hwcap |= HWCAP_AES;
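                        /* fall through */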
                case 0:
                        break;
                }
        }

        if (cpuid_feature_extract_field(features, 8) > 0)
                elf_hwcap |= HWCAP_SHA1;

        if (cpuid_feature_extract_field(features, 12) > 0)
                elf_hwcap |= HWCAP_SHA2;

        if (cpuid_feature_extract_field(features, 16) > 0)
                elf_hwcap |= HWCAP_CRC32;

        block = cpuid_feature_extract_field(features, 20);
        if (block > 0) {
                switch (block) {
                default:
                case 2:
                        elf_hwcap |= HWCAP_ATOMICS;
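                        /* fall through */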
                case 1:
                        /* RESERVED */
                case 0:
                        break;
                }
        }

#ifdef CONFIG_COMPAT
        /*
         * ID_ISAR5_EL1 carries similar information to the above, but
         * pertaining to the AArch32 32-bit execution state.
         */
        features = read_cpuid(ID_ISAR5_EL1);
        block = cpuid_feature_extract_field(features, 4);
        if (block > 0) {
                switch (block) {
                default:
                case 2:
                        compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
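                        /* fall through */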
                case 1:
                        compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
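                        /* fall through */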
                case 0:
                        break;
                }
        }

        if (cpuid_feature_extract_field(features, 8) > 0)
                compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

        if (cpuid_feature_extract_field(features, 12) > 0)
                compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

        if (cpuid_feature_extract_field(features, 16) > 0)
                compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
        void *dt_virt = fixmap_remap_fdt(dt_phys);

        if (!dt_virt || !early_init_dt_scan(dt_virt)) {
                pr_crit("\n"
                        "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
                        "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
                        "\nPlease check your bootloader.",
                        &dt_phys, dt_virt);

                while (true)
                        cpu_relax();
        }

        dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

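/*
 * Register the kernel text/data and all memblock RAM regions with the
 * resource tree so that they appear in /proc/iomem.
 */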
static void __init request_standard_resources(void)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Relocate initrd if it is not completely within the linear mapping.
 * This would be the case if mem= cuts out all or part of it.
 */
static void __init relocate_initrd(void)
{
        phys_addr_t orig_start = __virt_to_phys(initrd_start);
        phys_addr_t orig_end = __virt_to_phys(initrd_end);
        phys_addr_t ram_end = memblock_end_of_DRAM();
        phys_addr_t new_start;
        unsigned long size, to_free = 0;
        void *dest;

        if (orig_end <= ram_end)
                return;

        /*
         * Any of the original initrd which overlaps the linear map should
         * be freed after relocating.
         */
        if (orig_start < ram_end)
                to_free = ram_end - orig_start;

        size = orig_end - orig_start;
        if (!size)
                return;

        /* initrd needs to be relocated completely inside linear mapping */
        new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
                                           size, PAGE_SIZE);
        if (!new_start)
                panic("Cannot relocate initrd of size %ld\n", size);
        memblock_reserve(new_start, size);

        initrd_start = __phys_to_virt(new_start);
        initrd_end   = initrd_start + size;

        pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
                orig_start, orig_start + size - 1,
                new_start, new_start + size - 1);

        dest = (void *)initrd_start;

        if (to_free) {
                memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
                dest += to_free;
        }

        copy_from_early_mem(dest, orig_start + to_free, size - to_free);

        if (to_free) {
                pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
                        orig_start, orig_start + to_free - 1);
                memblock_free(orig_start, to_free);
        }
}
#else
static inline void __init relocate_initrd(void)
{
}
#endif

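/* Logical CPU number to MPIDR map; entries default to INVALID_HWID. */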
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
        setup_processor();

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        *cmdline_p = boot_command_line;

        early_fixmap_init();
        early_ioremap_init();

        setup_machine_fdt(__fdt_pointer);

        parse_early_param();

        /*
         * Unmask asynchronous aborts after bringing up possible earlycon,
         * so that any pending System Error can actually be reported.
         */
        local_async_enable();

        efi_init();
        arm64_memblock_init();

        /* Parse the ACPI tables for possible boot-time configuration */
        acpi_boot_table_init();

        paging_init();
        relocate_initrd();
        request_standard_resources();

        early_ioremap_reset();

        if (acpi_disabled) {
                unflatten_device_tree();
                psci_dt_init();
        } else {
                psci_acpi_init();
        }
        xen_early_init();

        cpu_read_bootcpu_ops();
        smp_init_cpus();
        smp_build_mpidr_hash();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        if (boot_args[1] || boot_args[2] || boot_args[3]) {
                pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
                        "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
                        "This indicates a broken bootloader or old kernel\n",
                        boot_args[1], boot_args[2], boot_args[3]);
        }
}

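/* Probe the device tree: set up IOMMUs first, then populate platform devices. */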
static int __init arm64_device_init(void)
{
        if (of_have_populated_dt()) {
                of_iommu_init();
                of_platform_populate(NULL, of_default_bus_match_table,
                                     NULL, NULL);
        } else if (acpi_disabled) {
                pr_crit("Device tree not populated\n");
        }
        return 0;
}
arch_initcall_sync(arm64_device_init);

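/* Register each possible CPU with the driver core (/sys/devices/system/cpu). */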
static int __init topology_init(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
                cpu->hotpluggable = 1;
                register_cpu(cpu, i);
        }

        return 0;
}
subsys_initcall(topology_init);

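/* NULL-terminated; index must match the HWCAP_* bit positions. */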
static const char *hwcap_str[] = {
        "fp",
        "asimd",
        "evtstrm",
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        "atomics",
        NULL
};

#ifdef CONFIG_COMPAT
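/* NULL-terminated; index must match the COMPAT_HWCAP_* bit positions. */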
static const char *compat_hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL            /* sentinel: c_show() iterates until NULL */
};

static const char *compat_hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
        int i, j;

        for_each_online_cpu(i) {
                struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
                u32 midr = cpuinfo->reg_midr;

                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);

                /*
                 * Dump out the common processor features in a single line.
                 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
                 * rather than attempting to parse this, but there's a body of
                 * software which does already (at least for 32-bit).
                 */
                seq_puts(m, "Features\t:");
                if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
                        for (j = 0; compat_hwcap_str[j]; j++)
                                if (compat_elf_hwcap & (1 << j))
                                        seq_printf(m, " %s", compat_hwcap_str[j]);

                        for (j = 0; compat_hwcap2_str[j]; j++)
                                if (compat_elf_hwcap2 & (1 << j))
                                        seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
                } else {
                        for (j = 0; hwcap_str[j]; j++)
                                if (elf_hwcap & (1 << j))
                                        seq_printf(m, " %s", hwcap_str[j]);
                }
                seq_puts(m, "\n");

                seq_printf(m, "CPU implementer\t: 0x%02x\n",
                           MIDR_IMPLEMENTOR(midr));
                seq_printf(m, "CPU architecture: 8\n");
                seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
                seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
                seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
        }

        return 0;
}

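/*
 * The seq_file iterator yields a single dummy element; c_show() walks
 * all online CPUs itself, so one pass prints the whole file.
 */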
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};