/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
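
/*
 * fixup_pointer() translates a link-time symbol address into the physical
 * address it occupies right now: before the final page tables are live,
 * the kernel runs at 'physaddr' rather than at the address _text was
 * linked to, so every global touched from __startup_64() must be adjusted
 * by that delta by hand.
 */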
static void __init *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

void __init __startup_64(unsigned long physaddr)
{
	unsigned long load_delta, *p;
	pgdval_t *pgd;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	int i;

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
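
	/*
	 * Worked example (illustrative numbers, not a real layout): with
	 * __START_KERNEL_map = 0xffffffff80000000 and a kernel linked so
	 * that _text - __START_KERNEL_map = 0x1000000, loading at
	 * physaddr = 0x5000000 yields load_delta = 0x4000000; every
	 * physical address baked into the early page tables below must be
	 * shifted by exactly that amount.
	 */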

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_level4_pgt, physaddr);
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;
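
	/*
	 * The fixed indices above mirror the static layout built in
	 * head_64.S: level3_kernel_pgt[510] points at level2_kernel_pgt,
	 * level3_kernel_pgt[511] at level2_fixmap_pgt, and
	 * level2_fixmap_pgt[506] at the fixmap's level1 table.
	 */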

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

	i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
	pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
	pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;

	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
	pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
	pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
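
	/*
	 * Two consecutive entries are installed at each level so that the
	 * switchover mapping still covers the image if it happens to
	 * straddle a PUD (1G) or PGD (512G) boundary. Note that 'pud' and
	 * 'pmd' hold the fixed-up physical addresses of the dynamic
	 * tables here, which is exactly what a table entry needs.
	 */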

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	pmd_entry += physaddr;
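
	/*
	 * Identity-map the whole image, _text .. _end, with 2M (PMD_SIZE)
	 * pages carrying the non-global executable flags built above.
	 */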
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
		pmd[idx] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;
	}

	/* Fixup phys_base */
	p = fixup_pointer(&phys_base, physaddr);
	*p += load_delta;
}
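
/*
 * phys_base records how far the kernel was loaded from its link-time
 * physical address; __pa()/__va() fold it into every kernel-map
 * translation, so it must carry the same delta as the page tables
 * patched above.
 */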

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__pa_nodebug(early_level4_pgt));
}
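
/*
 * Called from the early page-fault path: rather than pre-building the
 * whole direct mapping, early boot lets references to __PAGE_OFFSET
 * addresses fault and installs the missing 2M mapping on demand out of
 * the small early_dynamic_pgts pool.
 */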
/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM ||
	    read_cr3_pa() != __pa_nodebug(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
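
/*
 * The boot protocol splits the command-line pointer: hdr.cmd_line_ptr
 * carries the low 32 bits and ext_cmd_line_ptr the high 32, so the OR
 * above reassembles the full 64-bit physical address.
 */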

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	memcpy(&boot_params, real_mode_data, sizeof boot_params);
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
		       (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_level4_pgt);

	kasan_early_init();
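
	/*
	 * Point every exception vector at the early handler stubs; in
	 * particular, this lets page faults on not-yet-mapped direct-map
	 * addresses reach early_make_pgtable() above.
	 */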
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();
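
	/*
	 * PGD slot 511 covers __START_KERNEL_map, the kernel text mapping;
	 * copying it into init_level4_pgt keeps the kernel addressable
	 * once that table is eventually loaded into CR3.
	 */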
	/* set init_level4_pgt kernel high mapping */
	init_level4_pgt[511] = early_level4_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}