/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n",reserved);
	printk(KERN_INFO "%lu pages shared\n",shared);
	printk(KERN_INFO "%lu pages swap cached\n",cached);
}
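
/*
 * after_bootmem flags whether the bootmem allocator has been retired.
 * spp_getpage() returns a zeroed page for kernel page tables: taken from
 * bootmem early in boot, and from get_zeroed_page() once the page
 * allocator is up.
 */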
int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
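
/*
 * Install a single kernel mapping for the fixmap area: walk (and, if
 * needed, allocate) the pud/pmd/pte levels for vaddr and point the pte
 * at phys with the given protection.
 */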
static __init void set_pte_phys(unsigned long vaddr,
			unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
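
/*
 * Bookkeeping for the early direct-mapping setup: table_start/table_end
 * track the physical pages handed out for page tables, and the two
 * temp_boot_pmds (set up by the early boot code) provide 2MB windows at
 * 40MB/42MB for touching those pages before the direct mapping exists.
 */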
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};
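
/*
 * Hand out the next free page for early page tables.  After bootmem is up
 * this is a plain get_zeroed_page(); before that the page is taken from
 * table_end and mapped through a free temp_mappings[] slot so it can be
 * cleared and written.
 */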
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}
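
/*
 * early_ioremap()/early_iounmap() reuse the two temporary PMDs to map a
 * small physical range (firmware tables and the like) before the normal
 * ioremap machinery is available.
 */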
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}
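
/*
 * Fill one PMD page with 2MB kernel mappings for [address, end).  Entries
 * past 'end' are cleared at boot time; already-present entries are left
 * alone so the function can be re-run for hotplugged ranges.
 */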
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud,0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
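
/*
 * Populate one PUD page for [addr, end): skip holes that have no e820
 * backing, update an existing PMD page in place, or allocate a fresh one
 * via alloc_low_page() and fill it with phys_pmd_init().
 */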
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
		int map;
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}
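
/*
 * Estimate how much memory the kernel page tables for the direct mapping
 * will need and reserve a physically contiguous block for them from the
 * e820 map.
 */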
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
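
/*
 * Drop the low identity mapping used during early boot.  The boot CPU
 * clears the pgd entry; secondary CPUs simply switch to init_level4_pgt,
 * where the low mappings are already zapped.
 */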
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size -1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
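
/*
 * Late memory init: hand all bootmem pages to the page allocator, compute
 * the reserved-page accounting and register the /proc/kcore areas.
 */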
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
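
/*
 * Give a range of init pages back to the page allocator, poisoning them
 * first so stale references to freed init code/data are easier to catch.
 */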
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
		__initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk ("Write protecting the kernel read-only data: %luk\n",
			(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
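
/*
 * Wrapper around reserve_bootmem()/reserve_bootmem_node() that validates
 * the range against end_pfn and keeps the ZONE_DMA reservation counter
 * up to date.
 */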
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}
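
/*
 * Check whether a kernel virtual address is backed by a valid page: the
 * address must be canonical and every page-table level down to a present
 * (large or small) page must exist.
 */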
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
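
/*
 * Optional debug sysctls: exposes exception-trace under /proc/sys/debug
 * when CONFIG_SYSCTL is enabled.
 */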
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name = CTL_DEBUG,
		.procname = "debug",
		.mode = 0555,
		.child = debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}