/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

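/*
 * Select the pgprot for user mappings of physical memory (e.g. /dev/mem):
 * Device attributes for invalid pfns, Normal non-cacheable for O_SYNC
 * mappings, otherwise the protections requested by the caller.
 */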
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

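/*
 * Allocate zeroed, naturally aligned memory for early page tables straight
 * from memblock, before the core allocators are available.
 */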
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later. Default the new page
		 * range as contiguous ptes.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

/*
 * Given a PTE with the CONT bit set, determine where the CONT range
 * starts, and clear the entire range of PTE CONT bits.
 */
static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
{
	int i;

	pte -= CONT_RANGE_OFFSET(addr);
	for (i = 0; i < CONT_PTES; i++) {
		set_pte(pte, pte_mknoncont(*pte));
		pte++;
	}
	flush_tlb_all();
}

/*
 * Given a range of PTEs set the pfn and provided page protection flags
 */
static void __populate_init_pte(pte_t *pte, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot)
{
	unsigned long pfn = __phys_to_pfn(phys);

	do {
		/* clear all the bits except the pfn, then apply the prot */
		set_pte(pte, pfn_pte(pfn, prot));
		pte++;
		pfn++;
		addr += PAGE_SIZE;
	} while (addr != end);
}

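/*
 * Populate the PTEs covering [addr, end), allocating (or splitting an
 * existing section mapping into) a pte table first if necessary. Suitably
 * aligned ranges are mapped with the contiguous (CONT) hint set.
 */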
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, phys_addr_t phys,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		next = min(end, (addr + CONT_SIZE) & CONT_MASK);
		if (((addr | next | phys) & ~CONT_MASK) == 0) {
			/* a block of CONT_PTES */
			__populate_init_pte(pte, addr, next, phys,
					    prot | __pgprot(PTE_CONT));
		} else {
			/*
			 * If the range being split is already inside of a
			 * contiguous range but this PTE isn't going to be
			 * contiguous, then we want to unmark the adjacent
			 * ranges, then update the portion of the range we
			 * are interested in.
			 */
			clear_cont_pte_range(pte, addr);
			__populate_init_pte(pte, addr, next, phys, prot);
		}

		pte += (next - addr) >> PAGE_SHIFT;
		phys += next - addr;
		addr = next;
	} while (addr != end);
}

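/*
 * Remap a 1GB PUD block mapping as a table of PMD block entries with the
 * same output address and attributes.
 */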
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

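/*
 * Populate the PMD entries covering [addr, end), using section (block)
 * mappings where the alignment allows and falling back to PTEs otherwise.
 */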
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

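/*
 * 1GB blocks can only be used with the 4K granule, and only when the
 * virtual and physical addresses are PUD_SIZE-aligned over the whole range.
 */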
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

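/*
 * Populate the PUD entries covering [addr, end), using 1GB block mappings
 * where possible.
 */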
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by the phys/virt/size/prot arguments.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

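/*
 * Page table allocator for use once the core MM is up: returns a single
 * page from the page allocator, zeroed via PGALLOC_GFP.
 */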
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}

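/*
 * Create a mapping in the kernel page tables using the early memblock
 * allocator. Only valid for virtual addresses at or above VMALLOC_START.
 */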
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

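/*
 * Create a mapping in the page tables of an arbitrary mm rather than
 * init_mm.
 */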
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}

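/*
 * As create_mapping(), but using late_alloc() for any page tables that
 * still need to be allocated.
 */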
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
				phys, virt, size, prot, late_alloc);
}

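/*
 * With CONFIG_DEBUG_RODATA, the section-aligned region covering the kernel
 * text is mapped executable and the remainder of a memblock region is
 * mapped non-executable; otherwise the whole region is mapped with
 * PAGE_KERNEL_EXEC.
 */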
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SECTION_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SECTION_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

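/*
 * Remap the region from _stext to _etext read-only; the mapping remains
 * executable.
 */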
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

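/*
 * Remap the __init region (__init_begin..__init_end) with PAGE_KERNEL so
 * it is no longer executable.
 */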
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

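/*
 * The vmemmap is populated with base pages under the 64K granule; with the
 * 4K granule, PMD-sized section mappings are used instead.
 */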
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

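/*
 * Statically allocated page tables used to map the fixmap region before the
 * normal page table allocators are available.
 */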
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

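/*
 * Wire bm_pud/bm_pmd/bm_pte into the kernel page tables so that the fixmap
 * (and hence __set_fixmap()) can be used early, before paging_init().
 */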
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

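/*
 * Install or clear a single fixmap entry. Clearing the entry also flushes
 * the TLB for the affected page.
 */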
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

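/*
 * Map the FDT blob through the FIX_FDT fixmap slot and return its virtual
 * address, or NULL if the physical address is missing or misaligned, or if
 * the header is invalid or the blob too large.
 */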
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
	int granularity, size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);

		granularity = PAGE_SIZE;
	} else {
		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);

		granularity = PMD_SIZE;
	}

	offset = dt_phys % granularity;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, granularity), dt_virt_base,
		       granularity, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > granularity)
		create_mapping(round_down(dt_phys, granularity), dt_virt_base,
			       round_up(offset + size, granularity), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}