/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
void clflush_cache_range(void *addr, int size)
{
	int i;

	/* Flush one cache line at a time; the line size is CPU dependent: */
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}
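/*
 * Usage sketch (illustrative only, not part of the original file):
 * flush the cache lines of one page before its caching attributes
 * are changed; "buf" is a hypothetical kernel virtual address.
 */
#if 0
	clflush_cache_range(buf, PAGE_SIZE);
#endif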
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/*
 * We allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000
static inline pgprot_t check_exec(pgprot_t prot, unsigned long address)
{
	if (__pa(address) >= BIOS_BEGIN && __pa(address) < BIOS_END)
		pgprot_val(prot) &= ~_PAGE_NX;
	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	return prot;
}
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
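/*
 * Usage sketch (illustrative only, not part of the original file):
 * check whether an address is mapped by an ordinary 4K PTE; the
 * helper name is hypothetical.
 */
static inline int example_mapped_4k(unsigned long addr)
{
	int level;
	pte_t *pte = lookup_address(addr, &level);

	return pte != NULL && level == PG_LEVEL_4K;
}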
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		/* Propagate the change into every pgd on the pgd_list: */
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags, addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	/*
	 * Keep address virtual for __set_pmd_pte() below; only the
	 * physical base of the large page is needed here:
	 */
	addr = __pa(address) & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);
	if (base)
		__free_pages(base, 0);

	return 0;
}
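/*
 * Worked example (hypothetical numbers, 64-bit/PAE layout): splitting
 * the 2M mapping that covers __pa(address) == 0x00345678 fills the new
 * table with 512 PTEs for 0x00200000, 0x00201000, ..., 0x003ff000,
 * i.e. addr steps through the aligned large-page frame in PAGE_SIZE
 * increments.
 */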
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = check_exec(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Split the large page, then retry on the 4K table: */
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	/* The high kernel text mapping is an alias of the direct map: */
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle the kernel mapping too, which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2);
		}
#endif
	}
	up_write(&init_mm.mmap_sem);

	return err;
}
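/*
 * Usage sketch (illustrative only, not part of the original file): an
 * MMIO range with no mem_map entry is addressed by virtual address
 * rather than by struct page; "mmio_virt" and "npages" are
 * hypothetical.
 */
#if 0
	err = change_page_attr_addr(mmio_virt, npages, PAGE_KERNEL_NOCACHE);
	if (!err)
		global_flush_tlb();
#endif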
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
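/*
 * Usage sketch (illustrative only, not part of the original file):
 * remap one page uncached and restore it later; per the comment above,
 * global_flush_tlb() must follow each change to make it active.
 */
#if 0
	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	global_flush_tlb();
	/* ... use the page uncached ... */
	change_page_attr(page, 1, PAGE_KERNEL);
	global_flush_tlb();
#endif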
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}
void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif