#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0UL

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN	_PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
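/*
 * Illustrative sketch (assumed helper name, not kernel API): a PROT_NONE
 * mapping keeps its pte non-present for the hardware but still "present"
 * for the VM, so presence tests must look at both bits:
 *
 *	static inline int example_pte_present(pteval_t val)
 *	{
 *		return val & (_PAGE_PRESENT | _PAGE_PROTNONE);
 *	}
 */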
#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while the soft-dirty engine works on
 * user-space pages, they do not conflict with each other.
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 6 and 7 are *not* involved
 * in swap entry computation, but bit 6 is used for nonlinear
 * file mapping, so we borrow bit 7 for soft dirty tracking.
 *
 * Please note that this bit must be treated as a swap dirty page
 * mark if and only if the PTE has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif
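/*
 * Illustrative sketch (assumed helper name, not kernel API): because
 * bit 7 doubles as _PAGE_PSE on present entries, the swap soft-dirty
 * test is only valid once the present bit is known to be clear:
 *
 *	static inline int example_swp_soft_dirty(pteval_t val)
 *	{
 *		return !(val & _PAGE_PRESENT) &&
 *		       (val & _PAGE_SWP_SOFT_DIRTY);
 *	}
 */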
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
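/*
 * Note: on 32-bit non-PAE builds _PAGE_NX is 0, so OR-ing it into the
 * PAGE_* protections below is a harmless no-op there.
 */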
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
			 _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,
	_PAGE_CACHE_MODE_NUM = 8
};
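/*
 * Illustrative note: because WB sits at index 0, an all-clear
 * PWT/PCD/PAT combination decodes straight to write-back. A sketch
 * using the helpers defined further down:
 *
 *	enum page_cache_mode m = pgprot2cachemode(__pgprot(0));
 *	   m == _PAGE_CACHE_MODE_WB
 */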
#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
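/*
 * Illustrative note: generic mm code gathers these into a 16-entry
 * protection_map[] (the __P* rows for private mappings, __S* for
 * shared) and indexes it in vm_get_page_prot()-style code, e.g.:
 *
 *	prot = protection_map[vm_flags &
 *			      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 *
 * The digits encode x/w/r: __P010 is a private writable mapping, which
 * must be COW, hence PAGE_COPY rather than PAGE_SHARED.
 */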
/*
 * early identity mapping pte attrib macros.
 */
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC

#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
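/*
 * Illustrative sketch: splitting a raw pte value into its physical
 * frame and attribute bits (mirrors pte_flags() further down):
 *
 *	pteval_t val   = native_pte_val(pte);
 *	pteval_t pfn   = val & PTE_PFN_MASK;	   physical address bits
 *	pteval_t flags = val & PTE_FLAGS_MASK;	   everything else
 */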
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.pgd);
}
#endif
static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
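/*
 * Worked example (illustrative): __pte2cm_idx() packs the three cache
 * bits into a 3-bit table index, PAT as bit 2, PCD as bit 1, PWT as
 * bit 0. For cb == _PAGE_PCD (bit 4 set, PAT/PWT clear):
 *
 *	((16 >> 5) & 4) | ((16 >> 3) & 2) | ((16 >> 3) & 1) == 2
 *
 * and __cm_idx2pte(2) maps the index back to _PAGE_PCD.
 */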
static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}

static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
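/*
 * Usage sketch (illustrative): requesting write-combining bits for a
 * mapping and decoding them again:
 *
 *	pgprot_t wc = cachemode2pgprot(_PAGE_CACHE_MODE_WC);
 *	enum page_cache_mode m = pgprot2cachemode(wc);
 *
 * m is _PAGE_CACHE_MODE_WC when the PAT has a WC entry programmed; on
 * a PAT-less CPU the tables may fall back to UC- instead.
 */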
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}

static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
			  ((val & _PAGE_PAT_LARGE) >>
			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
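/*
 * Illustrative note: on 4K ptes the PAT bit is bit 7, but on 2M/1G
 * entries bit 7 is PSE, so PAT moves to bit 12. E.g. a 4K pgprot with
 * _PAGE_PAT (1 << 7) set becomes, via pgprot_4k_2_large(), one with
 * _PAGE_PAT_LARGE (1 << 12) set and bit 7 clear; pgprot_large_2_4k()
 * is the inverse shift.
 */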
typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
			       unsigned long numpages);
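/*
 * Usage sketch (illustrative): probing how a kernel virtual address is
 * mapped; 'level' reports whether the returned entry is a 4K pte or a
 * PSE pmd/pud handed back as a pte:
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *	if (ptep && (pte_flags(*ptep) & _PAGE_PRESENT))
 *		... level distinguishes 4K/2M/1G mappings ...
 */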
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */