#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Sets only the access flags (dirty, accessed, and writable).
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache.  This used to be
 * done in the caller, but sparc needs minor faults to force that
 * call on sun4c so we changed this macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})
#endif

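/*
 * Illustrative sketch, not part of this header: per the comment above,
 * a fault handler uses the returned "changed" flag to decide whether
 * update_mmu_cache() needs to run.  The surrounding fault-handling
 * context is assumed here, not defined by this file.
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */
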
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	int r = 1; \
	if (!pte_young(__pte)) \
		r = 0; \
	else \
		set_pte_at((__vma)->vm_mm, (__address), \
			   (__ptep), pte_mkold(__pte)); \
	r; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
	int __young; \
	__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
	if (__young) \
		flush_tlb_page(__vma, __address); \
	__young; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	int r = 1; \
	if (!pte_dirty(__pte)) \
		r = 0; \
	else \
		set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
			   pte_mkclean(__pte)); \
	r; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
({ \
	int __dirty; \
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
	if (__dirty) \
		flush_tlb_page(__vma, __address); \
	__dirty; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	pte_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
	pte_clear((__mm), (__address), (__ptep)); \
} while (0)
#endif

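/*
 * Sketch of the intended call site (assumed, based on the comment above):
 * a teardown loop such as zap_pte_range() passes its mmu_gather's fullmm
 * flag, so architectures can skip the synchronization a live PTE needs.
 *
 *	if (unlikely(!pte_present(ptent))) {
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 *		continue;
 *	}
 */
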
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
	flush_tlb_page(__vma, __address); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

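/*
 * Usage sketch (assumed caller, e.g. a loop remapping a linear range):
 * every set_pte_at() between enter and leave may be batched by the
 * architecture, and no PTE may be re-read through a raw pointer inside
 * the window.  Note the page table lock held across the whole section,
 * as the comment above requires.
 *
 *	spin_lock(&mm->page_table_lock);
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, pte++)
 *		set_pte_at(mm, addr, pte, mk_pte(page++, prot));
 *	arch_leave_lazy_mmu_mode();
 *	spin_unlock(&mm->page_table_lock);
 */
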
/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exits should always
 * be paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif

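/*
 * Usage sketch only (assumed context switch path): the expensive state
 * reloads for the incoming task are bracketed so a paravirtualized
 * guest's hypercalls can be coalesced.  load_TLS() stands in here for
 * whatever per-arch state reloads fall inside the window.
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_TLS(next, cpu);
 *	arch_leave_lazy_cpu_mode();
 */
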
/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

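/*
 * Worked sketch (walk_pud_range() is a hypothetical helper): each
 * iteration advances either to the next PGDIR_SIZE boundary or to `end`,
 * whichever comes first, so one iteration never spans two pgd entries.
 * The "- 1" in the comparison above keeps the result correct even when
 * the rounded-up boundary wraps to 0.
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */
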
/*
 * When walking page tables, we usually want to skip any p?d_none entries
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

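/*
 * Sketch of the intended use in a range walker (walk_pte_range() is a
 * hypothetical helper): none entries are skipped silently, while bad
 * ones are reported and reset to none by pmd_clear_bad() before being
 * skipped.
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 */
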
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */