#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

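/*
 * Interfaces for transparent huge page (THP) support: PMD-level fault
 * handling, splitting, and the huge zero page.  The extern declarations
 * below are implemented in mm/huge_memory.c.
 */
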
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			unsigned long pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
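
/*
 * These values are bit indices into transparent_hugepage_flags (declared
 * further down), which is tuned at runtime through the files under
 * /sys/kernel/mm/transparent_hugepage/.  The helper macros below wrap
 * tests of the form:
 *
 *	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
 *		... THP is enabled system-wide ("always") ...
 */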

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
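
/*
 * Worked example: on x86-64 with 4KiB base pages, PMD_SHIFT == 21 and
 * PAGE_SHIFT == 12, so HPAGE_PMD_ORDER == 9 and HPAGE_PMD_NR == 512,
 * i.e. one 2MiB huge page covers 512 base pages.
 */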

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

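/*
 * transparent_hugepage_enabled() reads as: huge pages may back this vma
 * if THP is enabled globally, or if it is enabled for madvise regions
 * and the vma carries VM_HUGEPAGE -- provided, in either case, that the
 * vma is not marked VM_NOHUGEPAGE and is not a temporary stack set up
 * during exec.
 */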
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
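
/*
 * Usage sketch (illustrative, not taken from a real caller): splitting
 * returns 0 on success and nonzero on failure, so callers that hold a
 * reference to a locked huge page typically fall back when the split
 * fails:
 *
 *	if (PageTransHuge(page) && split_huge_page(page))
 *		goto fail;	... still huge, take the slow path ...
 */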
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd))				\
			__split_huge_pmd(__vma, __pmd, __address);	\
	}  while (0)
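
/*
 * split_huge_pmd() is a no-op unless *pmd currently maps a huge page;
 * the ____pmd temporary captures the pointer once for the
 * pmd_trans_huge() check.  Callers typically hold mmap_sem so the page
 * table cannot be freed underneath them.
 */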

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return false;
}
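
/*
 * Usage sketch (illustrative): on success the pmd's page table lock is
 * held and *ptl points at it, so a page table walker can operate on the
 * huge pmd and then drop the lock, assuming mmap_sem is already held:
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *		... *pmd is a stable huge pmd until the unlock ...
 *		spin_unlock(ptl);
 *	}
 */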
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}
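
/*
 * The huge zero page is allocated lazily by get_huge_zero_page() and can
 * be reclaimed under memory pressure, so huge_zero_page is read exactly
 * once above rather than letting the compiler reload a pointer that may
 * change under us.
 */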

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */