/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
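
/*
 * On i386 a huge page is mapped by a single PSE (page-size extension)
 * entry at the pmd level; there is no pte level underneath it.  The
 * two helpers below therefore stop the page-table walk at the pmd and
 * return it cast to pte_t *, and callers treat that pmd entry as the
 * "huge pte".
 */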
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	pmd = pmd_alloc(mm, pud, addr);
	return (pte_t *) pmd;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	return (pte_t *) pmd;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
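
/*
 * Example (illustrative): with 4MB huge pages (no PAE), HPAGE_SIZE is
 * 0x400000 and ~HPAGE_MASK is 0x3fffff, so
 *
 *	is_aligned_hugepage_range(0x00800000, 0x00400000)  -> 0
 *	is_aligned_hugepage_range(0x00801000, 0x00400000)  -> -EINVAL
 *
 * With PAE enabled the huge page size is 2MB and the mask shrinks
 * accordingly.
 */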

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* index of the 4kB subpage within the huge page */
	page = &pte_page(*pte)[(address / PAGE_SIZE) % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
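
/*
 * A pmd maps a huge page iff its PSE (page-size) bit is set; that is
 * all pmd_huge() tests.  follow_huge_pmd() then picks the subpage: the
 * pfn in the pmd gives the first struct page of the 4MB/2MB region,
 * and the low address bits (address & ~HPAGE_MASK) select the 4kB page
 * within it.  E.g. with 4MB pages, an offset of 0x5000 into the huge
 * page yields page + 5.
 */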
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
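
/*
 * Presumably invoked when a pmd slot about to be used for a huge
 * mapping still points at a stale, regular page-table page: clear the
 * pmd, undo the page-table accounting and drop the reference on the
 * pte page.
 */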
void hugetlb_clean_stale_pgtable(pte_t *pte)
{
	pmd_t *pmd = (pmd_t *) pte;
	struct page *page;

	page = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	page_cache_release(page);
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	start_addr = mm->free_area_cache;

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
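
/*
 * Top-down variant: start from mm->free_area_cache (clamped to
 * mm->mmap_base) and walk downwards, trying to place the mapping just
 * below each vma while keeping HPAGE_SIZE alignment.  On failure it
 * retries once from the base, then falls back to the bottom-up search
 * above.
 */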
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
				(!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}
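
/*
 * Entry point: reject misaligned or oversized requests, honour an
 * aligned hint if that hole is free, and otherwise pick the search
 * direction that matches the mm's normal mmap layout.
 */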
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
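
/*
 * For illustration only (user space, not part of this file): these
 * allocators run when a file on a mounted hugetlbfs is mmap()ed, via
 * the hugetlbfs get_unmapped_area hook.  The mount point and the 4MB
 * size below are assumptions.
 *
 *	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0644);
 *	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// p is HPAGE_SIZE-aligned, chosen by hugetlb_get_unmapped_area()
 */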