/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
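
/*
 * On i386 (and x86_64, which shares this file) a huge page is mapped by a
 * single pmd entry with the PSE bit set, so the "huge pte" handed back to
 * the generic hugetlb code is really a pointer to a pmd entry.
 * huge_pte_alloc() therefore only needs to allocate down to the pmd level.
 */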
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        pmd = pmd_alloc(mm, pud, addr);
        return (pte_t *) pmd;
}
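
/*
 * Like huge_pte_alloc(), but only walks page tables that already exist;
 * nothing is allocated on the way down.
 */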
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        return (pte_t *) pmd;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}
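
/*
 * Example for is_aligned_hugepage_range(): with 2 MB huge pages (PAE),
 * HPAGE_MASK clears the low 21 bits, so addr = 0x40000000 with
 * len = 0x800000 (8 MB) is accepted, while len = 0x700000 (7 MB) returns
 * -EINVAL because it is not a multiple of HPAGE_SIZE.  (Illustrative
 * values only.)
 */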

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long start = address;
        unsigned long vpfn = address >> PAGE_SHIFT;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* index into the compound page by the offset within the huge page */
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}
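
/*
 * A pmd entry maps a huge page directly when the PSE (page size extension)
 * bit is set; pmd_huge() is how generic code asks whether a given pmd is
 * such a large-page mapping.
 */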
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}
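
/*
 * Return the struct page for the 4 KB sub-page that contains 'address':
 * pte_page() on the pmd yields the head of the compound huge page, and the
 * offset within the huge page (address & ~HPAGE_MASK) selects the right
 * tail page.
 */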
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif
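
/*
 * The "pte" passed in here is really a pmd that still points at a stale
 * (no longer needed) page-table page in a huge-page region; drop that
 * page-table page and fix up the nr_page_table_pages accounting.
 */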
void hugetlb_clean_stale_pgtable(pte_t *pte)
{
        pmd_t *pmd = (pmd_t *) pte;
        struct page *page;

        page = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_page_state(nr_page_table_pages);
        page_cache_release(page);
}

/* x86_64 also uses this file */
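
/*
 * Hugetlb mappings need HPAGE_SIZE-aligned addresses, so the generic
 * get_unmapped_area() helpers cannot be used directly.  Two searches are
 * provided below: a bottom-up walk that starts from mm->free_area_cache
 * (restarting from TASK_UNMAPPED_BASE if it runs out of room) and rounds
 * every candidate up to a huge-page boundary, and a top-down walk that
 * works downwards from mm->mmap_base for processes using the topdown
 * mmap layout.
 */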

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        start_addr = mm->free_area_cache;

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                goto full_search;
                        }
                        return -ENOMEM;
                }

                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}
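
/*
 * Top-down variant: start just below mm->free_area_cache (capped at
 * mm->mmap_base) and walk downwards, trying each hole below an existing
 * vma while keeping the candidate HPAGE_SIZE-aligned.  If nothing fits,
 * retry once from the base, and finally fall back to the bottom-up search.
 */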
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                                (!prev_vma || (addr >= prev_vma->vm_end)))
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                else
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end)
                                mm->free_area_cache = vma->vm_start;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;

        return addr;
}
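
/*
 * Entry point used when mmap()ing a hugetlbfs file: validate the length,
 * honour an explicitly requested (and suitably aligned) address when
 * possible, and otherwise dispatch to the bottom-up or top-down search to
 * match the layout this mm already uses.
 */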
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
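
/*
 * For reference, the path above is what a userspace mapping of a hugetlbfs
 * file exercises.  A minimal sketch (the /mnt/huge mount point and file
 * name are hypothetical, and the huge page size depends on PAE):
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	size_t len = 4 * 2 * 1024 * 1024;	/* four 2 MB huge pages */
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mmap() call reaches hugetlb_get_unmapped_area() above, which picks an
 * HPAGE_SIZE-aligned address for the mapping.
 */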