mm/mmap.c - karo-tx-linux.git (git.kernelconcepts.de), Linux 2.6.27.38
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code        <alan@redhat.com>
7  */
8
9 #include <linux/slab.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mm.h>
12 #include <linux/shm.h>
13 #include <linux/mman.h>
14 #include <linux/pagemap.h>
15 #include <linux/swap.h>
16 #include <linux/syscalls.h>
17 #include <linux/capability.h>
18 #include <linux/init.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/personality.h>
22 #include <linux/security.h>
23 #include <linux/hugetlb.h>
24 #include <linux/profile.h>
25 #include <linux/module.h>
26 #include <linux/mount.h>
27 #include <linux/mempolicy.h>
28 #include <linux/rmap.h>
29 #include <linux/mmu_notifier.h>
30
31 #include <asm/uaccess.h>
32 #include <asm/cacheflush.h>
33 #include <asm/tlb.h>
34 #include <asm/mmu_context.h>
35
36 #include "internal.h"
37
38 #ifndef arch_mmap_check
39 #define arch_mmap_check(addr, len, flags)       (0)
40 #endif
41
42 #ifndef arch_rebalance_pgtables
43 #define arch_rebalance_pgtables(addr, len)              (addr)
44 #endif
45
46 static void unmap_region(struct mm_struct *mm,
47                 struct vm_area_struct *vma, struct vm_area_struct *prev,
48                 unsigned long start, unsigned long end);
49
50 /*
51  * WARNING: the debugging code uses recursive algorithms, so never enable this
52  * unless you know what you are doing.
53  */
54 #undef DEBUG_MM_RB
55
56 /* description of effects of mapping type and prot in current implementation.
57  * this is due to the limited x86 page protection hardware.  The expected
58  * behavior is in parens:
59  *
60  * map_type     prot
61  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
62  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
63  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
64  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
65  *              
66  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
67  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
68  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
69  *
70  */
71 pgprot_t protection_map[16] = {
72         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
73         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
74 };
75
76 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77 {
78         return __pgprot(pgprot_val(protection_map[vm_flags &
79                                 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
80                         pgprot_val(arch_vm_get_page_prot(vm_flags)));
81 }
82 EXPORT_SYMBOL(vm_get_page_prot);
83
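/*
 * Illustrative sketch of how vm_get_page_prot() above indexes
 * protection_map[], assuming the usual flag values VM_READ=0x1,
 * VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8 from <linux/mm.h>:
 *
 *   vm_get_page_prot(VM_READ|VM_WRITE)           -> protection_map[0x3] (__P011)
 *   vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED) -> protection_map[0xb] (__S011)
 *
 * plus whatever bits arch_vm_get_page_prot() ORs in for the architecture.
 */
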
84 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
85 int sysctl_overcommit_ratio = 50;       /* default is 50% */
86 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
87 atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
88
89 /* amount of vm to protect from userspace access */
90 unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
91
92 /*
93  * Check that a process has enough memory to allocate a new virtual
94  * mapping. 0 means there is enough memory for the allocation to
95  * succeed and -ENOMEM implies there is not.
96  *
97  * We currently support three overcommit policies, which are set via the
98  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
99  *
100  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
101  * Additional code 2002 Jul 20 by Robert Love.
102  *
103  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
104  *
105  * Note this is a helper function intended to be used by LSMs which
106  * wish to use this logic.
107  */
108 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
109 {
110         unsigned long free, allowed;
111
112         vm_acct_memory(pages);
113
114         /*
115          * Sometimes we want to use more memory than we have
116          */
117         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
118                 return 0;
119
120         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
121                 unsigned long n;
122
123                 free = global_page_state(NR_FILE_PAGES);
124                 free += nr_swap_pages;
125
126                 /*
127                  * Any slabs which are created with the
128                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
129                  * which are reclaimable, under pressure.  The dentry
130                  * cache and most inode caches should fall into this
131                  */
132                 free += global_page_state(NR_SLAB_RECLAIMABLE);
133
134                 /*
135                  * Leave the last 3% for root
136                  */
137                 if (!cap_sys_admin)
138                         free -= free / 32;
139
140                 if (free > pages)
141                         return 0;
142
143                 /*
144                  * nr_free_pages() is very expensive on large systems,
145                  * only call if we're about to fail.
146                  */
147                 n = nr_free_pages();
148
149                 /*
150                  * Leave out the reserved pages; they cannot back anonymous allocations.
151                  */
152                 if (n <= totalreserve_pages)
153                         goto error;
154                 else
155                         n -= totalreserve_pages;
156
157                 /*
158                  * Leave the last 3% for root
159                  */
160                 if (!cap_sys_admin)
161                         n -= n / 32;
162                 free += n;
163
164                 if (free > pages)
165                         return 0;
166
167                 goto error;
168         }
169
170         allowed = (totalram_pages - hugetlb_total_pages())
171                 * sysctl_overcommit_ratio / 100;
172         /*
173          * Leave the last 3% for root
174          */
175         if (!cap_sys_admin)
176                 allowed -= allowed / 32;
177         allowed += total_swap_pages;
178
179         /* Don't let a single process grow too big:
180            leave 3% of the size of this process for other processes */
181         allowed -= mm->total_vm / 32;
182
183         /*
184          * cast `allowed' as a signed long because vm_committed_space
185          * sometimes has a negative value
186          */
187         if (atomic_long_read(&vm_committed_space) < (long)allowed)
188                 return 0;
189 error:
190         vm_unacct_memory(pages);
191
192         return -ENOMEM;
193 }
194
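/*
 * Worked example for the OVERCOMMIT_NEVER branch above (numbers purely
 * illustrative): 4GiB of RAM (1048576 4KiB pages), no hugetlb pages, the
 * default overcommit_ratio of 50, 2GiB of swap, non-root caller:
 *
 *   allowed  = 1048576 * 50 / 100        =  524288 pages
 *   allowed -= allowed / 32  (~3%)       ->  507904 pages
 *   allowed += 524288        (swap)      -> 1032192 pages
 *   allowed -= mm->total_vm / 32
 *
 * and the new commit succeeds only while vm_committed_space is below that.
 */
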
195 /*
196  * Requires inode->i_mapping->i_mmap_lock
197  */
198 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
199                 struct file *file, struct address_space *mapping)
200 {
201         if (vma->vm_flags & VM_DENYWRITE)
202                 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
203         if (vma->vm_flags & VM_SHARED)
204                 mapping->i_mmap_writable--;
205
206         flush_dcache_mmap_lock(mapping);
207         if (unlikely(vma->vm_flags & VM_NONLINEAR))
208                 list_del_init(&vma->shared.vm_set.list);
209         else
210                 vma_prio_tree_remove(vma, &mapping->i_mmap);
211         flush_dcache_mmap_unlock(mapping);
212 }
213
214 /*
215  * Unlink a file-based vm structure from its prio_tree, to hide
216  * vma from rmap and vmtruncate before freeing its page tables.
217  */
218 void unlink_file_vma(struct vm_area_struct *vma)
219 {
220         struct file *file = vma->vm_file;
221
222         if (file) {
223                 struct address_space *mapping = file->f_mapping;
224                 spin_lock(&mapping->i_mmap_lock);
225                 __remove_shared_vm_struct(vma, file, mapping);
226                 spin_unlock(&mapping->i_mmap_lock);
227         }
228 }
229
230 /*
231  * Close a vm structure and free it, returning the next.
232  */
233 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
234 {
235         struct vm_area_struct *next = vma->vm_next;
236
237         might_sleep();
238         if (vma->vm_ops && vma->vm_ops->close)
239                 vma->vm_ops->close(vma);
240         if (vma->vm_file) {
241                 fput(vma->vm_file);
242                 if (vma->vm_flags & VM_EXECUTABLE)
243                         removed_exe_file_vma(vma->vm_mm);
244         }
245         mpol_put(vma_policy(vma));
246         kmem_cache_free(vm_area_cachep, vma);
247         return next;
248 }
249
250 SYSCALL_DEFINE1(brk, unsigned long, brk)
251 {
252         unsigned long rlim, retval;
253         unsigned long newbrk, oldbrk;
254         struct mm_struct *mm = current->mm;
255         unsigned long min_brk;
256
257         down_write(&mm->mmap_sem);
258
259 #ifdef CONFIG_COMPAT_BRK
260         min_brk = mm->end_code;
261 #else
262         min_brk = mm->start_brk;
263 #endif
264         if (brk < min_brk)
265                 goto out;
266
267         /*
268          * Check against rlimit here. If this check is done later after the test
269          * of oldbrk with newbrk then it can escape the test and let the data
270          * segment grow beyond its set limit in the case where the limit is
271          * not page aligned. -Ram Gupta
272          */
273         rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
274         if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
275                         (mm->end_data - mm->start_data) > rlim)
276                 goto out;
277
278         newbrk = PAGE_ALIGN(brk);
279         oldbrk = PAGE_ALIGN(mm->brk);
280         if (oldbrk == newbrk)
281                 goto set_brk;
282
283         /* Always allow shrinking brk. */
284         if (brk <= mm->brk) {
285                 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
286                         goto set_brk;
287                 goto out;
288         }
289
290         /* Check against existing mmap mappings. */
291         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
292                 goto out;
293
294         /* Ok, looks good - let it rip. */
295         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
296                 goto out;
297 set_brk:
298         mm->brk = brk;
299 out:
300         retval = mm->brk;
301         up_write(&mm->mmap_sem);
302         return retval;
303 }
304
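/*
 * Illustrative userspace view of the brk syscall above (the libc brk()/
 * sbrk() wrappers hide the raw return convention, which is the resulting
 * mm->brk rather than 0 or -errno):
 *
 *   void *old = sbrk(0);                  // current program break
 *   if (brk((char *)old + 4096) == 0)     // grow the data segment a page
 *           ...                           // heap now extends to old + 4096
 *
 * Shrinking works the same way: the kernel munmaps the pages given back.
 */
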
305 #ifdef DEBUG_MM_RB
306 static int browse_rb(struct rb_root *root)
307 {
308         int i = 0, j;
309         struct rb_node *nd, *pn = NULL;
310         unsigned long prev = 0, pend = 0;
311
312         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
313                 struct vm_area_struct *vma;
314                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
315                 if (vma->vm_start < prev)
316                         printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
317                 if (vma->vm_start < pend)
318                         printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
319                 if (vma->vm_start > vma->vm_end)
320                         printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
321                 i++;
322                 pn = nd;
323                 prev = vma->vm_start;
324                 pend = vma->vm_end;
325         }
326         j = 0;
327         for (nd = pn; nd; nd = rb_prev(nd)) {
328                 j++;
329         }
330         if (i != j)
331                 printk("backwards %d, forwards %d\n", j, i), i = 0;
332         return i;
333 }
334
335 void validate_mm(struct mm_struct *mm)
336 {
337         int bug = 0;
338         int i = 0;
339         struct vm_area_struct *tmp = mm->mmap;
340         while (tmp) {
341                 tmp = tmp->vm_next;
342                 i++;
343         }
344         if (i != mm->map_count)
345                 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
346         i = browse_rb(&mm->mm_rb);
347         if (i != mm->map_count)
348                 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
349         BUG_ON(bug);
350 }
351 #else
352 #define validate_mm(mm) do { } while (0)
353 #endif
354
355 static struct vm_area_struct *
356 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
357                 struct vm_area_struct **pprev, struct rb_node ***rb_link,
358                 struct rb_node ** rb_parent)
359 {
360         struct vm_area_struct * vma;
361         struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
362
363         __rb_link = &mm->mm_rb.rb_node;
364         rb_prev = __rb_parent = NULL;
365         vma = NULL;
366
367         while (*__rb_link) {
368                 struct vm_area_struct *vma_tmp;
369
370                 __rb_parent = *__rb_link;
371                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
372
373                 if (vma_tmp->vm_end > addr) {
374                         vma = vma_tmp;
375                         if (vma_tmp->vm_start <= addr)
376                                 break;
377                         __rb_link = &__rb_parent->rb_left;
378                 } else {
379                         rb_prev = __rb_parent;
380                         __rb_link = &__rb_parent->rb_right;
381                 }
382         }
383
384         *pprev = NULL;
385         if (rb_prev)
386                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
387         *rb_link = __rb_link;
388         *rb_parent = __rb_parent;
389         return vma;
390 }
391
392 static inline void
393 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
394                 struct vm_area_struct *prev, struct rb_node *rb_parent)
395 {
396         if (prev) {
397                 vma->vm_next = prev->vm_next;
398                 prev->vm_next = vma;
399         } else {
400                 mm->mmap = vma;
401                 if (rb_parent)
402                         vma->vm_next = rb_entry(rb_parent,
403                                         struct vm_area_struct, vm_rb);
404                 else
405                         vma->vm_next = NULL;
406         }
407 }
408
409 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
410                 struct rb_node **rb_link, struct rb_node *rb_parent)
411 {
412         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
413         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
414 }
415
416 static inline void __vma_link_file(struct vm_area_struct *vma)
417 {
418         struct file * file;
419
420         file = vma->vm_file;
421         if (file) {
422                 struct address_space *mapping = file->f_mapping;
423
424                 if (vma->vm_flags & VM_DENYWRITE)
425                         atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
426                 if (vma->vm_flags & VM_SHARED)
427                         mapping->i_mmap_writable++;
428
429                 flush_dcache_mmap_lock(mapping);
430                 if (unlikely(vma->vm_flags & VM_NONLINEAR))
431                         vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
432                 else
433                         vma_prio_tree_insert(vma, &mapping->i_mmap);
434                 flush_dcache_mmap_unlock(mapping);
435         }
436 }
437
438 static void
439 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
440         struct vm_area_struct *prev, struct rb_node **rb_link,
441         struct rb_node *rb_parent)
442 {
443         __vma_link_list(mm, vma, prev, rb_parent);
444         __vma_link_rb(mm, vma, rb_link, rb_parent);
445         __anon_vma_link(vma);
446 }
447
448 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
449                         struct vm_area_struct *prev, struct rb_node **rb_link,
450                         struct rb_node *rb_parent)
451 {
452         struct address_space *mapping = NULL;
453
454         if (vma->vm_file)
455                 mapping = vma->vm_file->f_mapping;
456
457         if (mapping) {
458                 spin_lock(&mapping->i_mmap_lock);
459                 vma->vm_truncate_count = mapping->truncate_count;
460         }
461         anon_vma_lock(vma);
462
463         __vma_link(mm, vma, prev, rb_link, rb_parent);
464         __vma_link_file(vma);
465
466         anon_vma_unlock(vma);
467         if (mapping)
468                 spin_unlock(&mapping->i_mmap_lock);
469
470         mm->map_count++;
471         validate_mm(mm);
472 }
473
474 /*
475  * Helper for vma_adjust in the split_vma insert case:
476  * insert vm structure into list and rbtree and anon_vma,
477  * but it has already been inserted into prio_tree earlier.
478  */
479 static void
480 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
481 {
482         struct vm_area_struct * __vma, * prev;
483         struct rb_node ** rb_link, * rb_parent;
484
485         __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
486         BUG_ON(__vma && __vma->vm_start < vma->vm_end);
487         __vma_link(mm, vma, prev, rb_link, rb_parent);
488         mm->map_count++;
489 }
490
491 static inline void
492 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
493                 struct vm_area_struct *prev)
494 {
495         prev->vm_next = vma->vm_next;
496         rb_erase(&vma->vm_rb, &mm->mm_rb);
497         if (mm->mmap_cache == vma)
498                 mm->mmap_cache = prev;
499 }
500
501 /*
502  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
503  * is already present in an i_mmap tree without adjusting the tree.
504  * The following helper function should be used when such adjustments
505  * are necessary.  The "insert" vma (if any) is to be inserted
506  * before we drop the necessary locks.
507  */
508 void vma_adjust(struct vm_area_struct *vma, unsigned long start,
509         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
510 {
511         struct mm_struct *mm = vma->vm_mm;
512         struct vm_area_struct *next = vma->vm_next;
513         struct vm_area_struct *importer = NULL;
514         struct address_space *mapping = NULL;
515         struct prio_tree_root *root = NULL;
516         struct file *file = vma->vm_file;
517         struct anon_vma *anon_vma = NULL;
518         long adjust_next = 0;
519         int remove_next = 0;
520
521         if (next && !insert) {
522                 if (end >= next->vm_end) {
523                         /*
524                          * vma expands, overlapping all the next, and
525                          * perhaps the one after too (mprotect case 6).
526                          */
527 again:                  remove_next = 1 + (end > next->vm_end);
528                         end = next->vm_end;
529                         anon_vma = next->anon_vma;
530                         importer = vma;
531                 } else if (end > next->vm_start) {
532                         /*
533                          * vma expands, overlapping part of the next:
534                          * mprotect case 5 shifting the boundary up.
535                          */
536                         adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
537                         anon_vma = next->anon_vma;
538                         importer = vma;
539                 } else if (end < vma->vm_end) {
540                         /*
541                          * vma shrinks, and !insert tells it's not
542                          * split_vma inserting another: so it must be
543                          * mprotect case 4 shifting the boundary down.
544                          */
545                         adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
546                         anon_vma = next->anon_vma;
547                         importer = next;
548                 }
549         }
550
551         if (file) {
552                 mapping = file->f_mapping;
553                 if (!(vma->vm_flags & VM_NONLINEAR))
554                         root = &mapping->i_mmap;
555                 spin_lock(&mapping->i_mmap_lock);
556                 if (importer &&
557                     vma->vm_truncate_count != next->vm_truncate_count) {
558                         /*
559                          * unmap_mapping_range might be in progress:
560                          * ensure that the expanding vma is rescanned.
561                          */
562                         importer->vm_truncate_count = 0;
563                 }
564                 if (insert) {
565                         insert->vm_truncate_count = vma->vm_truncate_count;
566                         /*
567                          * Put into prio_tree now, so instantiated pages
568                          * are visible to arm/parisc __flush_dcache_page
569                          * throughout; but we cannot insert into address
570                          * space until vma start or end is updated.
571                          */
572                         __vma_link_file(insert);
573                 }
574         }
575
576         /*
577          * When changing only vma->vm_end, we don't really need
578          * anon_vma lock.
579          */
580         if (vma->anon_vma && (insert || importer || start != vma->vm_start))
581                 anon_vma = vma->anon_vma;
582         if (anon_vma) {
583                 spin_lock(&anon_vma->lock);
584                 /*
585                  * Easily overlooked: when mprotect shifts the boundary,
586                  * make sure the expanding vma has anon_vma set if the
587                  * shrinking vma had, to cover any anon pages imported.
588                  */
589                 if (importer && !importer->anon_vma) {
590                         importer->anon_vma = anon_vma;
591                         __anon_vma_link(importer);
592                 }
593         }
594
595         if (root) {
596                 flush_dcache_mmap_lock(mapping);
597                 vma_prio_tree_remove(vma, root);
598                 if (adjust_next)
599                         vma_prio_tree_remove(next, root);
600         }
601
602         vma->vm_start = start;
603         vma->vm_end = end;
604         vma->vm_pgoff = pgoff;
605         if (adjust_next) {
606                 next->vm_start += adjust_next << PAGE_SHIFT;
607                 next->vm_pgoff += adjust_next;
608         }
609
610         if (root) {
611                 if (adjust_next)
612                         vma_prio_tree_insert(next, root);
613                 vma_prio_tree_insert(vma, root);
614                 flush_dcache_mmap_unlock(mapping);
615         }
616
617         if (remove_next) {
618                 /*
619                  * vma_merge has merged next into vma, and needs
620                  * us to remove next before dropping the locks.
621                  */
622                 __vma_unlink(mm, next, vma);
623                 if (file)
624                         __remove_shared_vm_struct(next, file, mapping);
625                 if (next->anon_vma)
626                         __anon_vma_merge(vma, next);
627         } else if (insert) {
628                 /*
629                  * split_vma has split insert from vma, and needs
630                  * us to insert it before dropping the locks
631                  * (it may either follow vma or precede it).
632                  */
633                 __insert_vm_struct(mm, insert);
634         }
635
636         if (anon_vma)
637                 spin_unlock(&anon_vma->lock);
638         if (mapping)
639                 spin_unlock(&mapping->i_mmap_lock);
640
641         if (remove_next) {
642                 if (file) {
643                         fput(file);
644                         if (next->vm_flags & VM_EXECUTABLE)
645                                 removed_exe_file_vma(mm);
646                 }
647                 mm->map_count--;
648                 mpol_put(vma_policy(next));
649                 kmem_cache_free(vm_area_cachep, next);
650                 /*
651                  * In mprotect's case 6 (see comments on vma_merge),
652                  * we must remove another next too. It would clutter
653                  * up the code too much to do both in one go.
654                  */
655                 if (remove_next == 2) {
656                         next = vma->vm_next;
657                         goto again;
658                 }
659         }
660
661         validate_mm(mm);
662 }
663
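/*
 * Worked example of the adjust_next path above (addresses purely
 * illustrative): with prev = [0x1000, 0x3000) and next = [0x3000, 0x6000),
 * an mprotect() of [0x3000, 0x4000) to prev's flags is vma_merge case 5,
 * which calls vma_adjust(prev, 0x1000, 0x4000, prev->vm_pgoff, NULL).
 * Since end (0x4000) falls inside next, adjust_next is 1 page: prev->vm_end
 * grows to 0x4000 while next shrinks to [0x4000, 0x6000) and its vm_pgoff
 * is bumped by 1.
 */
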
664 /*
665  * If the vma has a ->close operation then the driver probably needs to release
666  * per-vma resources, so we don't attempt to merge those.
667  */
668 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
669
670 static inline int is_mergeable_vma(struct vm_area_struct *vma,
671                         struct file *file, unsigned long vm_flags)
672 {
673         if (vma->vm_flags != vm_flags)
674                 return 0;
675         if (vma->vm_file != file)
676                 return 0;
677         if (vma->vm_ops && vma->vm_ops->close)
678                 return 0;
679         return 1;
680 }
681
682 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
683                                         struct anon_vma *anon_vma2)
684 {
685         return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
686 }
687
688 /*
689  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
690  * in front of (at a lower virtual address and file offset than) the vma.
691  *
692  * We cannot merge two vmas if they have differently assigned (non-NULL)
693  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
694  *
695  * We don't check here for the merged mmap wrapping around the end of pagecache
696  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
697  * wrap, nor mmaps which cover the final page at index -1UL.
698  */
699 static int
700 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
701         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
702 {
703         if (is_mergeable_vma(vma, file, vm_flags) &&
704             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
705                 if (vma->vm_pgoff == vm_pgoff)
706                         return 1;
707         }
708         return 0;
709 }
710
711 /*
712  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
713  * beyond (at a higher virtual address and file offset than) the vma.
714  *
715  * We cannot merge two vmas if they have differently assigned (non-NULL)
716  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
717  */
718 static int
719 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
720         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
721 {
722         if (is_mergeable_vma(vma, file, vm_flags) &&
723             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
724                 pgoff_t vm_pglen;
725                 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
726                 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
727                         return 1;
728         }
729         return 0;
730 }
731
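/*
 * Example of the vm_pgoff arithmetic used by the two helpers above
 * (addresses and offsets made up for illustration): a file vma covering
 * [0x1000, 0x3000) with vm_pgoff == 5 spans vm_pglen == 2 pages, so a new
 * mapping of the same file starting at 0x3000 can merge after it only if
 * its pgoff is 5 + 2 == 7, and a mapping of n pages ending at 0x1000 can
 * merge before it only if its own pgoff is 5 - n.
 */
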
732 /*
733  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
734  * whether that can be merged with its predecessor or its successor.
735  * Or both (it neatly fills a hole).
736  *
737  * In most cases - when called for mmap, brk or mremap - [addr,end) is
738  * certain not to be mapped by the time vma_merge is called; but when
739  * called for mprotect, it is certain to be already mapped (either at
740  * an offset within prev, or at the start of next), and the flags of
741  * this area are about to be changed to vm_flags - and the no-change
742  * case has already been eliminated.
743  *
744  * The following mprotect cases have to be considered, where AAAA is
745  * the area passed down from mprotect_fixup, never extending beyond one
746  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
747  *
748  *     AAAA             AAAA                AAAA          AAAA
749  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
750  *    cannot merge    might become    might become    might become
751  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
752  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
753  *    mremap move:                                    PPPPNNNNNNNN 8
754  *        AAAA
755  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
756  *    might become    case 1 below    case 2 below    case 3 below
757  *
758  * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
759  * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
760  */
761 struct vm_area_struct *vma_merge(struct mm_struct *mm,
762                         struct vm_area_struct *prev, unsigned long addr,
763                         unsigned long end, unsigned long vm_flags,
764                         struct anon_vma *anon_vma, struct file *file,
765                         pgoff_t pgoff, struct mempolicy *policy)
766 {
767         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
768         struct vm_area_struct *area, *next;
769
770         /*
771          * We later require that vma->vm_flags == vm_flags,
772          * so this tests vma->vm_flags & VM_SPECIAL, too.
773          */
774         if (vm_flags & VM_SPECIAL)
775                 return NULL;
776
777         if (prev)
778                 next = prev->vm_next;
779         else
780                 next = mm->mmap;
781         area = next;
782         if (next && next->vm_end == end)                /* cases 6, 7, 8 */
783                 next = next->vm_next;
784
785         /*
786          * Can it merge with the predecessor?
787          */
788         if (prev && prev->vm_end == addr &&
789                         mpol_equal(vma_policy(prev), policy) &&
790                         can_vma_merge_after(prev, vm_flags,
791                                                 anon_vma, file, pgoff)) {
792                 /*
793                  * OK, it can.  Can we now merge in the successor as well?
794                  */
795                 if (next && end == next->vm_start &&
796                                 mpol_equal(policy, vma_policy(next)) &&
797                                 can_vma_merge_before(next, vm_flags,
798                                         anon_vma, file, pgoff+pglen) &&
799                                 is_mergeable_anon_vma(prev->anon_vma,
800                                                       next->anon_vma)) {
801                                                         /* cases 1, 6 */
802                         vma_adjust(prev, prev->vm_start,
803                                 next->vm_end, prev->vm_pgoff, NULL);
804                 } else                                  /* cases 2, 5, 7 */
805                         vma_adjust(prev, prev->vm_start,
806                                 end, prev->vm_pgoff, NULL);
807                 return prev;
808         }
809
810         /*
811          * Can this new request be merged in front of next?
812          */
813         if (next && end == next->vm_start &&
814                         mpol_equal(policy, vma_policy(next)) &&
815                         can_vma_merge_before(next, vm_flags,
816                                         anon_vma, file, pgoff+pglen)) {
817                 if (prev && addr < prev->vm_end)        /* case 4 */
818                         vma_adjust(prev, prev->vm_start,
819                                 addr, prev->vm_pgoff, NULL);
820                 else                                    /* cases 3, 8 */
821                         vma_adjust(area, addr, next->vm_end,
822                                 next->vm_pgoff - pglen, NULL);
823                 return area;
824         }
825
826         return NULL;
827 }
828
829 /*
830  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
831  * neighbouring vmas for a suitable anon_vma, before it goes off
832  * to allocate a new anon_vma.  It checks because a repetitive
833  * sequence of mprotects and faults may otherwise lead to distinct
834  * anon_vmas being allocated, preventing vma merge in subsequent
835  * mprotect.
836  */
837 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
838 {
839         struct vm_area_struct *near;
840         unsigned long vm_flags;
841
842         near = vma->vm_next;
843         if (!near)
844                 goto try_prev;
845
846         /*
847          * Since only mprotect tries to remerge vmas, match flags
848          * which might be mprotected into each other later on.
849          * Neither mlock nor madvise tries to remerge at present,
850          * so leave their flags as obstructing a merge.
851          */
852         vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
853         vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
854
855         if (near->anon_vma && vma->vm_end == near->vm_start &&
856                         mpol_equal(vma_policy(vma), vma_policy(near)) &&
857                         can_vma_merge_before(near, vm_flags,
858                                 NULL, vma->vm_file, vma->vm_pgoff +
859                                 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
860                 return near->anon_vma;
861 try_prev:
862         /*
863          * It is potentially slow to have to call find_vma_prev here.
864          * But it's only on the first write fault on the vma, not
865          * every time, and we could devise a way to avoid it later
866          * (e.g. stash info in next's anon_vma_node when assigning
867          * an anon_vma, or when trying vma_merge).  Another time.
868          */
869         BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
870         if (!near)
871                 goto none;
872
873         vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
874         vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
875
876         if (near->anon_vma && near->vm_end == vma->vm_start &&
877                         mpol_equal(vma_policy(near), vma_policy(vma)) &&
878                         can_vma_merge_after(near, vm_flags,
879                                 NULL, vma->vm_file, vma->vm_pgoff))
880                 return near->anon_vma;
881 none:
882         /*
883          * There's no absolute need to look only at touching neighbours:
884          * we could search further afield for "compatible" anon_vmas.
885          * But it would probably just be a waste of time searching,
886          * or lead to too many vmas hanging off the same anon_vma.
887          * We're trying to allow mprotect remerging later on,
888          * not trying to minimize memory used for anon_vmas.
889          */
890         return NULL;
891 }
892
893 #ifdef CONFIG_PROC_FS
894 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
895                                                 struct file *file, long pages)
896 {
897         const unsigned long stack_flags
898                 = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
899
900         if (file) {
901                 mm->shared_vm += pages;
902                 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
903                         mm->exec_vm += pages;
904         } else if (flags & stack_flags)
905                 mm->stack_vm += pages;
906         if (flags & (VM_RESERVED|VM_IO))
907                 mm->reserved_vm += pages;
908 }
909 #endif /* CONFIG_PROC_FS */
910
911 /*
912  * The caller must hold down_write(current->mm->mmap_sem).
913  */
914
915 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
916                         unsigned long len, unsigned long prot,
917                         unsigned long flags, unsigned long pgoff)
918 {
919         struct mm_struct * mm = current->mm;
920         struct inode *inode;
921         unsigned int vm_flags;
922         int error;
923         int accountable = 1;
924         unsigned long reqprot = prot;
925
926         /*
927          * Does the application expect PROT_READ to imply PROT_EXEC?
928          *
929          * (the exception is when the underlying filesystem is noexec
930  *  mounted, in which case we don't add PROT_EXEC.)
931          */
932         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
933                 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
934                         prot |= PROT_EXEC;
935
936         if (!len)
937                 return -EINVAL;
938
939         if (!(flags & MAP_FIXED))
940                 addr = round_hint_to_min(addr);
941
942         error = arch_mmap_check(addr, len, flags);
943         if (error)
944                 return error;
945
946         /* Careful about overflows.. */
947         len = PAGE_ALIGN(len);
948         if (!len || len > TASK_SIZE)
949                 return -ENOMEM;
950
951         /* offset overflow? */
952         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
953                return -EOVERFLOW;
954
955         /* Too many mappings? */
956         if (mm->map_count > sysctl_max_map_count)
957                 return -ENOMEM;
958
959         /* Obtain the address to map to. We verify (or select) it and ensure
960          * that it represents a valid section of the address space.
961          */
962         addr = get_unmapped_area(file, addr, len, pgoff, flags);
963         if (addr & ~PAGE_MASK)
964                 return addr;
965
966         /* Do simple checking here so the lower-level routines won't have
967          * to. We assume access permissions have been handled by the open
968          * of the memory object, so we don't do any here.
969          */
970         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
971                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
972
973         if (flags & MAP_LOCKED) {
974                 if (!can_do_mlock())
975                         return -EPERM;
976                 vm_flags |= VM_LOCKED;
977         }
978         /* mlock MCL_FUTURE? */
979         if (vm_flags & VM_LOCKED) {
980                 unsigned long locked, lock_limit;
981                 locked = len >> PAGE_SHIFT;
982                 locked += mm->locked_vm;
983                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
984                 lock_limit >>= PAGE_SHIFT;
985                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
986                         return -EAGAIN;
987         }
988
989         inode = file ? file->f_path.dentry->d_inode : NULL;
990
991         if (file) {
992                 switch (flags & MAP_TYPE) {
993                 case MAP_SHARED:
994                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
995                                 return -EACCES;
996
997                         /*
998                          * Make sure we don't allow writing to an append-only
999                          * file..
1000                          */
1001                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1002                                 return -EACCES;
1003
1004                         /*
1005                          * Make sure there are no mandatory locks on the file.
1006                          */
1007                         if (locks_verify_locked(inode))
1008                                 return -EAGAIN;
1009
1010                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1011                         if (!(file->f_mode & FMODE_WRITE))
1012                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1013
1014                         /* fall through */
1015                 case MAP_PRIVATE:
1016                         if (!(file->f_mode & FMODE_READ))
1017                                 return -EACCES;
1018                         if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1019                                 if (vm_flags & VM_EXEC)
1020                                         return -EPERM;
1021                                 vm_flags &= ~VM_MAYEXEC;
1022                         }
1023                         if (is_file_hugepages(file))
1024                                 accountable = 0;
1025
1026                         if (!file->f_op || !file->f_op->mmap)
1027                                 return -ENODEV;
1028                         break;
1029
1030                 default:
1031                         return -EINVAL;
1032                 }
1033         } else {
1034                 switch (flags & MAP_TYPE) {
1035                 case MAP_SHARED:
1036                         /*
1037                          * Ignore pgoff.
1038                          */
1039                         pgoff = 0;
1040                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1041                         break;
1042                 case MAP_PRIVATE:
1043                         /*
1044                          * Set pgoff according to addr for anon_vma.
1045                          */
1046                         pgoff = addr >> PAGE_SHIFT;
1047                         break;
1048                 default:
1049                         return -EINVAL;
1050                 }
1051         }
1052
1053         error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1054         if (error)
1055                 return error;
1056
1057         return mmap_region(file, addr, len, flags, vm_flags, pgoff,
1058                            accountable);
1059 }
1060 EXPORT_SYMBOL(do_mmap_pgoff);
1061
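/*
 * Illustrative example of the flag translation done above: a call such as
 *
 *   mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
 *
 * reaches do_mmap_pgoff() with file == NULL, picks up vm_flags of
 * VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC (plus mm->def_flags),
 * takes the anonymous MAP_PRIVATE branch which sets pgoff = addr >> PAGE_SHIFT,
 * and is then handed on to mmap_region().
 */
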
1062 /*
1063  * Some shared mappings will want the pages marked read-only
1064  * to track write events. If so, we'll downgrade vm_page_prot
1065  * to the private version (using protection_map[] without the
1066  * VM_SHARED bit).
1067  */
1068 int vma_wants_writenotify(struct vm_area_struct *vma)
1069 {
1070         unsigned int vm_flags = vma->vm_flags;
1071
1072         /* If it was private or non-writable, the write bit is already clear */
1073         if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1074                 return 0;
1075
1076         /* The backer wishes to know when pages are first written to? */
1077         if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1078                 return 1;
1079
1080         /* The open routine did something to the protections already? */
1081         if (pgprot_val(vma->vm_page_prot) !=
1082             pgprot_val(vm_get_page_prot(vm_flags)))
1083                 return 0;
1084
1085         /* Specialty mapping? */
1086         if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
1087                 return 0;
1088
1089         /* Can the mapping track the dirty pages? */
1090         return vma->vm_file && vma->vm_file->f_mapping &&
1091                 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1092 }
1093
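/*
 * Illustrative example of when the helper above fires: a PROT_READ|PROT_WRITE,
 * MAP_SHARED mapping of a regular file on a filesystem that accounts dirty
 * pages returns 1, so mmap_region() below installs the non-VM_SHARED
 * protection.  The first store to each page then faults, giving the
 * filesystem a chance to run ->page_mkwrite() before the pte is made
 * writable.
 */
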
1094 unsigned long mmap_region(struct file *file, unsigned long addr,
1095                           unsigned long len, unsigned long flags,
1096                           unsigned int vm_flags, unsigned long pgoff,
1097                           int accountable)
1098 {
1099         struct mm_struct *mm = current->mm;
1100         struct vm_area_struct *vma, *prev;
1101         int correct_wcount = 0;
1102         int error;
1103         struct rb_node **rb_link, *rb_parent;
1104         unsigned long charged = 0;
1105         struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
1106
1107         /* Clear old maps */
1108         error = -ENOMEM;
1109 munmap_back:
1110         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1111         if (vma && vma->vm_start < addr + len) {
1112                 if (do_munmap(mm, addr, len))
1113                         return -ENOMEM;
1114                 goto munmap_back;
1115         }
1116
1117         /* Check against address space limit. */
1118         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1119                 return -ENOMEM;
1120
1121         if (flags & MAP_NORESERVE)
1122                 vm_flags |= VM_NORESERVE;
1123
1124         if (accountable && (!(flags & MAP_NORESERVE) ||
1125                             sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
1126                 if (vm_flags & VM_SHARED) {
1127                         /* Check memory availability in shmem_file_setup? */
1128                         vm_flags |= VM_ACCOUNT;
1129                 } else if (vm_flags & VM_WRITE) {
1130                         /*
1131                          * Private writable mapping: check memory availability
1132                          */
1133                         charged = len >> PAGE_SHIFT;
1134                         if (security_vm_enough_memory(charged))
1135                                 return -ENOMEM;
1136                         vm_flags |= VM_ACCOUNT;
1137                 }
1138         }
1139
1140         /*
1141          * Can we just expand an old private anonymous mapping?
1142          * The VM_SHARED test is necessary because shmem_zero_setup
1143          * will create the file object for a shared anonymous map below.
1144          */
1145         if (!file && !(vm_flags & VM_SHARED) &&
1146             vma_merge(mm, prev, addr, addr + len, vm_flags,
1147                                         NULL, NULL, pgoff, NULL))
1148                 goto out;
1149
1150         /*
1151          * Determine the object being mapped and call the appropriate
1152          * specific mapper. The address has already been validated, but
1153          * not unmapped; the old maps, however, have been removed from the list.
1154          */
1155         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1156         if (!vma) {
1157                 error = -ENOMEM;
1158                 goto unacct_error;
1159         }
1160
1161         vma->vm_mm = mm;
1162         vma->vm_start = addr;
1163         vma->vm_end = addr + len;
1164         vma->vm_flags = vm_flags;
1165         vma->vm_page_prot = vm_get_page_prot(vm_flags);
1166         vma->vm_pgoff = pgoff;
1167
1168         if (file) {
1169                 error = -EINVAL;
1170                 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1171                         goto free_vma;
1172                 if (vm_flags & VM_DENYWRITE) {
1173                         error = deny_write_access(file);
1174                         if (error)
1175                                 goto free_vma;
1176                         correct_wcount = 1;
1177                 }
1178                 vma->vm_file = file;
1179                 get_file(file);
1180                 error = file->f_op->mmap(file, vma);
1181                 if (error)
1182                         goto unmap_and_free_vma;
1183                 if (vm_flags & VM_EXECUTABLE)
1184                         added_exe_file_vma(mm);
1185         } else if (vm_flags & VM_SHARED) {
1186                 error = shmem_zero_setup(vma);
1187                 if (error)
1188                         goto free_vma;
1189         }
1190
1191         /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
1192          * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
1193          * that memory reservation must be checked; but that reservation
1194          * belongs to the shared memory object, not to the vma: so now clear it.
1195          */
1196         if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
1197                 vma->vm_flags &= ~VM_ACCOUNT;
1198
1199         /* Can addr have changed??
1200          *
1201          * Answer: Yes, several device drivers can do it in their
1202          *         f_op->mmap method. -DaveM
1203          */
1204         addr = vma->vm_start;
1205         pgoff = vma->vm_pgoff;
1206         vm_flags = vma->vm_flags;
1207
1208         if (vma_wants_writenotify(vma))
1209                 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1210
1211         if (file && vma_merge(mm, prev, addr, vma->vm_end,
1212                         vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
1213                 mpol_put(vma_policy(vma));
1214                 kmem_cache_free(vm_area_cachep, vma);
1215                 fput(file);
1216                 if (vm_flags & VM_EXECUTABLE)
1217                         removed_exe_file_vma(mm);
1218         } else {
1219                 vma_link(mm, vma, prev, rb_link, rb_parent);
1220                 file = vma->vm_file;
1221         }
1222
1223         /* Once vma denies write, undo our temporary denial count */
1224         if (correct_wcount)
1225                 atomic_inc(&inode->i_writecount);
1226 out:
1227         mm->total_vm += len >> PAGE_SHIFT;
1228         vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1229         if (vm_flags & VM_LOCKED) {
1230                 mm->locked_vm += len >> PAGE_SHIFT;
1231                 make_pages_present(addr, addr + len);
1232         }
1233         if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1234                 make_pages_present(addr, addr + len);
1235         return addr;
1236
1237 unmap_and_free_vma:
1238         if (correct_wcount)
1239                 atomic_inc(&inode->i_writecount);
1240         vma->vm_file = NULL;
1241         fput(file);
1242
1243         /* Undo any partial mapping done by a device driver. */
1244         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1245         charged = 0;
1246 free_vma:
1247         kmem_cache_free(vm_area_cachep, vma);
1248 unacct_error:
1249         if (charged)
1250                 vm_unacct_memory(charged);
1251         return error;
1252 }
1253
1254 /* Get an address range which is currently unmapped.
1255  * For shmat() with addr=0.
1256  *
1257  * Ugly calling convention alert:
1258  * Return value with the low bits set means error value,
1259  * ie
1260  *      if (ret & ~PAGE_MASK)
1261  *              error = ret;
1262  *
1263  * This function "knows" that -ENOMEM has the bits set.
1264  */
1265 #ifndef HAVE_ARCH_UNMAPPED_AREA
1266 unsigned long
1267 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1268                 unsigned long len, unsigned long pgoff, unsigned long flags)
1269 {
1270         struct mm_struct *mm = current->mm;
1271         struct vm_area_struct *vma;
1272         unsigned long start_addr;
1273
1274         if (len > TASK_SIZE)
1275                 return -ENOMEM;
1276
1277         if (flags & MAP_FIXED)
1278                 return addr;
1279
1280         if (addr) {
1281                 addr = PAGE_ALIGN(addr);
1282                 vma = find_vma(mm, addr);
1283                 if (TASK_SIZE - len >= addr &&
1284                     (!vma || addr + len <= vma->vm_start))
1285                         return addr;
1286         }
1287         if (len > mm->cached_hole_size) {
1288                 start_addr = addr = mm->free_area_cache;
1289         } else {
1290                 start_addr = addr = TASK_UNMAPPED_BASE;
1291                 mm->cached_hole_size = 0;
1292         }
1293
1294 full_search:
1295         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1296                 /* At this point:  (!vma || addr < vma->vm_end). */
1297                 if (TASK_SIZE - len < addr) {
1298                         /*
1299                          * Start a new search - just in case we missed
1300                          * some holes.
1301                          */
1302                         if (start_addr != TASK_UNMAPPED_BASE) {
1303                                 addr = TASK_UNMAPPED_BASE;
1304                                 start_addr = addr;
1305                                 mm->cached_hole_size = 0;
1306                                 goto full_search;
1307                         }
1308                         return -ENOMEM;
1309                 }
1310                 if (!vma || addr + len <= vma->vm_start) {
1311                         /*
1312                          * Remember the place where we stopped the search:
1313                          */
1314                         mm->free_area_cache = addr + len;
1315                         return addr;
1316                 }
1317                 if (addr + mm->cached_hole_size < vma->vm_start)
1318                         mm->cached_hole_size = vma->vm_start - addr;
1319                 addr = vma->vm_end;
1320         }
1321 }
1322 #endif  
1323
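/*
 * Sketch of how the bottom-up allocator above behaves: the first search
 * after exec starts at TASK_UNMAPPED_BASE; once a hole is found,
 * free_area_cache is set just past it so the next request resumes there
 * instead of rescanning from the bottom.  If a request is no larger than
 * cached_hole_size, the walk starts from TASK_UNMAPPED_BASE again, since
 * a hole at least that big was seen lower down.
 */
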
1324 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1325 {
1326         /*
1327          * Is this a new hole at the lowest possible address?
1328          */
1329         if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
1330                 mm->free_area_cache = addr;
1331                 mm->cached_hole_size = ~0UL;
1332         }
1333 }
1334
1335 /*
1336  * This mmap-allocator allocates new areas top-down from below the
1337  * stack's low limit (the base):
1338  */
1339 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1340 unsigned long
1341 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1342                           const unsigned long len, const unsigned long pgoff,
1343                           const unsigned long flags)
1344 {
1345         struct vm_area_struct *vma;
1346         struct mm_struct *mm = current->mm;
1347         unsigned long addr = addr0;
1348
1349         /* requested length too big for entire address space */
1350         if (len > TASK_SIZE)
1351                 return -ENOMEM;
1352
1353         if (flags & MAP_FIXED)
1354                 return addr;
1355
1356         /* requesting a specific address */
1357         if (addr) {
1358                 addr = PAGE_ALIGN(addr);
1359                 vma = find_vma(mm, addr);
1360                 if (TASK_SIZE - len >= addr &&
1361                                 (!vma || addr + len <= vma->vm_start))
1362                         return addr;
1363         }
1364
1365         /* check if free_area_cache is useful for us */
1366         if (len <= mm->cached_hole_size) {
1367                 mm->cached_hole_size = 0;
1368                 mm->free_area_cache = mm->mmap_base;
1369         }
1370
1371         /* either no address requested or can't fit in requested address hole */
1372         addr = mm->free_area_cache;
1373
1374         /* make sure it can fit in the remaining address space */
1375         if (addr > len) {
1376                 vma = find_vma(mm, addr-len);
1377                 if (!vma || addr <= vma->vm_start)
1378                         /* remember the address as a hint for next time */
1379                         return (mm->free_area_cache = addr-len);
1380         }
1381
1382         if (mm->mmap_base < len)
1383                 goto bottomup;
1384
1385         addr = mm->mmap_base-len;
1386
1387         do {
1388                 /*
1389                  * Lookup failure means no vma is above this address,
1390                  * else if new region fits below vma->vm_start,
1391                  * return with success:
1392                  */
1393                 vma = find_vma(mm, addr);
1394                 if (!vma || addr+len <= vma->vm_start)
1395                         /* remember the address as a hint for next time */
1396                         return (mm->free_area_cache = addr);
1397
1398                 /* remember the largest hole we saw so far */
1399                 if (addr + mm->cached_hole_size < vma->vm_start)
1400                         mm->cached_hole_size = vma->vm_start - addr;
1401
1402                 /* try just below the current vma->vm_start */
1403                 addr = vma->vm_start-len;
1404         } while (len < vma->vm_start);
1405
1406 bottomup:
1407         /*
1408          * A failed mmap() very likely causes application failure,
1409          * so fall back to the bottom-up function here. This scenario
1410          * can happen with large stack limits and large mmap()
1411          * allocations.
1412          */
1413         mm->cached_hole_size = ~0UL;
1414         mm->free_area_cache = TASK_UNMAPPED_BASE;
1415         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1416         /*
1417          * Restore the topdown base:
1418          */
1419         mm->free_area_cache = mm->mmap_base;
1420         mm->cached_hole_size = ~0UL;
1421
1422         return addr;
1423 }
1424 #endif
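
A user-space sketch (not part of mmap.c) of the behaviour implemented above:
with the default top-down layout, hint-less anonymous mmap() requests come back
working downwards from mmap_base.  The exact addresses depend on the
architecture, ASLR and the legacy_va_layout sysctl.

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                /* Two hint-less anonymous mappings: on a top-down layout the
                 * second one normally lands just below the first. */
                void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                printf("first  %p\nsecond %p\n", a, b);
                return 0;
        }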
1425
1426 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1427 {
1428         /*
1429          * Is this a new hole at the highest possible address?
1430          */
1431         if (addr > mm->free_area_cache)
1432                 mm->free_area_cache = addr;
1433
1434         /* don't allow allocations above current base */
1435         if (mm->free_area_cache > mm->mmap_base)
1436                 mm->free_area_cache = mm->mmap_base;
1437 }
1438
1439 unsigned long
1440 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1441                 unsigned long pgoff, unsigned long flags)
1442 {
1443         unsigned long (*get_area)(struct file *, unsigned long,
1444                                   unsigned long, unsigned long, unsigned long);
1445
1446         get_area = current->mm->get_unmapped_area;
1447         if (file && file->f_op && file->f_op->get_unmapped_area)
1448                 get_area = file->f_op->get_unmapped_area;
1449         addr = get_area(file, addr, len, pgoff, flags);
1450         if (IS_ERR_VALUE(addr))
1451                 return addr;
1452
1453         if (addr > TASK_SIZE - len)
1454                 return -ENOMEM;
1455         if (addr & ~PAGE_MASK)
1456                 return -EINVAL;
1457
1458         return arch_rebalance_pgtables(addr, len);
1459 }
1460
1461 EXPORT_SYMBOL(get_unmapped_area);
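
When a file provides ->get_unmapped_area it takes precedence over the mm
default above.  A hedged driver-side sketch (the demo_* names are hypothetical,
not from this file) of the common over-allocate-then-align trick for mappings
that need stricter alignment than PAGE_SIZE:

        static unsigned long demo_get_unmapped_area(struct file *file,
                        unsigned long addr, unsigned long len,
                        unsigned long pgoff, unsigned long flags)
        {
                unsigned long align = 1UL << 16;  /* assumed 64KiB requirement */

                if (flags & MAP_FIXED)
                        return addr;
                /* Ask the default policy for a hole big enough that an aligned
                 * start of 'len' bytes still fits inside it. */
                addr = current->mm->get_unmapped_area(file, addr, len + align,
                                                      pgoff, flags);
                if (IS_ERR_VALUE(addr))
                        return addr;
                return ALIGN(addr, align);
        }

        static const struct file_operations demo_fops = {
                .get_unmapped_area = demo_get_unmapped_area,
                /* .mmap and friends omitted */
        };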
1462
1463 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1464 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
1465 {
1466         struct vm_area_struct *vma = NULL;
1467
1468         if (mm) {
1469                 /* Check the cache first. */
1470                 /* (Cache hit rate is typically around 35%.) */
1471                 vma = mm->mmap_cache;
1472                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1473                         struct rb_node * rb_node;
1474
1475                         rb_node = mm->mm_rb.rb_node;
1476                         vma = NULL;
1477
1478                         while (rb_node) {
1479                                 struct vm_area_struct * vma_tmp;
1480
1481                                 vma_tmp = rb_entry(rb_node,
1482                                                 struct vm_area_struct, vm_rb);
1483
1484                                 if (vma_tmp->vm_end > addr) {
1485                                         vma = vma_tmp;
1486                                         if (vma_tmp->vm_start <= addr)
1487                                                 break;
1488                                         rb_node = rb_node->rb_left;
1489                                 } else
1490                                         rb_node = rb_node->rb_right;
1491                         }
1492                         if (vma)
1493                                 mm->mmap_cache = vma;
1494                 }
1495         }
1496         return vma;
1497 }
1498
1499 EXPORT_SYMBOL(find_vma);
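
find_vma() only guarantees addr < vm_end for the vma it returns; the address
may still fall in the hole below vm_start.  A minimal in-kernel usage sketch
(the demo_* name is hypothetical) showing the extra check and the required
mmap_sem protection:

        static int demo_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
        {
                struct vm_area_struct *vma;
                int mapped;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, addr);
                /* a vma starting above addr means addr sits in a hole */
                mapped = vma && vma->vm_start <= addr;
                up_read(&mm->mmap_sem);
                return mapped;
        }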
1500
1501 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
1502 struct vm_area_struct *
1503 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1504                         struct vm_area_struct **pprev)
1505 {
1506         struct vm_area_struct *vma = NULL, *prev = NULL;
1507         struct rb_node * rb_node;
1508         if (!mm)
1509                 goto out;
1510
1511         /* Guard against addr being lower than the first VMA */
1512         vma = mm->mmap;
1513
1514         /* Go through the RB tree quickly. */
1515         rb_node = mm->mm_rb.rb_node;
1516
1517         while (rb_node) {
1518                 struct vm_area_struct *vma_tmp;
1519                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1520
1521                 if (addr < vma_tmp->vm_end) {
1522                         rb_node = rb_node->rb_left;
1523                 } else {
1524                         prev = vma_tmp;
1525                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1526                                 break;
1527                         rb_node = rb_node->rb_right;
1528                 }
1529         }
1530
1531 out:
1532         *pprev = prev;
1533         return prev ? prev->vm_next : vma;
1534 }
1535
1536 /*
1537  * Verify that the stack growth is acceptable and
1538  * update accounting. This is shared with both the
1539  * grow-up and grow-down cases.
1540  */
1541 static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
1542 {
1543         struct mm_struct *mm = vma->vm_mm;
1544         struct rlimit *rlim = current->signal->rlim;
1545         unsigned long new_start;
1546
1547         /* address space limit tests */
1548         if (!may_expand_vm(mm, grow))
1549                 return -ENOMEM;
1550
1551         /* Stack limit test */
1552         if (size > rlim[RLIMIT_STACK].rlim_cur)
1553                 return -ENOMEM;
1554
1555         /* mlock limit tests */
1556         if (vma->vm_flags & VM_LOCKED) {
1557                 unsigned long locked;
1558                 unsigned long limit;
1559                 locked = mm->locked_vm + grow;
1560                 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
1561                 if (locked > limit && !capable(CAP_IPC_LOCK))
1562                         return -ENOMEM;
1563         }
1564
1565         /* Check to ensure the stack will not grow into a hugetlb-only region */
1566         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1567                         vma->vm_end - size;
1568         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1569                 return -EFAULT;
1570
1571         /*
1572          * Overcommit..  This must be the final test, as it will
1573          * update security statistics.
1574          */
1575         if (security_vm_enough_memory(grow))
1576                 return -ENOMEM;
1577
1578         /* Ok, everything looks good - let it rip */
1579         mm->total_vm += grow;
1580         if (vma->vm_flags & VM_LOCKED)
1581                 mm->locked_vm += grow;
1582         vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1583         return 0;
1584 }
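
The two rlimits consulted above are the ordinary per-process limits; they can
be inspected from user space with getrlimit().  A small sketch printing the
soft limits that bound stack size and mlocked stack pages respectively:

        #include <stdio.h>
        #include <sys/resource.h>

        int main(void)
        {
                struct rlimit stack, memlock;

                getrlimit(RLIMIT_STACK, &stack);
                getrlimit(RLIMIT_MEMLOCK, &memlock);
                printf("RLIMIT_STACK   soft limit: %llu bytes\n",
                       (unsigned long long)stack.rlim_cur);
                printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
                       (unsigned long long)memlock.rlim_cur);
                return 0;
        }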
1585
1586 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1587 /*
1588  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1589  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1590  */
1591 #ifndef CONFIG_IA64
1592 static inline
1593 #endif
1594 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1595 {
1596         int error;
1597
1598         if (!(vma->vm_flags & VM_GROWSUP))
1599                 return -EFAULT;
1600
1601         /*
1602          * We must make sure the anon_vma is allocated
1603          * so that the anon_vma locking is not a noop.
1604          */
1605         if (unlikely(anon_vma_prepare(vma)))
1606                 return -ENOMEM;
1607         anon_vma_lock(vma);
1608
1609         /*
1610          * vma->vm_start/vm_end cannot change under us because the caller
1611          * is required to hold the mmap_sem in read mode.  We need the
1612          * anon_vma lock to serialize against concurrent expand_stacks.
1613          * Also guard against wrapping around to address 0.
1614          */
1615         if (address < PAGE_ALIGN(address+4))
1616                 address = PAGE_ALIGN(address+4);
1617         else {
1618                 anon_vma_unlock(vma);
1619                 return -ENOMEM;
1620         }
1621         error = 0;
1622
1623         /* Somebody else might have raced and expanded it already */
1624         if (address > vma->vm_end) {
1625                 unsigned long size, grow;
1626
1627                 size = address - vma->vm_start;
1628                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1629
1630                 error = acct_stack_growth(vma, size, grow);
1631                 if (!error)
1632                         vma->vm_end = address;
1633         }
1634         anon_vma_unlock(vma);
1635         return error;
1636 }
1637 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1638
1639 /*
1640  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1641  */
1642 static inline int expand_downwards(struct vm_area_struct *vma,
1643                                    unsigned long address)
1644 {
1645         int error;
1646
1647         /*
1648          * We must make sure the anon_vma is allocated
1649          * so that the anon_vma locking is not a noop.
1650          */
1651         if (unlikely(anon_vma_prepare(vma)))
1652                 return -ENOMEM;
1653
1654         address &= PAGE_MASK;
1655         error = security_file_mmap(NULL, 0, 0, 0, address, 1);
1656         if (error)
1657                 return error;
1658
1659         anon_vma_lock(vma);
1660
1661         /*
1662          * vma->vm_start/vm_end cannot change under us because the caller
1663          * is required to hold the mmap_sem in read mode.  We need the
1664          * anon_vma lock to serialize against concurrent expand_stacks.
1665          */
1666
1667         /* Somebody else might have raced and expanded it already */
1668         if (address < vma->vm_start) {
1669                 unsigned long size, grow;
1670
1671                 size = vma->vm_end - address;
1672                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1673
1674                 error = acct_stack_growth(vma, size, grow);
1675                 if (!error) {
1676                         vma->vm_start = address;
1677                         vma->vm_pgoff -= grow;
1678                 }
1679         }
1680         anon_vma_unlock(vma);
1681         return error;
1682 }
1683
1684 int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
1685 {
1686         return expand_downwards(vma, address);
1687 }
1688
1689 #ifdef CONFIG_STACK_GROWSUP
1690 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1691 {
1692         return expand_upwards(vma, address);
1693 }
1694
1695 struct vm_area_struct *
1696 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1697 {
1698         struct vm_area_struct *vma, *prev;
1699
1700         addr &= PAGE_MASK;
1701         vma = find_vma_prev(mm, addr, &prev);
1702         if (vma && (vma->vm_start <= addr))
1703                 return vma;
1704         if (!prev || expand_stack(prev, addr))
1705                 return NULL;
1706         if (prev->vm_flags & VM_LOCKED)
1707                 make_pages_present(addr, prev->vm_end);
1708         return prev;
1709 }
1710 #else
1711 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1712 {
1713         return expand_downwards(vma, address);
1714 }
1715
1716 struct vm_area_struct *
1717 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1718 {
1719         struct vm_area_struct * vma;
1720         unsigned long start;
1721
1722         addr &= PAGE_MASK;
1723         vma = find_vma(mm,addr);
1724         if (!vma)
1725                 return NULL;
1726         if (vma->vm_start <= addr)
1727                 return vma;
1728         if (!(vma->vm_flags & VM_GROWSDOWN))
1729                 return NULL;
1730         start = vma->vm_start;
1731         if (expand_stack(vma, addr))
1732                 return NULL;
1733         if (vma->vm_flags & VM_LOCKED)
1734                 make_pages_present(addr, start);
1735         return vma;
1736 }
1737 #endif
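
From user space the grow-down path is exercised simply by touching stack memory
below the current stack vma; the page fault handler calls expand_stack() and
vm_start moves down.  A rough sketch, assuming a grows-down stack (VmStk in
/proc/self/status reflects the growth):

        #define _GNU_SOURCE
        #include <alloca.h>
        #include <stdio.h>
        #include <string.h>

        static void show_vmstk(const char *tag)
        {
                char line[128];
                FILE *f = fopen("/proc/self/status", "r");

                while (f && fgets(line, sizeof(line), f))
                        if (!strncmp(line, "VmStk:", 6))
                                printf("%s %s", tag, line);
                if (f)
                        fclose(f);
        }

        int main(void)
        {
                char *buf;

                show_vmstk("before");
                /* touching storage well below the previously used stack pages
                 * faults and the kernel grows the stack vma downwards */
                buf = alloca(256 * 1024);
                memset(buf, 0, 256 * 1024);
                show_vmstk("after ");
                return 0;
        }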
1738
1739 /*
1740  * Ok - we have the memory areas we should free on the vma list,
1741  * so release them, and do the vma updates.
1742  *
1743  * Called with the mm semaphore held.
1744  */
1745 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1746 {
1747         /* Update high watermark before we lower total_vm */
1748         update_hiwater_vm(mm);
1749         do {
1750                 long nrpages = vma_pages(vma);
1751
1752                 mm->total_vm -= nrpages;
1753                 if (vma->vm_flags & VM_LOCKED)
1754                         mm->locked_vm -= nrpages;
1755                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1756                 vma = remove_vma(vma);
1757         } while (vma);
1758         validate_mm(mm);
1759 }
1760
1761 /*
1762  * Get rid of page table information in the indicated region.
1763  *
1764  * Called with the mm semaphore held.
1765  */
1766 static void unmap_region(struct mm_struct *mm,
1767                 struct vm_area_struct *vma, struct vm_area_struct *prev,
1768                 unsigned long start, unsigned long end)
1769 {
1770         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1771         struct mmu_gather *tlb;
1772         unsigned long nr_accounted = 0;
1773
1774         lru_add_drain();
1775         tlb = tlb_gather_mmu(mm, 0);
1776         update_hiwater_rss(mm);
1777         unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1778         vm_unacct_memory(nr_accounted);
1779         free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1780                                  next? next->vm_start: 0);
1781         tlb_finish_mmu(tlb, start, end);
1782 }
1783
1784 /*
1785  * Create a list of vma's touched by the unmap, removing them from the mm's
1786  * vma list as we go..
1787  */
1788 static void
1789 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1790         struct vm_area_struct *prev, unsigned long end)
1791 {
1792         struct vm_area_struct **insertion_point;
1793         struct vm_area_struct *tail_vma = NULL;
1794         unsigned long addr;
1795
1796         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1797         do {
1798                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1799                 mm->map_count--;
1800                 tail_vma = vma;
1801                 vma = vma->vm_next;
1802         } while (vma && vma->vm_start < end);
1803         *insertion_point = vma;
1804         tail_vma->vm_next = NULL;
1805         if (mm->unmap_area == arch_unmap_area)
1806                 addr = prev ? prev->vm_end : mm->mmap_base;
1807         else
1808                 addr = vma ?  vma->vm_start : mm->mmap_base;
1809         mm->unmap_area(mm, addr);
1810         mm->mmap_cache = NULL;          /* Kill the cache. */
1811 }
1812
1813 /*
1814  * Split a vma into two pieces at address 'addr', a new vma is allocated
1815  * either for the first part or the tail.
1816  */
1817 int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1818               unsigned long addr, int new_below)
1819 {
1820         struct mempolicy *pol;
1821         struct vm_area_struct *new;
1822
1823         if (is_vm_hugetlb_page(vma) && (addr &
1824                                         ~(huge_page_mask(hstate_vma(vma)))))
1825                 return -EINVAL;
1826
1827         if (mm->map_count >= sysctl_max_map_count)
1828                 return -ENOMEM;
1829
1830         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1831         if (!new)
1832                 return -ENOMEM;
1833
1834         /* most fields are the same, copy all, and then fixup */
1835         *new = *vma;
1836
1837         if (new_below)
1838                 new->vm_end = addr;
1839         else {
1840                 new->vm_start = addr;
1841                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1842         }
1843
1844         pol = mpol_dup(vma_policy(vma));
1845         if (IS_ERR(pol)) {
1846                 kmem_cache_free(vm_area_cachep, new);
1847                 return PTR_ERR(pol);
1848         }
1849         vma_set_policy(new, pol);
1850
1851         if (new->vm_file) {
1852                 get_file(new->vm_file);
1853                 if (vma->vm_flags & VM_EXECUTABLE)
1854                         added_exe_file_vma(mm);
1855         }
1856
1857         if (new->vm_ops && new->vm_ops->open)
1858                 new->vm_ops->open(new);
1859
1860         if (new_below)
1861                 vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1862                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
1863         else
1864                 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1865
1866         return 0;
1867 }
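
split_vma() is what makes partial-range operations possible: mprotect(),
mlock() or munmap() over the middle of a mapping must first cut the vma.  A
user-space sketch in which one vma becomes three after mprotect() of the
middle page:

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <stdlib.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pg = sysconf(_SC_PAGESIZE);
                char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                /* one vma covers all three pages here ... */
                mprotect(p + pg, pg, PROT_READ);
                /* ... and now it has been split into rw / r-- / rw pieces */
                system("cat /proc/self/maps");
                return 0;
        }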
1868
1869 /* Munmap is split into 2 main parts -- this part which finds
1870  * what needs doing, and the areas themselves, which do the
1871  * work.  This now handles partial unmappings.
1872  * Jeremy Fitzhardinge <jeremy@goop.org>
1873  */
1874 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1875 {
1876         unsigned long end;
1877         struct vm_area_struct *vma, *prev, *last;
1878
1879         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
1880                 return -EINVAL;
1881
1882         if ((len = PAGE_ALIGN(len)) == 0)
1883                 return -EINVAL;
1884
1885         /* Find the first overlapping VMA */
1886         vma = find_vma_prev(mm, start, &prev);
1887         if (!vma)
1888                 return 0;
1889         /* we have  start < vma->vm_end  */
1890
1891         /* if it doesn't overlap, we have nothing.. */
1892         end = start + len;
1893         if (vma->vm_start >= end)
1894                 return 0;
1895
1896         /*
1897          * If we need to split any vma, do it now to save pain later.
1898          *
1899          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
1900          * unmapped vm_area_struct will remain in use: so lower split_vma
1901          * places tmp vma above, and higher split_vma places tmp vma below.
1902          */
1903         if (start > vma->vm_start) {
1904                 int error = split_vma(mm, vma, start, 0);
1905                 if (error)
1906                         return error;
1907                 prev = vma;
1908         }
1909
1910         /* Does it split the last one? */
1911         last = find_vma(mm, end);
1912         if (last && end > last->vm_start) {
1913                 int error = split_vma(mm, last, end, 1);
1914                 if (error)
1915                         return error;
1916         }
1917         vma = prev? prev->vm_next: mm->mmap;
1918
1919         /*
1920          * Remove the vma's, and unmap the actual pages
1921          */
1922         detach_vmas_to_be_unmapped(mm, vma, prev, end);
1923         unmap_region(mm, vma, prev, start, end);
1924
1925         /* Fix up all other VM information */
1926         remove_vma_list(mm, vma);
1927
1928         return 0;
1929 }
1930
1931 EXPORT_SYMBOL(do_munmap);
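
Punching a hole in the middle of an existing mapping therefore goes through
both split_vma() calls above and unmaps only the inner piece.  A user-space
sketch:

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pg = sysconf(_SC_PAGESIZE);
                char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                /* unmapping the middle page splits the vma and leaves two
                 * independent one-page mappings around a hole */
                if (munmap(p + pg, pg))
                        return 1;
                p[0] = 1;               /* first page still mapped */
                p[2 * pg] = 1;          /* last page still mapped  */
                printf("hole punched at %p\n", (void *)(p + pg));
                return 0;
        }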
1932
1933 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1934 {
1935         int ret;
1936         struct mm_struct *mm = current->mm;
1937
1938         profile_munmap(addr);
1939
1940         down_write(&mm->mmap_sem);
1941         ret = do_munmap(mm, addr, len);
1942         up_write(&mm->mmap_sem);
1943         return ret;
1944 }
1945
1946 static inline void verify_mm_writelocked(struct mm_struct *mm)
1947 {
1948 #ifdef CONFIG_DEBUG_VM
1949         if (unlikely(down_read_trylock(&mm->mmap_sem))) {
1950                 WARN_ON(1);
1951                 up_read(&mm->mmap_sem);
1952         }
1953 #endif
1954 }
1955
1956 /*
1957  *  This is really a simplified "do_mmap".  It only handles
1958  *  anonymous maps.  Eventually we may be able to do some
1959  *  brk-specific accounting here.
1960  */
1961 unsigned long do_brk(unsigned long addr, unsigned long len)
1962 {
1963         struct mm_struct * mm = current->mm;
1964         struct vm_area_struct * vma, * prev;
1965         unsigned long flags;
1966         struct rb_node ** rb_link, * rb_parent;
1967         pgoff_t pgoff = addr >> PAGE_SHIFT;
1968         int error;
1969
1970         len = PAGE_ALIGN(len);
1971         if (!len)
1972                 return addr;
1973
1974         if ((addr + len) > TASK_SIZE || (addr + len) < addr)
1975                 return -EINVAL;
1976
1977         if (is_hugepage_only_range(mm, addr, len))
1978                 return -EINVAL;
1979
1980         error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
1981         if (error)
1982                 return error;
1983
1984         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1985
1986         error = arch_mmap_check(addr, len, flags);
1987         if (error)
1988                 return error;
1989
1990         /*
1991          * mlock MCL_FUTURE?
1992          */
1993         if (mm->def_flags & VM_LOCKED) {
1994                 unsigned long locked, lock_limit;
1995                 locked = len >> PAGE_SHIFT;
1996                 locked += mm->locked_vm;
1997                 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1998                 lock_limit >>= PAGE_SHIFT;
1999                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2000                         return -EAGAIN;
2001         }
2002
2003         /*
2004          * mm->mmap_sem is required to protect against another thread
2005          * changing the mappings in case we sleep.
2006          */
2007         verify_mm_writelocked(mm);
2008
2009         /*
2010          * Clear old maps.  This also does some error checking for us
2011          */
2012  munmap_back:
2013         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2014         if (vma && vma->vm_start < addr + len) {
2015                 if (do_munmap(mm, addr, len))
2016                         return -ENOMEM;
2017                 goto munmap_back;
2018         }
2019
2020         /* Check against address space limits *after* clearing old maps... */
2021         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2022                 return -ENOMEM;
2023
2024         if (mm->map_count > sysctl_max_map_count)
2025                 return -ENOMEM;
2026
2027         if (security_vm_enough_memory(len >> PAGE_SHIFT))
2028                 return -ENOMEM;
2029
2030         /* Can we just expand an old private anonymous mapping? */
2031         if (vma_merge(mm, prev, addr, addr + len, flags,
2032                                         NULL, NULL, pgoff, NULL))
2033                 goto out;
2034
2035         /*
2036          * create a vma struct for an anonymous mapping
2037          */
2038         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2039         if (!vma) {
2040                 vm_unacct_memory(len >> PAGE_SHIFT);
2041                 return -ENOMEM;
2042         }
2043
2044         vma->vm_mm = mm;
2045         vma->vm_start = addr;
2046         vma->vm_end = addr + len;
2047         vma->vm_pgoff = pgoff;
2048         vma->vm_flags = flags;
2049         vma->vm_page_prot = vm_get_page_prot(flags);
2050         vma_link(mm, vma, prev, rb_link, rb_parent);
2051 out:
2052         mm->total_vm += len >> PAGE_SHIFT;
2053         if (flags & VM_LOCKED) {
2054                 mm->locked_vm += len >> PAGE_SHIFT;
2055                 make_pages_present(addr, addr + len);
2056         }
2057         return addr;
2058 }
2059
2060 EXPORT_SYMBOL(do_brk);
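
do_brk() is the anonymous-only backend behind the brk()/sbrk() heap interface.
A user-space sketch of the effect:

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                void *old_brk = sbrk(0);
                char *p = sbrk(4096);   /* extends the heap mapping */

                if (p == (void *)-1)
                        return 1;
                p[0] = 42;              /* the new page is usable at once */
                printf("break moved from %p to %p\n", old_brk, sbrk(0));
                return 0;
        }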
2061
2062 /* Release all mmaps. */
2063 void exit_mmap(struct mm_struct *mm)
2064 {
2065         struct mmu_gather *tlb;
2066         struct vm_area_struct *vma = mm->mmap;
2067         unsigned long nr_accounted = 0;
2068         unsigned long end;
2069
2070         /* mm's last user has gone, and it's about to be pulled down */
2071         arch_exit_mmap(mm);
2072         mmu_notifier_release(mm);
2073
2074         if (!mm->mmap)  /* Can happen if dup_mmap() received an OOM */
2075                 return;
2076
2077         lru_add_drain();
2078         flush_cache_mm(mm);
2079         tlb = tlb_gather_mmu(mm, 1);
2080         /* Don't update_hiwater_rss(mm) here, do_exit already did */
2081         /* Use -1 here to ensure all VMAs in the mm are unmapped */
2082         end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2083         vm_unacct_memory(nr_accounted);
2084         free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
2085         tlb_finish_mmu(tlb, 0, end);
2086
2087         /*
2088          * Walk the list again, actually closing and freeing it,
2089          * with preemption enabled, without holding any MM locks.
2090          */
2091         while (vma)
2092                 vma = remove_vma(vma);
2093
2094         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2095 }
2096
2097 /* Insert vm structure into process list sorted by address
2098  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2099  * then i_mmap_lock is taken here.
2100  */
2101 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2102 {
2103         struct vm_area_struct * __vma, * prev;
2104         struct rb_node ** rb_link, * rb_parent;
2105
2106         /*
2107          * The vm_pgoff of a purely anonymous vma should be irrelevant
2108          * until its first write fault, when page's anon_vma and index
2109          * are set.  But now set the vm_pgoff it will almost certainly
2110          * end up with (unless mremap moves it elsewhere before that
2111          * first write fault), so /proc/pid/maps tells a consistent story.
2112          *
2113          * By setting it to reflect the virtual start address of the
2114          * vma, merges and splits can happen in a seamless way, just
2115          * using the existing file pgoff checks and manipulations.
2116          * Similarly in do_mmap_pgoff and in do_brk.
2117          */
2118         if (!vma->vm_file) {
2119                 BUG_ON(vma->anon_vma);
2120                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2121         }
2122         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
2123         if (__vma && __vma->vm_start < vma->vm_end)
2124                 return -ENOMEM;
2125         if ((vma->vm_flags & VM_ACCOUNT) &&
2126              security_vm_enough_memory_mm(mm, vma_pages(vma)))
2127                 return -ENOMEM;
2128         vma_link(mm, vma, prev, rb_link, rb_parent);
2129         return 0;
2130 }
2131
2132 /*
2133  * Copy the vma structure to a new location in the same mm,
2134  * prior to moving page table entries, to effect an mremap move.
2135  */
2136 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2137         unsigned long addr, unsigned long len, pgoff_t pgoff)
2138 {
2139         struct vm_area_struct *vma = *vmap;
2140         unsigned long vma_start = vma->vm_start;
2141         struct mm_struct *mm = vma->vm_mm;
2142         struct vm_area_struct *new_vma, *prev;
2143         struct rb_node **rb_link, *rb_parent;
2144         struct mempolicy *pol;
2145
2146         /*
2147          * If anonymous vma has not yet been faulted, update new pgoff
2148          * to match new location, to increase its chance of merging.
2149          */
2150         if (!vma->vm_file && !vma->anon_vma)
2151                 pgoff = addr >> PAGE_SHIFT;
2152
2153         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2154         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2155                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2156         if (new_vma) {
2157                 /*
2158                  * Source vma may have been merged into new_vma
2159                  */
2160                 if (vma_start >= new_vma->vm_start &&
2161                     vma_start < new_vma->vm_end)
2162                         *vmap = new_vma;
2163         } else {
2164                 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2165                 if (new_vma) {
2166                         *new_vma = *vma;
2167                         pol = mpol_dup(vma_policy(vma));
2168                         if (IS_ERR(pol)) {
2169                                 kmem_cache_free(vm_area_cachep, new_vma);
2170                                 return NULL;
2171                         }
2172                         vma_set_policy(new_vma, pol);
2173                         new_vma->vm_start = addr;
2174                         new_vma->vm_end = addr + len;
2175                         new_vma->vm_pgoff = pgoff;
2176                         if (new_vma->vm_file) {
2177                                 get_file(new_vma->vm_file);
2178                                 if (vma->vm_flags & VM_EXECUTABLE)
2179                                         added_exe_file_vma(mm);
2180                         }
2181                         if (new_vma->vm_ops && new_vma->vm_ops->open)
2182                                 new_vma->vm_ops->open(new_vma);
2183                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
2184                 }
2185         }
2186         return new_vma;
2187 }
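
copy_vma() is used by mremap()'s move_vma() path: the vma is duplicated at the
destination before the page tables are moved across.  A user-space sketch that
may trigger such a move (the kernel is free to grow the mapping in place
instead):

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long pg = sysconf(_SC_PAGESIZE);
                char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                char *q;

                if (p == MAP_FAILED)
                        return 1;
                strcpy(p, "contents survive the move");
                q = mremap(p, pg, 2 * pg, MREMAP_MAYMOVE);
                if (q == MAP_FAILED)
                        return 1;
                printf("%p -> %p: %s\n", (void *)p, (void *)q, q);
                return 0;
        }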
2188
2189 /*
2190  * Return true if the calling process may expand its vm space by the passed
2191  * number of pages
2192  */
2193 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2194 {
2195         unsigned long cur = mm->total_vm;       /* pages */
2196         unsigned long lim;
2197
2198         lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
2199
2200         if (cur + npages > lim)
2201                 return 0;
2202         return 1;
2203 }
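
may_expand_vm() is where RLIMIT_AS is enforced for mmap(), brk() and stack
growth.  A user-space sketch: after shrinking RLIMIT_AS, a request that would
push total_vm over the limit fails with ENOMEM:

        #define _GNU_SOURCE
        #include <errno.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <sys/resource.h>

        int main(void)
        {
                struct rlimit as = { 128UL << 20, 128UL << 20 }; /* 128 MiB */
                void *p;

                setrlimit(RLIMIT_AS, &as);
                /* 256 MiB would push total_vm past the shrunken limit */
                p = mmap(NULL, 256UL << 20, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        printf("mmap: %s\n", strerror(errno));
                return 0;
        }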
2204
2205
2206 static int special_mapping_fault(struct vm_area_struct *vma,
2207                                 struct vm_fault *vmf)
2208 {
2209         pgoff_t pgoff;
2210         struct page **pages;
2211
2212         /*
2213          * special mappings have no vm_file, and in that case, the mm
2214          * uses vm_pgoff internally. So we have to subtract it from here.
2215          * We are allowed to do this because we are the mm; do not copy
2216          * this code into drivers!
2217          */
2218         pgoff = vmf->pgoff - vma->vm_pgoff;
2219
2220         for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2221                 pgoff--;
2222
2223         if (*pages) {
2224                 struct page *page = *pages;
2225                 get_page(page);
2226                 vmf->page = page;
2227                 return 0;
2228         }
2229
2230         return VM_FAULT_SIGBUS;
2231 }
2232
2233 /*
2234  * Having a close hook prevents vma merging regardless of flags.
2235  */
2236 static void special_mapping_close(struct vm_area_struct *vma)
2237 {
2238 }
2239
2240 static struct vm_operations_struct special_mapping_vmops = {
2241         .close = special_mapping_close,
2242         .fault = special_mapping_fault,
2243 };
2244
2245 /*
2246  * Called with mm->mmap_sem held for writing.
2247  * Insert a new vma covering the given region, with the given flags.
2248  * Its pages are supplied by the given array of struct page *.
2249  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2250  * The region past the last page supplied will always produce SIGBUS.
2251  * The array pointer and the pages it points to are assumed to stay alive
2252  * for as long as this mapping might exist.
2253  */
2254 int install_special_mapping(struct mm_struct *mm,
2255                             unsigned long addr, unsigned long len,
2256                             unsigned long vm_flags, struct page **pages)
2257 {
2258         struct vm_area_struct *vma;
2259
2260         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2261         if (unlikely(vma == NULL))
2262                 return -ENOMEM;
2263
2264         vma->vm_mm = mm;
2265         vma->vm_start = addr;
2266         vma->vm_end = addr + len;
2267
2268         vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2269         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2270
2271         vma->vm_ops = &special_mapping_vmops;
2272         vma->vm_private_data = pages;
2273
2274         if (unlikely(insert_vm_struct(mm, vma))) {
2275                 kmem_cache_free(vm_area_cachep, vma);
2276                 return -ENOMEM;
2277         }
2278
2279         mm->total_vm += len >> PAGE_SHIFT;
2280
2281         return 0;
2282 }
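
A hedged sketch (the demo_* names are hypothetical, not from this file) of how
arch code typically uses install_special_mapping() for a vdso-style page.  The
caller must hold mmap_sem for writing, and the NULL-terminated pages array must
stay alive for as long as any mm maps it:

        static struct page *demo_pages[2];   /* filled at boot, NULL-terminated */

        static int demo_install_page(struct mm_struct *mm, unsigned long addr)
        {
                if (!demo_pages[0])
                        return -ENOMEM;
                return install_special_mapping(mm, addr, PAGE_SIZE,
                                VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
                                demo_pages);
        }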
2283
2284 static DEFINE_MUTEX(mm_all_locks_mutex);
2285
2286 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2287 {
2288         if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2289                 /*
2290                  * The LSB of head.next can't change from under us
2291                  * because we hold the mm_all_locks_mutex.
2292                  */
2293                 spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
2294                 /*
2295                  * We can safely modify head.next after taking the
2296                  * anon_vma->lock. If some other vma in this mm shares
2297                  * the same anon_vma we won't take it again.
2298                  *
2299                  * No need of atomic instructions here, head.next
2300                  * can't change from under us thanks to the
2301                  * anon_vma->lock.
2302                  */
2303                 if (__test_and_set_bit(0, (unsigned long *)
2304                                        &anon_vma->head.next))
2305                         BUG();
2306         }
2307 }
2308
2309 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2310 {
2311         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2312                 /*
2313                  * AS_MM_ALL_LOCKS can't change from under us because
2314                  * we hold the mm_all_locks_mutex.
2315                  *
2316                  * Operations on ->flags have to be atomic because
2317                  * even if AS_MM_ALL_LOCKS is stable thanks to the
2318                  * mm_all_locks_mutex, there may be other cpus
2319                  * changing other bitflags in parallel to us.
2320                  */
2321                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2322                         BUG();
2323                 spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
2324         }
2325 }
2326
2327 /*
2328  * This operation locks against the VM for all pte/vma/mm related
2329  * operations that could ever happen on a certain mm. This includes
2330  * vmtruncate, try_to_unmap, and all page faults.
2331  *
2332  * The caller must take the mmap_sem in write mode before calling
2333  * mm_take_all_locks(). The caller isn't allowed to release the
2334  * mmap_sem until mm_drop_all_locks() returns.
2335  *
2336  * mmap_sem in write mode is required in order to block all operations
2337  * that could modify pagetables and free pages without needing to
2338  * alter the vma layout (for example populate_range() with
2339  * nonlinear vmas). It's also needed in write mode to prevent new
2340  * anon_vmas from being associated with existing vmas.
2341  *
2342  * A single task can't take more than one mm_take_all_locks() in a row
2343  * or it would deadlock.
2344  *
2345  * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
2346  * mapping->flags avoid taking the same lock twice, if more than one
2347  * vma in this mm is backed by the same anon_vma or address_space.
2348  *
2349  * We can take all the locks in random order because the VM code
2350  * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
2351  * takes more than one of them in a row. Secondly we're protected
2352  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2353  *
2354  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2355  * that may have to take thousands of locks.
2356  *
2357  * mm_take_all_locks() can fail if it's interrupted by signals.
2358  */
2359 int mm_take_all_locks(struct mm_struct *mm)
2360 {
2361         struct vm_area_struct *vma;
2362         int ret = -EINTR;
2363
2364         BUG_ON(down_read_trylock(&mm->mmap_sem));
2365
2366         mutex_lock(&mm_all_locks_mutex);
2367
2368         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2369                 if (signal_pending(current))
2370                         goto out_unlock;
2371                 if (vma->vm_file && vma->vm_file->f_mapping)
2372                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
2373         }
2374
2375         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2376                 if (signal_pending(current))
2377                         goto out_unlock;
2378                 if (vma->anon_vma)
2379                         vm_lock_anon_vma(mm, vma->anon_vma);
2380         }
2381
2382         ret = 0;
2383
2384 out_unlock:
2385         if (ret)
2386                 mm_drop_all_locks(mm);
2387
2388         return ret;
2389 }
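
The expected calling sequence, roughly as used when registering an mmu
notifier (do_something_global() is a hypothetical placeholder):

        static int demo_freeze_mm(struct mm_struct *mm)
        {
                int ret;

                down_write(&mm->mmap_sem);
                ret = mm_take_all_locks(mm);    /* may fail with -EINTR */
                if (!ret) {
                        do_something_global(mm);  /* all i_mmap/anon_vma locks held */
                        mm_drop_all_locks(mm);
                }
                up_write(&mm->mmap_sem);
                return ret;
        }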
2390
2391 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2392 {
2393         if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2394                 /*
2395                  * The LSB of head.next can't change to 0 from under
2396                  * us because we hold the mm_all_locks_mutex.
2397                  *
2398                  * We must however clear the bitflag before unlocking
2399                  * the vma so the users using the anon_vma->head will
2400                  * never see our bitflag.
2401                  *
2402                  * No need of atomic instructions here, head.next
2403                  * can't change from under us until we release the
2404                  * anon_vma->lock.
2405                  */
2406                 if (!__test_and_clear_bit(0, (unsigned long *)
2407                                           &anon_vma->head.next))
2408                         BUG();
2409                 spin_unlock(&anon_vma->lock);
2410         }
2411 }
2412
2413 static void vm_unlock_mapping(struct address_space *mapping)
2414 {
2415         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2416                 /*
2417                  * AS_MM_ALL_LOCKS can't change to 0 from under us
2418                  * because we hold the mm_all_locks_mutex.
2419                  */
2420                 spin_unlock(&mapping->i_mmap_lock);
2421                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2422                                         &mapping->flags))
2423                         BUG();
2424         }
2425 }
2426
2427 /*
2428  * The mmap_sem cannot be released by the caller until
2429  * mm_drop_all_locks() returns.
2430  */
2431 void mm_drop_all_locks(struct mm_struct *mm)
2432 {
2433         struct vm_area_struct *vma;
2434
2435         BUG_ON(down_read_trylock(&mm->mmap_sem));
2436         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2437
2438         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2439                 if (vma->anon_vma)
2440                         vm_unlock_anon_vma(vma->anon_vma);
2441                 if (vma->vm_file && vma->vm_file->f_mapping)
2442                         vm_unlock_mapping(vma->vm_file->f_mapping);
2443         }
2444
2445         mutex_unlock(&mm_all_locks_mutex);
2446 }