git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/mmap.c
index 131521e12f1390ec945531c9628563fe96b39367..3edfcdfa42d9f27a5238780065220ec3b4fc702a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,7 +971,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
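
Dropping the static qualifier is what lets a caller outside mm/mmap.c (the vm_mmap_pgoff() helper referenced further down) reach do_mmap_pgoff() directly. A matching declaration is implied but not part of this hunk; a minimal sketch, assuming it lands in a shared header such as include/linux/mm.h or mm/internal.h (hypothetical placement, signature taken from the hunk above):

	extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff);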
@@ -1102,34 +1102,6 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       ret = security_mmap_file(file, prot, flag);
-       if (!ret) {
-               down_write(&mm->mmap_sem);
-               ret = do_mmap(file, addr, len, prot, flag, offset);
-               up_write(&mm->mmap_sem);
-       }
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
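
do_mmap() and vm_mmap() are removed from mm/mmap.c here, not from the kernel; in the mainline version of this series they are rebuilt in mm/util.c on top of the new vm_mmap_pgoff() helper. A minimal sketch of the relocated vm_mmap(), assuming it keeps the overflow and alignment checks deleted above and simply delegates:

	unsigned long vm_mmap(struct file *file, unsigned long addr,
		unsigned long len, unsigned long prot,
		unsigned long flag, unsigned long offset)
	{
		/* Same sanity checks as the deleted do_mmap() body. */
		if (unlikely(offset + PAGE_ALIGN(len) < offset))
			return -EINVAL;
		if (unlikely(offset & ~PAGE_MASK))
			return -EINVAL;

		return vm_mmap_pgoff(file, addr, len, prot, flag,
				     offset >> PAGE_SHIFT);
	}
	EXPORT_SYMBOL(vm_mmap);

The EXPORT_SYMBOL(vm_mmap) deleted from this file is expected to move along with the function, so modules keep the same entry point.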
@@ -1161,13 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       retval = security_mmap_file(file, prot, flags);
-       if (!retval) {
-               down_write(&current->mm->mmap_sem);
-               retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-               up_write(&current->mm->mmap_sem);
-       }
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
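
The open-coded sequence deleted above (LSM check, then do_mmap_pgoff() with mmap_sem held for write) is collapsed into the single vm_mmap_pgoff() call. The helper itself is not part of this diff; judging from the removed lines, it would look roughly like the sketch below (in mainline it lives in mm/util.c):

	unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
		unsigned long len, unsigned long prot,
		unsigned long flag, unsigned long pgoff)
	{
		unsigned long ret;
		struct mm_struct *mm = current->mm;

		ret = security_mmap_file(file, prot, flag);
		if (!ret) {
			/* Same locking rule as the deleted call site. */
			down_write(&mm->mmap_sem);
			ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
			up_write(&mm->mmap_sem);
		}
		return ret;
	}

This funnels sys_mmap_pgoff(), the relocated vm_mmap(), and any other in-kernel mmap path through one place that applies the security hook and the locking consistently.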
@@ -1640,33 +1606,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
+       if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+               return NULL;
+
+       /* Check the cache first. */
+       /* (Cache hit rate is typically around 35%.) */
+       vma = mm->mmap_cache;
+       if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = mm->mm_rb.rb_node;
+               vma = NULL;
+
+               while (rb_node) {
+                       struct vm_area_struct *vma_tmp;
+
+                       vma_tmp = rb_entry(rb_node,
+                                          struct vm_area_struct, vm_rb);
+
+                       if (vma_tmp->vm_end > addr) {
+                               vma = vma_tmp;
+                               if (vma_tmp->vm_start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
                }
+               if (vma)
+                       mm->mmap_cache = vma;
        }
        return vma;
 }
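
The find_vma() rewrite only flattens the old "if (mm)" nesting into an early WARN_ON_ONCE() return; the lookup is unchanged: try mmap_cache first, otherwise walk the rbtree, return the first VMA whose vm_end lies above addr, and cache the result. Since that VMA may start above addr, callers still have to check vm_start themselves. An illustrative caller pattern (not part of this patch), assuming mmap_sem is held at least for read:

	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr) {
		/* addr falls inside an existing mapping */
	} else {
		/* addr is unmapped (or lies above the last VMA) */
	}
	up_read(&mm->mmap_sem);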
@@ -2159,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
        return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
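
With EXPORT_SYMBOL(do_munmap) gone, do_munmap() becomes internal to the core VM; module code is expected to go through the exported vm_munmap() wrapper whose opening lines appear as trailing context above. A minimal sketch of that wrapper, assuming it simply takes current->mm's mmap_sem for write around do_munmap():

	int vm_munmap(unsigned long start, size_t len)
	{
		int ret;
		struct mm_struct *mm = current->mm;

		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, start, len);
		up_write(&mm->mmap_sem);
		return ret;
	}
	EXPORT_SYMBOL(vm_munmap);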