git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - mm/mmap.c
index e8dcfc7de866e2b7c1a1dccd90eb4a9aff89161d..3edfcdfa42d9f27a5238780065220ec3b4fc702a 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
-       int error;
-       unsigned long reqprot = prot;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1101,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-       if (error)
-               return error;
-
        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 
-unsigned long do_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       if (unlikely(offset + PAGE_ALIGN(len) < offset))
-               return -EINVAL;
-       if (unlikely(offset & ~PAGE_MASK))
-               return -EINVAL;
-       return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(do_mmap);
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-       unsigned long len, unsigned long prot,
-       unsigned long flag, unsigned long offset)
-{
-       unsigned long ret;
-       struct mm_struct *mm = current->mm;
-
-       down_write(&mm->mmap_sem);
-       ret = do_mmap(file, addr, len, prot, flag, offset);
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-EXPORT_SYMBOL(vm_mmap);
-
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -1165,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
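
The do_mmap()/vm_mmap() wrappers and the explicit down_write()/up_write() around do_mmap_pgoff() removed above are replaced by a single vm_mmap_pgoff() call in the syscall. That helper is not defined in this file (it presumably lives in mm/util.c); a minimal sketch of the shape it is assumed to have, with the placement of the file-based LSM check being an assumption:

/* Sketch only: assumed shape of the vm_mmap_pgoff() helper the syscall
 * now calls.  It takes mmap_sem itself, which is why the explicit
 * locking around do_mmap_pgoff() could be dropped above.
 */
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	/* Assumption: the file-based part of the old security_file_mmap()
	 * check happens here (or via a new security_mmap_file() hook)
	 * before the mapping is set up. */
	down_write(&mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
	up_write(&mm->mmap_sem);
	return ret;
}
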
@@ -1629,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return arch_rebalance_pgtables(addr, len);
+       addr = arch_rebalance_pgtables(addr, len);
+       error = security_mmap_addr(addr);
+       return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
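
The new security_mmap_addr() hook takes over the pure address check that security_file_mmap() used to perform when called with its addr_only argument set. A rough, illustrative sketch of the policy it is assumed to enforce (not the actual LSM code; the function name below is made up):

/* Illustrative sketch only: the hook is assumed to refuse mappings
 * below the mmap_min_addr floor to callers without CAP_SYS_RAWIO,
 * mirroring the old security_file_mmap(..., addr_only) behaviour.
 */
static int mmap_addr_policy_sketch(unsigned long addr)
{
	if (addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
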
@@ -1639,33 +1606,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
+       if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+               return NULL;
+
+       /* Check the cache first. */
+       /* (Cache hit rate is typically around 35%.) */
+       vma = mm->mmap_cache;
+       if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = mm->mm_rb.rb_node;
+               vma = NULL;
+
+               while (rb_node) {
+                       struct vm_area_struct *vma_tmp;
+
+                       vma_tmp = rb_entry(rb_node,
+                                          struct vm_area_struct, vm_rb);
+
+                       if (vma_tmp->vm_end > addr) {
+                               vma = vma_tmp;
+                               if (vma_tmp->vm_start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
                }
+               if (vma)
+                       mm->mmap_cache = vma;
        }
        return vma;
 }
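
Note that find_vma() returns the first VMA whose vm_end lies above addr, which may itself start above addr, so callers that need the VMA actually containing the address must re-check vm_start. A small hypothetical caller sketch (the helper name is illustrative):

/* Hypothetical helper: true if addr falls inside an existing mapping.
 * find_vma() can return a VMA beginning above addr, hence the extra
 * vm_start check. */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	mapped = vma && vma->vm_start <= addr;
	up_read(&mm->mmap_sem);
	return mapped;
}
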
@@ -1818,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       error = security_mmap_addr(address);
        if (error)
                return error;
 
@@ -2158,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
        return 0;
 }
-EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2206,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-       if (error)
-               return error;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2562,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (ret)
-               goto out;
-
        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;
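
Dropping the explicit check here appears to rely on callers of install_special_mapping() obtaining their target address from get_unmapped_area(), which now performs the security_mmap_addr() check itself (see the get_unmapped_area() hunk above). A hypothetical caller sketch of that flow, with illustrative names and flags:

/* Hypothetical special-mapping installer (names and vm_flags are
 * illustrative).  The address comes from get_unmapped_area(), which
 * now runs security_mmap_addr(), so no separate address check is
 * needed before install_special_mapping(). */
static int map_helper_page(struct mm_struct *mm, struct page **pages)
{
	unsigned long addr;
	int ret;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto out;
	}

	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_MAYREAD, pages);
out:
	up_write(&mm->mmap_sem);
	return ret;
}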