diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 65ffc321f0c0f223e4c6115e91c53d12d8319399..f874ae818ad3812f7cd051972c823c743a154d33 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,7 +15,6 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
-#include "filemap.h"
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -26,14 +25,15 @@ static struct page *__xip_sparse_page;
 static struct page *xip_sparse_page(void)
 {
        if (!__xip_sparse_page) {
-               unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
-               if (zeroes) {
+               struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+               if (page) {
                        static DEFINE_SPINLOCK(xip_alloc_lock);
                        spin_lock(&xip_alloc_lock);
                        if (!__xip_sparse_page)
-                               __xip_sparse_page = virt_to_page(zeroes);
+                               __xip_sparse_page = page;
                        else
-                               free_page(zeroes);
+                               __free_page(page);
                        spin_unlock(&xip_alloc_lock);
                }
        }
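
A note on the allocation change above: get_zeroed_page() returns a kernel virtual address, so it can only hand out pages that have a permanent kernel mapping; the __GFP_HIGHMEM bit implied by GFP_HIGHUSER is unusable through that interface, and the old virt_to_page() dance was needed just to recover the struct page. alloc_page() deals in struct page directly, which makes highmem pages usable and lets __GFP_ZERO replace the separate zeroing step. A minimal sketch of the contrast (illustrative only, not part of the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_zeroed_demo(void)
{
	/* Old style: get_zeroed_page() yields a virtual address, so the
	 * page must come from lowmem; allocated here only to free again. */
	unsigned long addr = get_zeroed_page(GFP_USER);
	if (addr)
		free_page(addr);

	/* New style: alloc_page() returns a struct page, so the
	 * __GFP_HIGHMEM bit in GFP_HIGHUSER is honoured and __GFP_ZERO
	 * replaces the explicit zeroing. */
	return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
}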
@@ -205,62 +205,58 @@ __xip_unmap (struct address_space * mapping,
 }
 
 /*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
  */
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
-                  unsigned long address,
-                  int *type)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
        struct file *file = area->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size, pgoff, endoff;
+       pgoff_t size;
 
-       pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
-               + area->vm_pgoff;
-       endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
-               + area->vm_pgoff;
+       /* XXX: are VM_FAULT_ codes OK? */
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (pgoff >= size)
-               return NOPAGE_SIGBUS;
+       if (vmf->pgoff >= size)
+               return VM_FAULT_SIGBUS;
 
-       page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
+       page = mapping->a_ops->get_xip_page(mapping,
+                                       vmf->pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page))
                goto out;
        if (PTR_ERR(page) != -ENODATA)
-               return NOPAGE_SIGBUS;
+               return VM_FAULT_OOM;
 
        /* sparse block */
        if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                /* maybe shared writable, allocate new block */
-               page = mapping->a_ops->get_xip_page (mapping,
-                       pgoff*(PAGE_SIZE/512), 1);
+               page = mapping->a_ops->get_xip_page(mapping,
+                                       vmf->pgoff*(PAGE_SIZE/512), 1);
                if (IS_ERR(page))
-                       return NOPAGE_SIGBUS;
+                       return VM_FAULT_SIGBUS;
                /* unmap page at pgoff from all other vmas */
-               __xip_unmap(mapping, pgoff);
+               __xip_unmap(mapping, vmf->pgoff);
        } else {
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
-                       return NOPAGE_OOM;
+                       return VM_FAULT_OOM;
        }
 
 out:
        page_cache_get(page);
-       return page;
+       vmf->page = page;
+       return 0;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
-       .nopage         = xip_file_nopage,
+       .fault  = xip_file_fault,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
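
The heart of this hunk is the switch from the old ->nopage contract (return the struct page itself, or the magic NOPAGE_SIGBUS/NOPAGE_OOM values) to the ->fault contract: the faulting offset arrives in vmf->pgoff, the handler stores a referenced page in vmf->page, and the return value is a VM_FAULT_* code, with 0 meaning success. A minimal sketch of a handler under the new contract, assuming a hypothetical lookup helper (look_up_backing_page() is not a real kernel function):

#include <linux/mm.h>
#include <linux/pagemap.h>

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* Hypothetical helper: map a file offset to its backing page. */
	struct page *page = look_up_backing_page(vma->vm_file, vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;	/* e.g. fault beyond i_size */

	page_cache_get(page);		/* the caller expects a reference */
	vmf->page = page;
	return 0;			/* success: core VM installs the PTE */
}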
@@ -269,6 +265,7 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 
        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
+       vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
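
VM_CAN_NONLINEAR is set here because vmas served through a ->fault handler can also back nonlinear (remap_file_pages) mappings; flagging it at mmap time is part of the new API's contract. For context, a filesystem adopts this path by pointing its file operations at the xip_* helpers exported from this file; the sketch below is modeled loosely on the ext2 XIP support of this era, not copied from it:

#include <linux/fs.h>

static const struct file_operations demo_xip_file_operations = {
	.read	= xip_file_read,
	.write	= xip_file_write,
	.mmap	= xip_file_mmap,	/* installs xip_file_vm_ops */
};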
@@ -291,6 +288,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                unsigned long index;
                unsigned long offset;
                size_t copied;
+               char *kaddr;
 
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
@@ -298,14 +296,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
                if (bytes > count)
                        bytes = count;
 
-               /*
-                * Bring in the user page that we will copy from _first_.
-                * Otherwise there's a nasty deadlock on copying from the
-                * same page as we're writing to, without it being marked
-                * up-to-date.
-                */
-               fault_in_pages_readable(buf, bytes);
-
                page = a_ops->get_xip_page(mapping,
                                           index*(PAGE_SIZE/512), 0);
                if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
@@ -322,8 +312,13 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
                }
 
-               copied = filemap_copy_from_user(page, offset, buf, bytes);
+               fault_in_pages_readable(buf, bytes);
+               kaddr = kmap_atomic(page, KM_USER0);
+               copied = bytes -
+                       __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+               kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(page);
+
                if (likely(copied > 0)) {
                        status = copied;
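
The last two hunks open-code what the now-removed filemap_copy_from_user() helper (declared in the private "filemap.h" dropped at the top of this patch) used to do. The ordering matters: fault_in_pages_readable() touches the source buffer while sleeping is still allowed, because once kmap_atomic() has been taken no page fault may be serviced; this preserves the deadlock avoidance described in the comment the patch removes, just closer to the copy itself. A sketch of the pattern as a standalone helper, with an illustrative name and short-copy handling elided:

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

static size_t demo_copy_from_user(struct page *page, unsigned long offset,
				  const char __user *buf, size_t bytes)
{
	char *kaddr;
	size_t copied;

	fault_in_pages_readable(buf, bytes);	/* may fault: still preemptible */
	kaddr = kmap_atomic(page, KM_USER0);	/* atomic: faults are forbidden */
	copied = bytes -
		__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	return copied;	/* may be short if the pre-fault raced with reclaim */
}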