diff --git a/mm/shmem.c b/mm/shmem.c
index 5402481c28d190a83718f6b1897eec78df4c0209..8f419cff9e3451fa3b4a98026d332d45ae80ea86 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       int     mode;           /* FALLOC_FL mode currently operating */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
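
With the new field, the whole struct presumably reads as below; this is a
reconstruction for reference, not part of the patch, since the hunk above
cuts off before nr_unswapped (which the shmem_writepage() hunk below uses):

struct shmem_falloc {
        int     mode;           /* FALLOC_FL mode currently operating */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};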
@@ -759,6 +760,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->mode &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
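
The new !shmem_falloc->mode test is needed because hole-punching now
publishes a shmem_falloc in inode->i_private as well: the nr_unswapped
accounting only makes sense while a preallocation owns i_private, and (as
the shmem_fallocate() hunk below shows) mode is 0 for preallocation once
FALLOC_FL_KEEP_SIZE has been masked off. A hypothetical helper, not in the
patch, expressing that intent:

/* Illustration only: true while i_private belongs to a preallocation
 * rather than to a hole-punch. */
static inline bool shmem_falloc_is_prealloc(const struct shmem_falloc *f)
{
        return f && !f->mode;
}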
@@ -1233,6 +1235,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched, and
+        * wait on i_mutex to be released if vmf->flags permits.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (!shmem_falloc ||
+                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+                   vmf->pgoff < shmem_falloc->start ||
+                   vmf->pgoff >= shmem_falloc->next)
+                       shmem_falloc = NULL;
+               spin_unlock(&inode->i_lock);
+               /*
+                * i_lock has protected us from taking shmem_falloc seriously
+                * once shmem_fallocate() has returned and its stack frame is
+                * gone.  i_lock does not serialize with i_mutex at all, but
+                * it does not matter if we sometimes wait unnecessarily, or
+                * sometimes miss a wait: those cases just need to be rare.
+                */
+               if (shmem_falloc) {
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               up_read(&vma->vm_mm->mmap_sem);
+                               mutex_lock(&inode->i_mutex);
+                               mutex_unlock(&inode->i_mutex);
+                               return VM_FAULT_RETRY;
+                       }
+                       /* cond_resched? Leave that to GUP or return to user */
+                       return VM_FAULT_NOPAGE;
+               }
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
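
The scenario the comment above describes can be exercised from userspace
in the way Trinity stumbled on it: one thread punches the same hole in a
loop while another keeps faulting pages back into that range through a
shared mapping. A hand-rolled sketch of such a load (file path, sizes and
the two-thread shape are my assumptions; build with -pthread):

#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define SZ (16 << 20)

static void *punch_loop(void *arg)
{
        int fd = *(int *)arg;

        for (;;)        /* keep re-punching the same hole */
                fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          0, SZ);
        return NULL;
}

int main(void)
{
        int fd = open("/dev/shm/punch-vs-fault", O_CREAT | O_RDWR, 0600);
        pthread_t t;
        char *map;

        ftruncate(fd, SZ);
        map = mmap(NULL, SZ, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        pthread_create(&t, NULL, punch_loop, &fd);
        for (;;)        /* fault pages straight back into the hole */
                memset(map, 1, SZ);
}

Before the patch, the faulting thread could refill the hole faster than
shmem_fallocate() could empty it, so the punch (and with it i_mutex) was
never released; with the patch, faults in the range either retry after
i_mutex drops or return VM_FAULT_NOPAGE.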
@@ -1406,8 +1446,7 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        return copied;
 }
 
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
+static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
@@ -1416,15 +1455,8 @@ static ssize_t shmem_file_aio_read(struct kiocb *iocb,
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;
        int error = 0;
-       ssize_t retval;
-       size_t count;
+       ssize_t retval = 0;
        loff_t *ppos = &iocb->ki_pos;
-       struct iov_iter iter;
-
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-       iov_iter_init(&iter, iov, nr_segs, count, 0);
 
        /*
         * Might this read be for a stacking filesystem?  Then when reading
@@ -1500,14 +1532,14 @@ static ssize_t shmem_file_aio_read(struct kiocb *iocb,
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 */
-               ret = copy_page_to_iter(page, offset, nr, &iter);
+               ret = copy_page_to_iter(page, offset, nr, to);
                retval += ret;
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
 
                page_cache_release(page);
-               if (!iov_iter_count(&iter))
+               if (!iov_iter_count(to))
                        break;
                if (ret < nr) {
                        error = -EFAULT;
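
The deleted generic_segment_checks()/iov_iter_init() preamble is no longer
needed because a ->read_iter method receives a ready-built, pre-validated
iov_iter from its caller. In the 3.16-era VFS that caller is a
new_sync_read()-style shim; roughly like this (a from-memory sketch rather
than this patch's code, and sketch_sync_read is a made-up name):

static ssize_t sketch_sync_read(struct file *filp, char __user *buf,
                                size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct kiocb kiocb;
        struct iov_iter iter;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        iov_iter_init(&iter, READ, &iov, 1, len);

        ret = filp->f_op->read_iter(&kiocb, &iter);
        *ppos = kiocb.ki_pos;
        return ret;
}

The copy loop itself then only swaps its private iter for the passed-in
to, as the copy_page_to_iter() hunk above shows.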
@@ -1732,20 +1764,31 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        pgoff_t start, index, end;
        int error;
 
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&inode->i_mutex);
 
+       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
 
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
+
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
                error = 0;
-               goto out;
+               goto undone;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
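
Note that the punch-hole branch now exits via goto undone rather than goto
out. The undone: label itself is outside the hunks shown here, but since
both shmem_fault() and shmem_writepage() inspect inode->i_private under
i_lock, it presumably unpublishes the on-stack struct before i_mutex is
released, along these lines (an assumption; shmem_falloc_unpublish is a
made-up name):

/* Assumed epilogue: i_private points at a struct living in
 * shmem_fallocate()'s stack frame, so it must be cleared before
 * that frame goes away. */
static void shmem_falloc_unpublish(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        inode->i_private = NULL;
        spin_unlock(&inode->i_lock);
}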
@@ -2629,13 +2672,13 @@ static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
        .llseek         = shmem_file_llseek,
-       .read           = do_sync_read,
-       .write          = do_sync_write,
-       .aio_read       = shmem_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read           = new_sync_read,
+       .write          = new_sync_write,
+       .read_iter      = shmem_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
-       .splice_write   = generic_file_splice_write,
+       .splice_write   = iter_file_splice_write,
        .fallocate      = shmem_fallocate,
 #endif
 };
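
With .splice_write switched to iter_file_splice_write, splicing into tmpfs
is served through ->write_iter like any other write, so shmem needs no
dedicated splice-out method. A quick userspace smoke test (file path is my
choice):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int p[2];
        int fd = open("/dev/shm/splice-test", O_CREAT | O_WRONLY | O_TRUNC, 0600);
        ssize_t n;

        pipe(p);
        write(p[1], "hello tmpfs\n", 12);
        n = splice(p[0], NULL, fd, NULL, 12, 0);        /* pipe -> tmpfs file */
        printf("spliced %zd bytes\n", n);
        return 0;
}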