git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 May 2017 16:49:35 +0000 (09:49 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 13 May 2017 16:49:35 +0000 (09:49 -0700)
Merge misc fixes from Andrew Morton:
 "15 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, docs: update memory.stat description with workingset* entries
  mm: vmscan: scan until it finds eligible pages
  mm, thp: copying user pages must schedule on collapse
  dax: fix PMD data corruption when fault races with write
  dax: fix data corruption when fault races with write
  ext4: return to starting transaction in ext4_dax_huge_fault()
  mm: fix data corruption due to stale mmap reads
  dax: prevent invalidation of mapped DAX entries
  Tigran has moved
  mm, vmalloc: fix vmalloc users tracking properly
  mm/khugepaged: add missed tracepoint for collapse_huge_page_swapin
  gcov: support GCC 7.1
  mm, vmstat: Remove spurious WARN() during zoneinfo print
  time: delete current_fs_time()
  hwpoison, memcg: forcibly uncharge LRU pages

fs/dax.c
include/linux/dax.h

diff --combined fs/dax.c
index 18fe9bb22d5566ab29f1a0aa05a65ab2784c5cac,93ae87297ffa09ed6e25b04da9f985ea52ba5d73..c22eaf162f95c1456563b31a8362da9e531c9185
+++ b/fs/dax.c
@@@ -460,35 -460,6 +460,6 @@@ int dax_delete_mapping_entry(struct add
        return ret;
  }
  
- /*
-  * Invalidate exceptional DAX entry if easily possible. This handles DAX
-  * entries for invalidate_inode_pages() so we evict the entry only if we can
-  * do so without blocking.
-  */
- int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
- {
-       int ret = 0;
-       void *entry, **slot;
-       struct radix_tree_root *page_tree = &mapping->page_tree;
-       spin_lock_irq(&mapping->tree_lock);
-       entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
-       if (!entry || !radix_tree_exceptional_entry(entry) ||
-           slot_locked(mapping, slot))
-               goto out;
-       if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-           radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
-               goto out;
-       radix_tree_delete(page_tree, index);
-       mapping->nrexceptional--;
-       ret = 1;
- out:
-       spin_unlock_irq(&mapping->tree_lock);
-       if (ret)
-               dax_wake_mapping_entry_waiter(mapping, index, entry, true);
-       return ret;
- }
  /*
   * Invalidate exceptional DAX entry if it is clean.
   */
@@@ -993,12 -964,12 +964,12 @@@ int __dax_zero_page_range(struct block_
                void *kaddr;
                pfn_t pfn;
  
 -              rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
 +              rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
                if (rc)
                        return rc;
  
                id = dax_read_lock();
 -              rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
 +              rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
                                &pfn);
                if (rc < 0) {
                        dax_read_unlock(id);
@@@ -1044,7 -1015,7 +1015,7 @@@ dax_iomap_actor(struct inode *inode, lo
         * into page tables. We have to tear down these mappings so that data
         * written by write(2) is visible in mmap.
         */
-       if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+       if (iomap->flags & IOMAP_F_NEW) {
                invalidate_inode_pages2_range(inode->i_mapping,
                                              pos >> PAGE_SHIFT,
                                              (end - 1) >> PAGE_SHIFT);
@@@ -1177,6 -1148,12 +1148,12 @@@ static int dax_iomap_pte_fault(struct v
        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;
  
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+       if (IS_ERR(entry)) {
+               vmf_ret = dax_fault_return(PTR_ERR(entry));
+               goto out;
+       }
        /*
         * Note that we don't bother to use iomap_apply here: DAX required
         * the file system block size to be equal the page size, which means
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error) {
                vmf_ret = dax_fault_return(error);
-               goto out;
+               goto unlock_entry;
        }
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-               vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
-               goto finish_iomap;
-       }
-       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-       if (IS_ERR(entry)) {
-               vmf_ret = dax_fault_return(PTR_ERR(entry));
-               goto finish_iomap;
+               error = -EIO;   /* fs corruption? */
+               goto error_finish_iomap;
        }
  
        sector = dax_iomap_sector(&iomap, pos);
                }
  
                if (error)
-                       goto error_unlock_entry;
+                       goto error_finish_iomap;
  
                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
-               goto unlock_entry;
+               goto finish_iomap;
        }
  
        switch (iomap.type) {
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
                        vmf_ret = dax_load_hole(mapping, &entry, vmf);
-                       goto unlock_entry;
+                       goto finish_iomap;
                }
                /*FALLTHRU*/
        default:
                break;
        }
  
-  error_unlock_entry:
+  error_finish_iomap:
        vmf_ret = dax_fault_return(error) | major;
-  unlock_entry:
-       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
   finish_iomap:
        if (ops->iomap_end) {
                int copied = PAGE_SIZE;
                 */
                ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
- out:
+  unlock_entry:
+       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+  out:
        trace_dax_pte_fault_done(inode, vmf, vmf_ret);
        return vmf_ret;
  }
@@@ -1416,6 -1387,16 +1387,16 @@@ static int dax_iomap_pmd_fault(struct v
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;
  
+       /*
+        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+        * PMD or a HZP entry.  If it can't (because a 4k page is already in
+        * the tree, for instance), it will return -EEXIST and we just fall
+        * back to 4k entries.
+        */
+       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+       if (IS_ERR(entry))
+               goto fallback;
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
-               goto fallback;
+               goto unlock_entry;
  
        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;
  
-       /*
-        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-        * PMD or a HZP entry.  If it can't (because a 4k page is already in
-        * the tree, for instance), it will return -EEXIST and we just fall
-        * back to 4k entries.
-        */
-       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-       if (IS_ERR(entry))
-               goto finish_iomap;
        switch (iomap.type) {
        case IOMAP_MAPPED:
                result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
-                       goto unlock_entry;
+                       break;
                result = dax_pmd_load_hole(vmf, &iomap, &entry);
                break;
        default:
                break;
        }
  
-  unlock_entry:
-       put_locked_mapping_entry(mapping, pgoff, entry);
   finish_iomap:
        if (ops->iomap_end) {
                int copied = PMD_SIZE;
                ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
                                &iomap);
        }
+  unlock_entry:
+       put_locked_mapping_entry(mapping, pgoff, entry);
   fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, vmf->pmd, vmf->address);
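
The fs/dax.c hunks above all implement the same ordering change: the locked radix-tree entry is now grabbed before ops->iomap_begin() and released only after ops->iomap_end(), instead of being taken in the middle of the fault. Below is a minimal sketch of that call ordering for the PTE case. It reuses the helper names from the diff but is illustrative pseudocode, not buildable kernel code: error handling is trimmed and locals such as mapping, inode and pos are assumed to be set up as in the real dax_iomap_pte_fault().

/*
 * Sketch only: shows the lock ordering after this merge; the surrounding
 * setup from dax_iomap_pte_fault() is omitted.
 */
static int dax_pte_fault_ordering_sketch(struct vm_fault *vmf,
                                         const struct iomap_ops *ops)
{
        struct iomap iomap = { 0 };
        void *entry;
        int vmf_ret, error;

        /* 1. Lock the mapping entry first, so a racing write()/invalidate
         *    cannot slip in between the block lookup and the PTE insertion. */
        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry))
                return dax_fault_return(PTR_ERR(entry));

        /* 2. Only then ask the filesystem for the block mapping. */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, IOMAP_WRITE, &iomap);
        if (error) {
                vmf_ret = dax_fault_return(error);
                goto unlock_entry;
        }

        /* 3. Insert the mapping into the page tables (elided here),
         *    still holding the locked entry. */
        vmf_ret = 0;

        /* 4. Finish the iomap operation ... */
        if (ops->iomap_end)
                ops->iomap_end(inode, pos, PAGE_SIZE, PAGE_SIZE,
                               IOMAP_WRITE, &iomap);
unlock_entry:
        /* 5. ... and drop the locked entry last. */
        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
        return vmf_ret;
}

The PMD path in the diff follows the same pattern, using RADIX_DAX_PMD and PMD_SIZE and falling back to 4k entries when grab_mapping_entry() returns an error.
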
diff --combined include/linux/dax.h
index 7fdf1d71004299681918e0a51d0c01ce5acd0846,d1236d16ef00ed74f7ab3116288db683e591f2d5..00ebac854bb79f16ed5b04d74c6dafa641de0bb7
@@@ -18,38 -18,12 +18,38 @@@ struct dax_operations 
                        void **, pfn_t *);
  };
  
 +int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
 +#if IS_ENABLED(CONFIG_FS_DAX)
 +int __bdev_dax_supported(struct super_block *sb, int blocksize);
 +static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 +{
 +      return __bdev_dax_supported(sb, blocksize);
 +}
 +#else
 +static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 +{
 +      return -EOPNOTSUPP;
 +}
 +#endif
 +
 +#if IS_ENABLED(CONFIG_DAX)
 +struct dax_device *dax_get_by_host(const char *host);
 +void put_dax(struct dax_device *dax_dev);
 +#else
 +static inline struct dax_device *dax_get_by_host(const char *host)
 +{
 +      return NULL;
 +}
 +
 +static inline void put_dax(struct dax_device *dax_dev)
 +{
 +}
 +#endif
 +
  int dax_read_lock(void);
  void dax_read_unlock(int id);
 -struct dax_device *dax_get_by_host(const char *host);
  struct dax_device *alloc_dax(void *private, const char *host,
                const struct dax_operations *ops);
 -void put_dax(struct dax_device *dax_dev);
  bool dax_alive(struct dax_device *dax_dev);
  void kill_dax(struct dax_device *dax_dev);
  void *dax_get_private(struct dax_device *dax_dev);
@@@ -89,7 -63,6 +89,6 @@@ ssize_t dax_iomap_rw(struct kiocb *iocb
  int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                    const struct iomap_ops *ops);
  int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
- int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
  int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                      pgoff_t index);
  void dax_wake_mapping_entry_waiter(struct address_space *mapping,
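
The include/linux/dax.h hunk above uses the common kernel pattern of pairing a real declaration under IS_ENABLED(CONFIG_FS_DAX) or IS_ENABLED(CONFIG_DAX) with a static inline stub, so callers compile unchanged whether or not the feature is built in. A hypothetical, self-contained userspace illustration of the same pattern follows; it uses a plain #ifdef and made-up names, not the kernel API.

#include <stdio.h>
#include <errno.h>

#ifdef ENABLE_FOO
/* Stand-in for the real implementation, which in the kernel would live
 * in a separate compilation unit. */
int foo_supported(int blocksize)
{
        return blocksize == 4096 ? 0 : -EINVAL;
}
#else
/* Stub used when the feature is compiled out: callers still build and
 * simply get an "unsupported" return value. */
static inline int foo_supported(int blocksize)
{
        (void)blocksize;
        return -EOPNOTSUPP;
}
#endif

int main(void)
{
        /* Built without -DENABLE_FOO this prints the -EOPNOTSUPP stub value. */
        printf("foo_supported(4096) = %d\n", foo_supported(4096));
        return 0;
}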