diff --git a/mm/filemap.c b/mm/filemap.c
index da7a35d83de7edc4ed562caa5e1925194f2c7229..a8c69c8c0a90a1e50cfdc822ce2cbc3854e0c8e8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
  *    ->tree_lock              (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock                (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock          (page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock       (page_remove_rmap->mem_cgroup_begin_page_stat)
+ *    ->memcg->move_lock       (page_remove_rmap->lock_page_memcg)
  *    bdi.wb->list_lock                (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock          (zap_pte_range->set_page_dirty)
  *    ->private_lock           (zap_pte_range->__set_page_dirty_buffers)
@@ -176,11 +176,9 @@ static void page_cache_tree_delete(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold the mapping's tree_lock and
- * mem_cgroup_begin_page_stat().
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
-void __delete_from_page_cache(struct page *page, void *shadow,
-                             struct mem_cgroup *memcg)
+void __delete_from_page_cache(struct page *page, void *shadow)
 {
        struct address_space *mapping = page->mapping;
 
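With the memcg argument gone, the contract for __delete_from_page_cache() is
just the mapping's tree_lock; memcg page-state locking no longer concerns this
caller. A minimal sketch of the new calling convention (drop_cached_page is a
hypothetical helper, not part of this patch):

        /* Hypothetical caller: only tree_lock is needed now. */
        static void drop_cached_page(struct address_space *mapping,
                                     struct page *page)
        {
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(page, NULL);   /* NULL: no shadow entry */
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        }
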
@@ -239,8 +237,7 @@ void __delete_from_page_cache(struct page *page, void *shadow,
         * anyway will be cleared before returning page into buddy allocator.
         */
        if (WARN_ON_ONCE(PageDirty(page)))
-               account_page_cleaned(page, mapping, memcg,
-                                    inode_to_wb(mapping->host));
+               account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
 }
 
 /**
@@ -254,7 +251,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
 void delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page->mapping;
-       struct mem_cgroup *memcg;
        unsigned long flags;
 
        void (*freepage)(struct page *);
@@ -263,11 +259,9 @@ void delete_from_page_cache(struct page *page)
 
        freepage = mapping->a_ops->freepage;
 
-       memcg = mem_cgroup_begin_page_stat(page);
        spin_lock_irqsave(&mapping->tree_lock, flags);
-       __delete_from_page_cache(page, NULL, memcg);
+       __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       mem_cgroup_end_page_stat(memcg);
 
        if (freepage)
                freepage(page);
@@ -551,7 +545,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
        if (!error) {
                struct address_space *mapping = old->mapping;
                void (*freepage)(struct page *);
-               struct mem_cgroup *memcg;
                unsigned long flags;
 
                pgoff_t offset = old->index;
@@ -561,9 +554,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                new->mapping = mapping;
                new->index = offset;
 
-               memcg = mem_cgroup_begin_page_stat(old);
                spin_lock_irqsave(&mapping->tree_lock, flags);
-               __delete_from_page_cache(old, NULL, memcg);
+               __delete_from_page_cache(old, NULL);
                error = radix_tree_insert(&mapping->page_tree, offset, new);
                BUG_ON(error);
                mapping->nrpages++;
@@ -576,8 +568,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               mem_cgroup_end_page_stat(memcg);
-               mem_cgroup_replace_page(old, new);
+               mem_cgroup_migrate(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
@@ -595,7 +586,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
        void **slot;
        int error;
 
-       error = __radix_tree_create(&mapping->page_tree, page->index,
+       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
                                    &node, &slot);
        if (error)
                return error;
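The new third argument to __radix_tree_create() is presumably the entry order
added by the multi-order radix tree work; the page cache inserts one page at a
time, hence order 0. A sketch of the updated call, under that assumption:

        /* order 0: a plain single-page entry, not a multi-order one */
        error = __radix_tree_create(&mapping->page_tree, page->index, 0,
                                    &node, &slot);
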
@@ -1264,7 +1255,6 @@ unsigned find_get_entries(struct address_space *mapping,
                return 0;
 
        rcu_read_lock();
-restart:
        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                struct page *page;
 repeat:
@@ -1272,8 +1262,10 @@ repeat:
                if (unlikely(!page))
                        continue;
                if (radix_tree_exception(page)) {
-                       if (radix_tree_deref_retry(page))
-                               goto restart;
+                       if (radix_tree_deref_retry(page)) {
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
+                       }
                        /*
                         * A shadow entry of a recently evicted page, a swap
                         * entry from shmem/tmpfs or a DAX entry.  Return it
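The same substitution repeats through the find_get_* family below: instead of
goto restart, which rewound the whole walk to the starting index (and relied
on the "entry at index 0" justification deleted in the following hunks),
radix_tree_iter_retry() resets the iterator so the loop retries the current
index. The idiom, condensed from the hunks in this patch:

        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                struct page *page = radix_tree_deref_slot(slot);

                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
                                /* re-read this index; do not restart the walk */
                                slot = radix_tree_iter_retry(&iter);
                                continue;
                        }
                        /* shadow, swap or DAX entry: handle or skip */
                }
                /* ... take a reference and use page ... */
        }
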
@@ -1326,7 +1318,6 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                return 0;
 
        rcu_read_lock();
-restart:
        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                struct page *page;
 repeat:
@@ -1336,13 +1327,8 @@ repeat:
 
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
-                               /*
-                                * Transient condition which can only trigger
-                                * when entry at index 0 moves out of or back
-                                * to root: none yet gotten, safe to restart.
-                                */
-                               WARN_ON(iter.index);
-                               goto restart;
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page,
@@ -1393,7 +1379,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
                return 0;
 
        rcu_read_lock();
-restart:
        radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
                struct page *page;
 repeat:
@@ -1404,12 +1389,8 @@ repeat:
 
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
-                               /*
-                                * Transient condition which can only trigger
-                                * when entry at index 0 moves out of or back
-                                * to root: none yet gotten, safe to restart.
-                                */
-                               goto restart;
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page,
@@ -1469,7 +1450,6 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                return 0;
 
        rcu_read_lock();
-restart:
        radix_tree_for_each_tagged(slot, &mapping->page_tree,
                                   &iter, *index, tag) {
                struct page *page;
@@ -1480,12 +1460,8 @@ repeat:
 
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
-                               /*
-                                * Transient condition which can only trigger
-                                * when entry at index 0 moves out of or back
-                                * to root: none yet gotten, safe to restart.
-                                */
-                               goto restart;
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
                        }
                        /*
                         * A shadow entry of a recently evicted page.
@@ -1548,7 +1524,6 @@ unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                return 0;
 
        rcu_read_lock();
-restart:
        radix_tree_for_each_tagged(slot, &mapping->page_tree,
                                   &iter, start, tag) {
                struct page *page;
@@ -1558,12 +1533,8 @@ repeat:
                        continue;
                if (radix_tree_exception(page)) {
                        if (radix_tree_deref_retry(page)) {
-                               /*
-                                * Transient condition which can only trigger
-                                * when entry at index 0 moves out of or back
-                                * to root: none yet gotten, safe to restart.
-                                */
-                               goto restart;
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
                        }
 
                        /*
@@ -1668,6 +1639,15 @@ find_page:
                                        index, last_index - index);
                }
                if (!PageUptodate(page)) {
+                       /*
+                        * See comment in do_read_cache_page on why
+                        * wait_on_page_locked is used to avoid unnecessary
+                        * serialisation and why it's safe.
+                        */
+                       wait_on_page_locked_killable(page);
+                       if (PageUptodate(page))
+                               goto page_ok;
+
                        if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
                                        !mapping->a_ops->is_partially_uptodate)
                                goto page_not_up_to_date;
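The added block gives the buffered read path an optimistic fast path: wait for
whoever holds the page lock to finish I/O without taking the lock, and only
fall through to the existing locked slow path if the page still is not up to
date. Condensed to its shape (labels as in the surrounding function):

        if (!PageUptodate(page)) {
                wait_on_page_locked_killable(page);     /* no lock taken */
                if (PageUptodate(page))
                        goto page_ok;           /* someone else filled it */
                /* otherwise: partially-uptodate check or locked slow path */
        }
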
@@ -1860,15 +1840,16 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t retval = 0;
        loff_t *ppos = &iocb->ki_pos;
        loff_t pos = *ppos;
+       size_t count = iov_iter_count(iter);
+
+       if (!count)
+               goto out; /* skip atime */
 
        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = file->f_mapping;
                struct inode *inode = mapping->host;
-               size_t count = iov_iter_count(iter);
                loff_t size;
 
-               if (!count)
-                       goto out; /* skip atime */
                size = i_size_read(inode);
                retval = filemap_write_and_wait_range(mapping, pos,
                                        pos + count - 1);
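Hoisting the count check out of the IOCB_DIRECT branch means a zero-length
read now returns before touching the page cache or atime on the buffered path
too, not only for direct I/O. The early exit, in brief:

        size_t count = iov_iter_count(iter);

        if (!count)
                goto out;       /* nothing to read: skip atime on every path */
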
@@ -2171,10 +2152,11 @@ repeat:
                if (unlikely(!page))
                        goto next;
                if (radix_tree_exception(page)) {
-                       if (radix_tree_deref_retry(page))
-                               break;
-                       else
-                               goto next;
+                       if (radix_tree_deref_retry(page)) {
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
+                       }
+                       goto next;
                }
 
                if (!page_cache_get_speculative(page))
@@ -2303,7 +2285,7 @@ static struct page *wait_on_page_read(struct page *page)
        return page;
 }
 
-static struct page *__read_cache_page(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
                                pgoff_t index,
                                int (*filler)(void *, struct page *),
                                void *data,
@@ -2325,53 +2307,74 @@ repeat:
                        /* Presumably ENOMEM for radix tree node */
                        return ERR_PTR(err);
                }
+
+filler:
                err = filler(data, page);
                if (err < 0) {
                        page_cache_release(page);
-                       page = ERR_PTR(err);
-               } else {
-                       page = wait_on_page_read(page);
+                       return ERR_PTR(err);
                }
-       }
-       return page;
-}
-
-static struct page *do_read_cache_page(struct address_space *mapping,
-                               pgoff_t index,
-                               int (*filler)(void *, struct page *),
-                               void *data,
-                               gfp_t gfp)
 
-{
-       struct page *page;
-       int err;
+               page = wait_on_page_read(page);
+               if (IS_ERR(page))
+                       return page;
+               goto out;
+       }
+       if (PageUptodate(page))
+               goto out;
 
-retry:
-       page = __read_cache_page(mapping, index, filler, data, gfp);
-       if (IS_ERR(page))
-               return page;
+       /*
+        * Page is not up to date and may be locked due to one of the following:
+        * case a: Page is being filled and the page lock is held
+        * case b: Read/write error clearing the page uptodate status
+        * case c: Truncation in progress (page locked)
+        * case d: Reclaim in progress
+        *
+        * Case a, the page will be up to date when the page is unlocked.
+        *    There is no need to serialise on the page lock here as the page
+        *    is pinned so the lock gives no additional protection. Even if
+        *    the page is truncated, the data is still valid if PageUptodate
+        *    as it's a read vs truncate race.
+        * Case b, the page will not be up to date.
+        * Case c, the page may be truncated but in itself, the data may still
+        *    be valid after IO completes as it's a read vs truncate race. The
+        *    operation must restart if the page is not uptodate on unlock but
+        *    otherwise serialising on the page lock to stabilise the mapping
+        *    gives no additional guarantees to the caller as the page lock is
+        *    released before return.
+        * Case d, similar to truncation. If reclaim holds the page lock, it
+        *    will be a race with remove_mapping that determines if the mapping
+        *    is valid on unlock but otherwise the data is valid and there is
+        *    no need to serialise with the page lock.
+        *
+        * As the page lock gives no additional guarantee, we optimistically
+        * wait on the page to be unlocked and check if it's up to date and
+        * use the page if it is. Otherwise, the page lock is required to
+        * distinguish between the different cases. The motivation is that we
+        * avoid spurious serialisations and wakeups when multiple processes
+        * wait on the same page for IO to complete.
+        */
+       wait_on_page_locked(page);
        if (PageUptodate(page))
                goto out;
 
+       /* Distinguish between all the cases under the safety of the lock */
        lock_page(page);
+
+       /* Case c or d, restart the operation */
        if (!page->mapping) {
                unlock_page(page);
                page_cache_release(page);
-               goto retry;
+               goto repeat;
        }
+
+       /* Someone else locked and filled the page in a very small window */
        if (PageUptodate(page)) {
                unlock_page(page);
                goto out;
        }
-       err = filler(data, page);
-       if (err < 0) {
-               page_cache_release(page);
-               return ERR_PTR(err);
-       } else {
-               page = wait_on_page_read(page);
-               if (IS_ERR(page))
-                       return page;
-       }
+       goto filler;
+
 out:
        mark_page_accessed(page);
        return page;
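
Taken together, the rewritten do_read_cache_page() reduces to one decision
ladder matching the case a-d comment above; a condensed sketch (the filler,
repeat and out labels stand for the ones in the function, error handling
elided):

        page = find_get_page(mapping, index);
        if (!page)
                goto filler;            /* allocate, insert, run filler() */
        if (PageUptodate(page))
                goto out;               /* common fast path */

        wait_on_page_locked(page);      /* optimistic: no lock taken */
        if (PageUptodate(page))
                goto out;               /* case a resolved while we waited */

        lock_page(page);                /* disambiguate cases b, c and d */
        if (!page->mapping) {           /* case c or d: truncated/reclaimed */
                unlock_page(page);
                page_cache_release(page);
                goto repeat;
        }
        if (PageUptodate(page)) {       /* raced with a concurrent filler */
                unlock_page(page);
                goto out;
        }
        goto filler;                    /* case b: run the filler again */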