git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mm: filemap: don't plant shadow entries without radix tree node
authorJohannes Weiner <hannes@cmpxchg.org>
Tue, 4 Oct 2016 20:02:08 +0000 (22:02 +0200)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 5 Oct 2016 16:17:56 +0000 (09:17 -0700)
When the underflow checks were added to workingset_node_shadow_dec(),
they triggered immediately:

  kernel BUG at ./include/linux/swap.h:276!
  invalid opcode: 0000 [#1] SMP
  Modules linked in: isofs usb_storage fuse xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun nf_conntrack_netbios_ns nf_conntrack_broadcast ip6t_REJECT nf_reject_ipv6
   soundcore wmi acpi_als pinctrl_sunrisepoint kfifo_buf tpm_tis industrialio acpi_pad pinctrl_intel tpm_tis_core tpm nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_crypt
  CPU: 0 PID: 20929 Comm: blkid Not tainted 4.8.0-rc8-00087-gbe67d60ba944 #1
  Hardware name: System manufacturer System Product Name/Z170-K, BIOS 1803 05/06/2016
  task: ffff8faa93ecd940 task.stack: ffff8faa7f478000
  RIP: page_cache_tree_insert+0xf1/0x100
  Call Trace:
    __add_to_page_cache_locked+0x12e/0x270
    add_to_page_cache_lru+0x4e/0xe0
    mpage_readpages+0x112/0x1d0
    blkdev_readpages+0x1d/0x20
    __do_page_cache_readahead+0x1ad/0x290
    force_page_cache_readahead+0xaa/0x100
    page_cache_sync_readahead+0x3f/0x50
    generic_file_read_iter+0x5af/0x740
    blkdev_read_iter+0x35/0x40
    __vfs_read+0xe1/0x130
    vfs_read+0x96/0x130
    SyS_read+0x55/0xc0
    entry_SYSCALL_64_fastpath+0x13/0x8f
  Code: 03 00 48 8b 5d d8 65 48 33 1c 25 28 00 00 00 44 89 e8 75 19 48 83 c4 18 5b 41 5c 41 5d 41 5e 5d c3 0f 0b 41 bd ef ff ff ff eb d7 <0f> 0b e8 88 68 ef ff 0f 1f 84 00
  RIP  page_cache_tree_insert+0xf1/0x100

This is a long-standing bug in the way shadow entries are accounted in
the radix tree nodes. The shrinker needs to know when radix tree nodes
contain only shadow entries, no pages, so node->count is split in half
to count shadows in the upper bits and pages in the lower bits.

Unfortunately, the radix tree implementation doesn't know of this and
assumes all entries are in node->count. When there is a shadow entry
directly in root->rnode and the tree is later extended, the radix tree
implementation will copy that entry into the new node and bump its
node->count, i.e. increasing the page count bits. Once the shadow gets
removed and we subtract from the upper counter, node->count underflows
and triggers the warning. Afterwards, without node->count reaching 0
again, the radix tree node is leaked.

Limit shadow entries to when we have actual radix tree nodes and can
count them properly. That means we lose the ability to detect refaults
from files that had only the first page faulted in at eviction time.

Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check")
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-and-tested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/radix-tree.h
lib/radix-tree.c
mm/filemap.c

index 4c45105dece3879f943219276a2db81c4656ce9f..52b97db938309429e7262f79af6eaef58fd672d2 100644 (file)
@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
                              struct radix_tree_node *node);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-struct radix_tree_node *radix_tree_replace_clear_tags(
-                               struct radix_tree_root *root,
-                               unsigned long index, void *entry);
+void radix_tree_clear_tags(struct radix_tree_root *root,
+                          struct radix_tree_node *node,
+                          void **slot);
 unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
                        void **results, unsigned long first_index,
                        unsigned int max_items);
index 91f0727e3cada11bd721afdeeb112dd196546637..8e6d552c40ddfc7fa745a8a8acf34ee17f02bc3d 100644 (file)
@@ -1583,15 +1583,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 }
 EXPORT_SYMBOL(radix_tree_delete);
 
-struct radix_tree_node *radix_tree_replace_clear_tags(
-                       struct radix_tree_root *root,
-                       unsigned long index, void *entry)
+void radix_tree_clear_tags(struct radix_tree_root *root,
+                          struct radix_tree_node *node,
+                          void **slot)
 {
-       struct radix_tree_node *node;
-       void **slot;
-
-       __radix_tree_lookup(root, index, &node, &slot);
-
        if (node) {
                unsigned int tag, offset = get_slot_offset(node, slot);
                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -1600,9 +1595,6 @@ struct radix_tree_node *radix_tree_replace_clear_tags(
                /* Clear root node tags */
                root->gfp_mask &= __GFP_BITS_MASK;
        }
-
-       radix_tree_replace_slot(slot, entry);
-       return node;
 }
 
 /**
index 2d0986a64f1f729548937e239860ac7620a16393..96b9e9c30630b4e8d6f5ca28b2480348ee51950b 100644 (file)
@@ -169,33 +169,35 @@ static int page_cache_tree_insert(struct address_space *mapping,
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
-       struct radix_tree_node *node;
        int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(nr != 1 && shadow, page);
 
-       if (shadow) {
-               mapping->nrexceptional += nr;
-               /*
-                * Make sure the nrexceptional update is committed before
-                * the nrpages update so that final truncate racing
-                * with reclaim does not see both counters 0 at the
-                * same time and miss a shadow entry.
-                */
-               smp_wmb();
-       }
-       mapping->nrpages -= nr;
-
        for (i = 0; i < nr; i++) {
-               node = radix_tree_replace_clear_tags(&mapping->page_tree,
-                               page->index + i, shadow);
+               struct radix_tree_node *node;
+               void **slot;
+
+               __radix_tree_lookup(&mapping->page_tree, page->index + i,
+                                   &node, &slot);
+
+               radix_tree_clear_tags(&mapping->page_tree, node, slot);
+
                if (!node) {
                        VM_BUG_ON_PAGE(nr != 1, page);
-                       return;
+                       /*
+                        * We need a node to properly account shadow
+                        * entries. Don't plant any without. XXX
+                        */
+                       shadow = NULL;
                }
 
+               radix_tree_replace_slot(slot, shadow);
+
+               if (!node)
+                       break;
+
                workingset_node_pages_dec(node);
                if (shadow)
                        workingset_node_shadows_inc(node);
@@ -219,6 +221,18 @@ static void page_cache_tree_delete(struct address_space *mapping,
                                        &node->private_list);
                }
        }
+
+       if (shadow) {
+               mapping->nrexceptional += nr;
+               /*
+                * Make sure the nrexceptional update is committed before
+                * the nrpages update so that final truncate racing
+                * with reclaim does not see both counters 0 at the
+                * same time and miss a shadow entry.
+                */
+               smp_wmb();
+       }
+       mapping->nrpages -= nr;
 }
 
 /*