]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
mm: memcontrol: use the node-native slab memory counters
author: Johannes Weiner <hannes@cmpxchg.org>
Thu, 6 Jul 2017 22:40:46 +0000 (15:40 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jul 2017 23:24:35 +0000 (16:24 -0700)
Now that the slab counters are moved from the zone to the node level we
can drop the private memcg node stats and use the official ones.

Link: http://lkml.kernel.org/r/20170530181724.27197-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab.h

index 72d0853beb31fbbf6658954962cfb86465c70982..fa506ae61d66f678047bcaab7ba40ae8de40e636 100644 (file)
@@ -44,8 +44,6 @@ enum memcg_stat_item {
        MEMCG_SOCK,
        /* XXX: why are these zone and not node counters? */
        MEMCG_KERNEL_STACK_KB,
-       MEMCG_SLAB_RECLAIMABLE,
-       MEMCG_SLAB_UNRECLAIMABLE,
        MEMCG_NR_STAT,
 };
 
index 4f686fc1c5fac144769a278315d9074cee4d6cb5..dceb0deb8d5e34d26e014c50b6fb9dbf8fea3aad 100644 (file)
@@ -5198,8 +5198,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
        seq_printf(m, "kernel_stack %llu\n",
                   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
        seq_printf(m, "slab %llu\n",
-                  (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
-                        stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
+                  (u64)(stat[NR_SLAB_RECLAIMABLE] +
+                        stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
        seq_printf(m, "sock %llu\n",
                   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
@@ -5223,9 +5223,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
        }
 
        seq_printf(m, "slab_reclaimable %llu\n",
-                  (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
+                  (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
        seq_printf(m, "slab_unreclaimable %llu\n",
-                  (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+                  (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
 
        /* Accumulated memory events */
 
index 9cfcf099709c19cfc8b5070325a0527c763eddaa..69f0579cb5aa8e512fedc3f2b6abf73df1a8f09d 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -287,7 +287,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 
        memcg_kmem_update_page_stat(page,
                        (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                        1 << order);
        return 0;
 }
@@ -300,7 +300,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 
        memcg_kmem_update_page_stat(page,
                        (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                       MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
+                       NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                        -(1 << order));
        memcg_kmem_uncharge(page, order);
 }