mm: vmstat: move slab statistics from zone to node counters
index 8addc535bcdc58794fe40e72a729e4589d44d2b6..aa5aa6bfb35e3f2d33caac3845cc55431e765df0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1615,7 +1615,7 @@ out:
        if (!page)
                return NULL;
 
-       mod_zone_page_state(page_zone(page),
+       mod_node_page_state(page_pgdat(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                1 << oo_order(oo));
@@ -1655,7 +1655,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
        kmemcheck_free_shadow(page, compound_order(page));
 
-       mod_zone_page_state(page_zone(page),
+       mod_node_page_state(page_pgdat(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                -pages);
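
The two hunks above are the substance of the commit named at the top: slab page accounting moves from per-zone to per-node vmstat counters, so NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE are now charged against the page's NUMA node rather than its zone. For orientation only, page_pgdat() is essentially a node lookup; a minimal sketch of the helper assumed here (the real definition lives in include/linux/mm.h and may differ in detail):

	static inline struct pglist_data *page_pgdat(const struct page *page)
	{
		/* Resolve the NUMA node owning this page, then its pg_data_t. */
		return NODE_DATA(page_to_nid(page));
	}
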
@@ -1829,7 +1829,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                        stat(s, CPU_PARTIAL_NODE);
                }
                if (!kmem_cache_has_cpu_partial(s)
-                       || available > s->cpu_partial / 2)
+                       || available > slub_cpu_partial(s) / 2)
                        break;
 
        }
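
From here on the diff stops dereferencing s->cpu_partial directly and goes through accessor macros, so the field can be compiled out when CONFIG_SLUB_CPU_PARTIAL is not set. A plausible shape for those wrappers, shown only as a sketch (the authoritative definitions are in include/linux/slub_def.h):

	#ifdef CONFIG_SLUB_CPU_PARTIAL
	#define slub_cpu_partial(s)		((s)->cpu_partial)
	#define slub_set_cpu_partial(s, n)	(slub_cpu_partial(s) = (n))
	#else
	/* Without per-cpu partial lists the limit reads as 0 and stores are no-ops. */
	#define slub_cpu_partial(s)		(0)
	#define slub_set_cpu_partial(s, n)
	#endif
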
@@ -1993,7 +1993,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
  * Remove the cpu slab
  */
 static void deactivate_slab(struct kmem_cache *s, struct page *page,
-                               void *freelist)
+                               void *freelist, struct kmem_cache_cpu *c)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2132,6 +2132,9 @@ redo:
                discard_slab(s, page);
                stat(s, FREE_SLAB);
        }
+
+       c->page = NULL;
+       c->freelist = NULL;
 }
 
 /*
@@ -2266,11 +2269,9 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
        stat(s, CPUSLAB_FLUSH);
-       deactivate_slab(s, c->page, c->freelist);
+       deactivate_slab(s, c->page, c->freelist, c);
 
        c->tid = next_tid(c->tid);
-       c->page = NULL;
-       c->freelist = NULL;
 }
 
 /*
@@ -2302,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
        struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-       return c->page || c->partial;
+       return c->page || slub_percpu_partial(c);
 }
 
 static void flush_all(struct kmem_cache *s)
@@ -2521,9 +2522,7 @@ redo:
 
                if (unlikely(!node_match(page, searchnode))) {
                        stat(s, ALLOC_NODE_MISMATCH);
-                       deactivate_slab(s, page, c->freelist);
-                       c->page = NULL;
-                       c->freelist = NULL;
+                       deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
                }
        }
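
This hunk and the two that follow show the payoff of the widened deactivate_slab() signature: every call site used to reset the per-cpu state by hand, and now simply hands in the kmem_cache_cpu pointer. Schematically (taken from the pattern in this diff, not a new API):

	/* Old convention: each caller cleared the cpu slab itself. */
	deactivate_slab(s, page, c->freelist);
	c->page = NULL;
	c->freelist = NULL;

	/* New convention: deactivate_slab() clears c->page and c->freelist
	 * once, at its end, on behalf of all callers. */
	deactivate_slab(s, page, c->freelist, c);
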
@@ -2534,9 +2533,7 @@ redo:
         * information when the page leaves the per-cpu allocator
         */
        if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-               deactivate_slab(s, page, c->freelist);
-               c->page = NULL;
-               c->freelist = NULL;
+               deactivate_slab(s, page, c->freelist, c);
                goto new_slab;
        }
 
@@ -2568,11 +2565,10 @@ load_freelist:
 
 new_slab:
 
-       if (c->partial) {
-               page = c->page = c->partial;
-               c->partial = page->next;
+       if (slub_percpu_partial(c)) {
+               page = c->page = slub_percpu_partial(c);
+               slub_set_percpu_partial(c, page);
                stat(s, CPU_PARTIAL_ALLOC);
-               c->freelist = NULL;
                goto redo;
        }
 
@@ -2592,9 +2588,7 @@ new_slab:
                        !alloc_debug_processing(s, page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
-       deactivate_slab(s, page, get_freepointer(s, freelist));
-       c->page = NULL;
-       c->freelist = NULL;
+       deactivate_slab(s, page, get_freepointer(s, freelist), c);
        return freelist;
 }
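
The per-cpu partial list itself gets the same treatment: c->partial is only touched through slub_percpu_partial() and friends so that it, too, can disappear under !CONFIG_SLUB_CPU_PARTIAL. A sketch of what those helpers most likely look like, inferred from their use above (slub_set_percpu_partial(c, page) replaces 'c->partial = page->next'); the authoritative versions live in include/linux/slub_def.h:

	#ifdef CONFIG_SLUB_CPU_PARTIAL
	#define slub_percpu_partial(c)			((c)->partial)
	/* Pop @p off the per-cpu partial list: the next page becomes the new head. */
	#define slub_set_percpu_partial(c, p)		(slub_percpu_partial(c) = (p)->next)
	#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
	#else
	#define slub_percpu_partial(c)			NULL
	#define slub_set_percpu_partial(c, p)
	#define slub_percpu_partial_read_once(c)	NULL
	#endif
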
 
@@ -3410,6 +3404,39 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
        s->min_partial = min;
 }
 
+static void set_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+       /*
+        * cpu_partial determines the maximum number of objects kept in the
+        * per cpu partial lists of a processor.
+        *
+        * Per cpu partial lists mainly contain slabs that just have one
+        * object freed. If they are used for allocation then they can be
+        * filled up again with minimal effort. The slab will never hit the
+        * per node partial lists and therefore no locking will be required.
+        *
+        * This setting also determines
+        *
+        * A) The number of objects from per cpu partial slabs dumped to the
+        *    per node list when we reach the limit.
+        * B) The number of objects in cpu partial slabs to extract from the
+        *    per node list when we run out of per cpu objects. We only fetch
+        *    50% to keep some capacity around for frees.
+        */
+       if (!kmem_cache_has_cpu_partial(s))
+               s->cpu_partial = 0;
+       else if (s->size >= PAGE_SIZE)
+               s->cpu_partial = 2;
+       else if (s->size >= 1024)
+               s->cpu_partial = 6;
+       else if (s->size >= 256)
+               s->cpu_partial = 13;
+       else
+               s->cpu_partial = 30;
+#endif
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
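
Read the new set_cpu_partial() ladder as: the smaller the object, the more objects are worth caching per CPU. As a purely illustrative example (assuming a 4 KiB PAGE_SIZE and generic kmalloc caches): a 64-byte cache would get cpu_partial = 30, a 512-byte cache 13, a 2048-byte cache 6, and any cache of a page or larger only 2; caches that fail kmem_cache_has_cpu_partial() (for instance when CONFIG_SLUB_CPU_PARTIAL is disabled) are forced to 0.
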
@@ -3568,33 +3595,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
         */
        set_min_partial(s, ilog2(s->size) / 2);
 
-       /*
-        * cpu_partial determined the maximum number of objects kept in the
-        * per cpu partial lists of a processor.
-        *
-        * Per cpu partial lists mainly contain slabs that just have one
-        * object freed. If they are used for allocation then they can be
-        * filled up again with minimal effort. The slab will never hit the
-        * per node partial lists and therefore no locking will be required.
-        *
-        * This setting also determines
-        *
-        * A) The number of objects from per cpu partial slabs dumped to the
-        *    per node list when we reach the limit.
-        * B) The number of objects in cpu partial slabs to extract from the
-        *    per node list when we run out of per cpu objects. We only fetch
-        *    50% to keep some capacity around for frees.
-        */
-       if (!kmem_cache_has_cpu_partial(s))
-               s->cpu_partial = 0;
-       else if (s->size >= PAGE_SIZE)
-               s->cpu_partial = 2;
-       else if (s->size >= 1024)
-               s->cpu_partial = 6;
-       else if (s->size >= 256)
-               s->cpu_partial = 13;
-       else
-               s->cpu_partial = 30;
+       set_cpu_partial(s);
 
 #ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 1000;
@@ -3981,7 +3982,7 @@ void __kmemcg_cache_deactivate(struct kmem_cache *s)
         * Disable empty slabs caching. Used to avoid pinning offline
         * memory cgroups by kmem pages that can be freed.
         */
-       s->cpu_partial = 0;
+       slub_set_cpu_partial(s, 0);
        s->min_partial = 0;
 
        /*
@@ -4760,7 +4761,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        total += x;
                        nodes[node] += x;
 
-                       page = READ_ONCE(c->partial);
+                       page = slub_percpu_partial_read_once(c);
                        if (page) {
                                node = page_to_nid(page);
                                if (flags & SO_TOTAL)
@@ -4921,7 +4922,7 @@ SLAB_ATTR(min_partial);
 
 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%u\n", s->cpu_partial);
+       return sprintf(buf, "%u\n", slub_cpu_partial(s));
 }
 
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
@@ -4936,7 +4937,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
        if (objects && !kmem_cache_has_cpu_partial(s))
                return -EINVAL;
 
-       s->cpu_partial = objects;
+       slub_set_cpu_partial(s, objects);
        flush_all(s);
        return length;
 }
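
cpu_partial_show() and cpu_partial_store() back the per-cache cpu_partial sysfs file (typically /sys/kernel/slab/<cache>/cpu_partial); writing 0 there disables per-cpu partial caching for that cache at runtime, which is the same effect __kmemcg_cache_deactivate() achieves above via slub_set_cpu_partial(s, 0).
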
@@ -4988,7 +4989,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
        int len;
 
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+               struct page *page;
+
+               page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page) {
                        pages += page->pages;
@@ -5000,7 +5003,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 
 #ifdef CONFIG_SMP
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+               struct page *page;
+
+               page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page && len < PAGE_SIZE - 20)
                        len += sprintf(buf + len, " C%d=%d(%d)", cpu,