mm/slub.c: wrap cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL
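
Two changes are folded into this blobdiff. First, deactivate_slab() now
takes the owning kmem_cache_cpu and clears c->page and c->freelist
itself, so its callers stop open-coding the same two resets. Second,
every direct access to cpu_slab->partial is routed through new accessor
macros, which is what allows the partial field to be compiled out
entirely when CONFIG_SLUB_CPU_PARTIAL is disabled.

The accessors themselves are defined outside this file (alongside
struct kmem_cache_cpu, i.e. include/linux/slub_def.h at this point in
history). Judging by how the call sites below use them, they look
roughly like the following sketch -- an illustration of the intended
semantics, not a verbatim quote of the header:

	#ifdef CONFIG_SLUB_CPU_PARTIAL
	#define slub_percpu_partial(c)		((c)->partial)

	/*
	 * The per-cpu partial list is threaded through page->next, so
	 * "setting" from a page advances the list head past that page.
	 */
	#define slub_set_percpu_partial(c, p)		\
	({						\
		slub_percpu_partial(c) = (p)->next;	\
	})

	/* Single racy load for lockless readers (stats, sysfs). */
	#define slub_percpu_partial_read_once(c) \
		READ_ONCE(slub_percpu_partial(c))
	#else
	#define slub_percpu_partial(c)			NULL
	#define slub_set_percpu_partial(c, p)
	#define slub_percpu_partial_read_once(c)	NULL
	#endif /* CONFIG_SLUB_CPU_PARTIAL */

With CONFIG_SLUB_CPU_PARTIAL=n the accessors collapse to constant NULLs
and a no-op, so every branch guarded by them becomes dead code that the
compiler drops along with the field.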
index 8addc535bcdc58794fe40e72a729e4589d44d2b6..48071c54127537ba7c4ec981f9d867b6fd4a42fb 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1993,7 +1993,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
  * Remove the cpu slab
  */
 static void deactivate_slab(struct kmem_cache *s, struct page *page,
-                               void *freelist)
+                               void *freelist, struct kmem_cache_cpu *c)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2132,6 +2132,9 @@ redo:
                discard_slab(s, page);
                stat(s, FREE_SLAB);
        }
+
+       c->page = NULL;
+       c->freelist = NULL;
 }
 
 /*
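
With deactivate_slab() clearing c->page and c->freelist on its way out,
each caller can drop its duplicated two-line reset; most of the hunks
below (flush_slab(), the node-mismatch and pfmemalloc paths, and the
debug path at the end of ___slab_alloc()) are that mechanical cleanup
at the call sites.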
@@ -2266,11 +2269,9 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
        stat(s, CPUSLAB_FLUSH);
-       deactivate_slab(s, c->page, c->freelist);
+       deactivate_slab(s, c->page, c->freelist, c);
 
        c->tid = next_tid(c->tid);
-       c->page = NULL;
-       c->freelist = NULL;
 }
 
 /*
@@ -2302,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
        struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-       return c->page || c->partial;
+       return c->page || slub_percpu_partial(c);
 }
 
 static void flush_all(struct kmem_cache *s)
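
In a build without CONFIG_SLUB_CPU_PARTIAL, slub_percpu_partial(c)
evaluates to a constant NULL here, so has_cpu_slab() reduces to a plain
c->page test instead of touching a field that no longer exists.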
@@ -2521,9 +2522,7 @@ redo:
 
                if (unlikely(!node_match(page, searchnode))) {
                        stat(s, ALLOC_NODE_MISMATCH);
-                       deactivate_slab(s, page, c->freelist);
-                       c->page = NULL;
-                       c->freelist = NULL;
+                       deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
                }
        }
@@ -2534,9 +2533,7 @@ redo:
         * information when the page leaves the per-cpu allocator
         */
        if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-               deactivate_slab(s, page, c->freelist);
-               c->page = NULL;
-               c->freelist = NULL;
+               deactivate_slab(s, page, c->freelist, c);
                goto new_slab;
        }
 
@@ -2568,11 +2565,10 @@ load_freelist:
 
 new_slab:
 
-       if (c->partial) {
-               page = c->page = c->partial;
-               c->partial = page->next;
+       if (slub_percpu_partial(c)) {
+               page = c->page = slub_percpu_partial(c);
+               slub_set_percpu_partial(c, page);
                stat(s, CPU_PARTIAL_ALLOC);
-               c->freelist = NULL;
                goto redo;
        }
 
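
Two details in this hunk are easy to miss: slub_set_percpu_partial(c,
page) advances the per-cpu partial list head to page->next, exactly
what the removed open-coded assignment did; and the old
c->freelist = NULL goes away because, on every path that reaches
new_slab, c->freelist is already NULL (either deactivate_slab() just
cleared it, or the slow path was entered without a cpu freelist), so
the reset was redundant.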
@@ -2592,9 +2588,7 @@ new_slab:
                        !alloc_debug_processing(s, page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
-       deactivate_slab(s, page, get_freepointer(s, freelist));
-       c->page = NULL;
-       c->freelist = NULL;
+       deactivate_slab(s, page, get_freepointer(s, freelist), c);
        return freelist;
 }
 
@@ -4760,7 +4754,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        total += x;
                        nodes[node] += x;
 
-                       page = READ_ONCE(c->partial);
+                       page = slub_percpu_partial_read_once(c);
                        if (page) {
                                node = page_to_nid(page);
                                if (flags & SO_TOTAL)
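
slub_percpu_partial_read_once() is the READ_ONCE() flavour of the
accessor: show_slab_objects() walks other CPUs' cpu_slab structures
locklessly, so the single racy load stays explicit while the bare
READ_ONCE(c->partial), which would not build with the field configured
out, disappears.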
@@ -4988,7 +4982,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
        int len;
 
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+               struct page *page;
+
+               page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page) {
                        pages += page->pages;
@@ -5000,7 +4996,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 
 #ifdef CONFIG_SMP
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+               struct page *page;
+
+               page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page && len < PAGE_SIZE - 20)
                        len += sprintf(buf + len, " C%d=%d(%d)", cpu,