sl[au]b: always get the cache from its page in kmem_cache_free()
author		Glauber Costa <glommer@parallels.com>
		Sat, 3 Nov 2012 00:42:31 +0000 (11:42 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Tue, 6 Nov 2012 04:51:58 +0000 (15:51 +1100)
struct page already has this information.  If we start chaining caches
(a root cache with per-memcg children), the cache recorded in the page
will always be more trustworthy than whatever is passed into the
function by the caller.
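
The lookup this patch centralizes in cache_from_obj() boils down to
the following (illustrative sketch only, not part of the diff below):

	/* head page of the slab this object sits on */
	struct page *page = virt_to_head_page(objp);
	/* the cache the object really belongs to */
	struct kmem_cache *actual = page->slab_cache;

With per-memcg children chained off a root cache, 'actual' may be a
child cache even when the caller passes in the root.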

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/slab.c
mm/slab.h
mm/slob.c
mm/slub.c

index 16bff74940cd1fe44c5803216f73019f3449146f..d77d88d40d74c3687b5b32fa55b7c3330afa458b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -547,6 +547,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
        return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+static inline bool memcg_kmem_enabled(void)
+{
+       return false;
+}
+
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 {
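
For reference, the CONFIG_MEMCG_KMEM=y counterpart of this new stub
(added earlier in the same series) is expected to be backed by a
static key, so the disabled case costs a single nop.  A rough sketch,
not part of this patch:

	/* assumption: key name as introduced elsewhere in the series */
	extern struct static_key memcg_kmem_enabled_key;

	static inline bool memcg_kmem_enabled(void)
	{
		return static_key_false(&memcg_kmem_enabled_key);
	}

The stub added here exists so that cache_from_obj() in mm/slab.h below
compiles the memcg check away when kmemcg is configured out.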
index cb9fb9c1de68b3384c2f77cb43c28720d72f1de7..dd11b5ccceb2516ea0af79e2ec2ccb43158bfe2f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include       <linux/slab.h>
-#include       "slab.h"
 #include       <linux/mm.h>
 #include       <linux/poison.h>
 #include       <linux/swap.h>
@@ -128,6 +127,8 @@

 #include       "internal.h"
 
+#include       "slab.h"
+
 /*
  * DEBUG       - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *               0 for faster, smaller code (especially in the critical paths).
@@ -3938,6 +3939,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
        unsigned long flags;
+       cachep = cache_from_obj(cachep, objp);
+       if (!cachep)
+               return;
 
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, cachep->object_size);
index 22eb5aa2f81051485fa93d487e9bc303c3562a87..fb1c4c4d49654bfe3e4b5feae3544d4e74916e55 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -108,6 +108,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
        return (is_root_cache(cachep) && !memcg) ||
                                (cachep->memcg_params->memcg == memcg);
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+                                       struct kmem_cache *p)
+{
+       return (p == s) ||
+               (s->memcg_params && (p == s->memcg_params->root_cache));
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -119,5 +126,37 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 {
        return true;
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+                                     struct kmem_cache *p)
+{
+       return true;
+}
 #endif
+
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+       struct kmem_cache *cachep;
+       struct page *page;
+
+       /*
+        * When kmemcg is not being used, both assignments should return the
+        * same value, but we don't want to pay the assignment price in that
+        * case. If it is not compiled in, the compiler should be smart enough
+        * not to do the assignment at all. In that case, slab_equal_or_root
+        * will also be a constant.
+        */
+       if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+               return s;
+
+       page = virt_to_head_page(x);
+       cachep = page->slab_cache;
+       if (slab_equal_or_root(cachep, s))
+               return cachep;
+
+       pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+               __func__, cachep->name, s->name);
+       WARN_ON_ONCE(1);
+       return s;
+}
 #endif
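
To make the slab_equal_or_root() case concrete, a sketch (variable
names hypothetical) of the situation it is there to accept: an object
allocated from a per-memcg child cache but freed through the root
cache pointer.

	/* 's' is the root cache the caller passed to kmem_cache_free() */
	struct kmem_cache *child = virt_to_head_page(x)->slab_cache;
	/*
	 * If the object came from a per-memcg child of 's', then
	 * child->memcg_params->root_cache == s, so slab_equal_or_root()
	 * returns true and cache_from_obj() hands back 'child': the free
	 * lands in the cache the object actually came from.
	 */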
index 87e16c4d9143621f608f45f675b4be467add5961..52381c41f2b843609d568d3afacb4ed0468a190b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -58,7 +58,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include "slab.h"
 
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
@@ -73,6 +72,7 @@
 
 #include <linux/atomic.h>
 
+#include "slab.h"
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
index 50ee6e334b4876b32e0441f743ab84d6ec359f0d..7edb99e6bb057ba9a2273bd98171783fd2a040e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2611,19 +2611,10 @@ redo:
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-       struct page *page;
-
-       page = virt_to_head_page(x);
-
-       if (kmem_cache_debug(s) && page->slab_cache != s) {
-               pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-                       " is from  %s\n", page->slab_cache->name, s->name);
-               WARN_ON_ONCE(1);
+       s = cache_from_obj(s, x);
+       if (!s)
                return;
-       }
-
-       slab_free(s, page, x, _RET_IP_);
-
+       slab_free(s, virt_to_head_page(x), x, _RET_IP_);
        trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
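
A hedged end-to-end illustration of the mismatch path both allocators
now report through cache_from_obj().  Cache names are hypothetical, and
it assumes consistency checking is active for the caches involved (for
SLUB, e.g. by booting with slub_debug so SLAB_DEBUG_FREE is set):

	struct kmem_cache *a = kmem_cache_create("demo_a", 64, 0, 0, NULL);
	struct kmem_cache *b = kmem_cache_create("demo_b", 64, 0, 0, NULL);
	void *obj = kmem_cache_alloc(a, GFP_KERNEL);

	/*
	 * Wrong cache on purpose: with debug checking enabled this is
	 * expected to hit the pr_err()/WARN_ON_ONCE() in cache_from_obj().
	 */
	kmem_cache_free(b, obj);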