mm: memcg: lookup_page_cgroup (almost) never returns NULL
author    Johannes Weiner <jweiner@redhat.com>
          Thu, 8 Dec 2011 04:42:46 +0000 (15:42 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 13 Dec 2011 06:43:32 +0000 (17:43 +1100)
Pages have their corresponding page_cgroup descriptors set up before they
are used in userspace, and thus managed by a memory cgroup.

The only time lookup_page_cgroup() can return NULL is in the
CONFIG_DEBUG_VM-only page sanity checking code that executes while feeding
pages into the page allocator for the first time.

Remove the NULL checks against lookup_page_cgroup() results from all
callsites where we know that corresponding page_cgroup descriptors must be
allocated, and add a comment to the callsite that actually does have to
check the return value.
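
To illustrate the convention this establishes, here is a minimal caller
sketch (not part of the patch; the function name is hypothetical) showing
how the ordinary charge/uncharge/stat paths can now dereference the lookup
result directly:

    /*
     * Hypothetical caller sketch, not part of this patch: once a page
     * has been handed out to userspace, lookup_page_cgroup() is
     * guaranteed to return a valid descriptor, so no NULL check is
     * needed before dereferencing it.
     */
    static void example_memcg_page_op(struct page *page)
    {
            struct page_cgroup *pc = lookup_page_cgroup(page);

            lock_page_cgroup(pc);           /* no NULL check needed */
            if (PageCgroupUsed(pc))
                    ;       /* ... charge/uncharge/stat update ... */
            unlock_page_cgroup(pc);
    }

Only code that can run while pages are first being fed into the page
allocator, as in the callsite commented below, still has to tolerate a
NULL return.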

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index 5f06f4b5458798f8d8f0cd845c31de1a8bbc6976..ff50e81fe1710643c7f32ee9c4428b13f23f9ec9 100644 (file)
@@ -1894,9 +1894,6 @@ void mem_cgroup_update_page_stat(struct page *page,
        bool need_unlock = false;
        unsigned long uninitialized_var(flags);
 
-       if (unlikely(!pc))
-               return;
-
        rcu_read_lock();
        memcg = pc->mem_cgroup;
        if (unlikely(!memcg || !PageCgroupUsed(pc)))
@@ -2669,8 +2666,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        }
 
        pc = lookup_page_cgroup(page);
-       BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
-
        ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
        if (ret || !memcg)
                return ret;
@@ -2942,7 +2937,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
         * Check if our page_cgroup is valid
         */
        pc = lookup_page_cgroup(page);
-       if (unlikely(!pc || !PageCgroupUsed(pc)))
+       if (unlikely(!PageCgroupUsed(pc)))
                return NULL;
 
        lock_page_cgroup(pc);
@@ -3326,6 +3321,10 @@ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
        struct page_cgroup *pc;
 
        pc = lookup_page_cgroup(page);
+       /*
+        * Can be NULL while feeding pages into the page allocator for
+        * the first time, i.e. during boot or memory hotplug.
+        */
        if (likely(pc) && PageCgroupUsed(pc))
                return pc;
        return NULL;
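
For context, a simplified sketch of the lookup itself, assuming the
SPARSEMEM flavor of that kernel era (this is an approximation for
illustration, not part of this diff): the result is NULL only while the
per-section page_cgroup array has not been allocated yet, which is exactly
the boot/hotplug window the new comment describes.

    struct page_cgroup *lookup_page_cgroup(struct page *page)
    {
            unsigned long pfn = page_to_pfn(page);
            struct mem_section *section = __pfn_to_section(pfn);

            /*
             * Array not allocated yet: only possible while the page
             * allocator is being fed its initial pages, i.e. during
             * boot or memory hotplug.
             */
            if (!section->page_cgroup)
                    return NULL;
            return section->page_cgroup + pfn;
    }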