memcg: mem_cgroup_charge never NULL
author Hugh Dickins <hugh@veritas.com>
Tue, 4 Mar 2008 22:29:08 +0000 (14:29 -0800)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 5 Mar 2008 00:35:15 +0000 (16:35 -0800)
My memcgroup patch to fix the hang with shmem/tmpfs added NULL page handling to
mem_cgroup_charge_common.  It seemed convenient at the time, but it is hard to
justify now: there's a perfectly appropriate swappage to charge and uncharge
instead; this is not on any hot path through shmem_getpage; and no performance
hit was observed from the slight extra overhead.
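
The resulting retry path in shmem_getpage, condensed from the mm/shmem.c
hunk below (a sketch, not the full context), charges the swappage itself so
the memory cgroup can reclaim before the add_to_page_cache retry, and then
drops that charge again:

        if (error == -ENOMEM) {
                /* charge the actual swappage: allows reclaim from
                 * this memory cgroup before we retry */
                error = mem_cgroup_cache_charge(swappage,
                        current->mm, gfp & ~__GFP_HIGHMEM);
                if (error) {
                        page_cache_release(swappage);
                        goto failed;
                }
                /* the charge was only needed to drive reclaim */
                mem_cgroup_uncharge_page(swappage);
        }
        page_cache_release(swappage);
        goto repeat;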

So revert that NULL page handling from mem_cgroup_charge_common, and make the
function clearer by bringing page_cgroup_assign_new_page_cgroup into its body -
that was a helper I found more of a hindrance than a help to understanding.
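
In mem_cgroup_charge_common itself, the helper's body now appears inline, in
the usual allocate-outside-the-lock, check-under-the-lock shape (condensed
from the mm/memcontrol.c hunks below):

        lock_page_cgroup(page);
        if (page_get_page_cgroup(page)) {
                /* lost the race: another page_cgroup was assigned
                 * while we allocated ours, so undo the charge and retry */
                unlock_page_cgroup(page);
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
                goto retry;
        }
        page_assign_page_cgroup(page, pc);
        unlock_page_cgroup(page);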

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c
mm/shmem.c

mm/memcontrol.c
index 9e170d3c71e5f246bbc2f428126228d7a696f39f..83ba13ad31e16d69d32087bf1a02285b72e3844b 100644
@@ -300,25 +300,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-/*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-                                               struct page_cgroup *pc)
-{
-       int ret = 0;
-
-       lock_page_cgroup(page);
-       if (!page_get_page_cgroup(page))
-               page_assign_page_cgroup(page, pc);
-       else /* A page is tied to other pc. */
-               ret = 1;
-       unlock_page_cgroup(page);
-       return ret;
-}
-
 /*
  * Clear page->page_cgroup member under lock_page_cgroup().
  * If given "pc" value is different from one page->page_cgroup,
@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
         * with it
         */
 retry:
-       if (page) {
-               lock_page_cgroup(page);
-               pc = page_get_page_cgroup(page);
-               /*
-                * The page_cgroup exists and
-                * the page has already been accounted.
-                */
-               if (pc) {
-                       if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-                               /* this page is under being uncharged ? */
-                               unlock_page_cgroup(page);
-                               cpu_relax();
-                               goto retry;
-                       } else {
-                               unlock_page_cgroup(page);
-                               goto done;
-                       }
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       /*
+        * The page_cgroup exists and
+        * the page has already been accounted.
+        */
+       if (pc) {
+               if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+                       /* this page is under being uncharged ? */
+                       unlock_page_cgroup(page);
+                       cpu_relax();
+                       goto retry;
+               } else {
+                       unlock_page_cgroup(page);
+                       goto done;
                }
-               unlock_page_cgroup(page);
        }
+       unlock_page_cgroup(page);
 
        pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
@@ -663,7 +642,9 @@ retry:
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
                pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-       if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+       lock_page_cgroup(page);
+       if (page_get_page_cgroup(page)) {
+               unlock_page_cgroup(page);
                /*
                 * Another charge has been added to this page already.
                 * We take lock_page_cgroup(page) again and read
@@ -672,10 +653,10 @@ retry:
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
                kfree(pc);
-               if (!page)
-                       goto done;
                goto retry;
        }
+       page_assign_page_cgroup(page, pc);
+       unlock_page_cgroup(page);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
mm/shmem.c
index 90b576cbc06e40fb0f43d06f37f5ef24deee9f13..3372bc579e896f474ef24cffa929b69228ff9445 100644
@@ -1370,14 +1370,17 @@ repeat:
                        shmem_swp_unmap(entry);
                        spin_unlock(&info->lock);
                        unlock_page(swappage);
-                       page_cache_release(swappage);
                        if (error == -ENOMEM) {
                                /* allow reclaim from this memory cgroup */
-                               error = mem_cgroup_cache_charge(NULL,
+                               error = mem_cgroup_cache_charge(swappage,
                                        current->mm, gfp & ~__GFP_HIGHMEM);
-                               if (error)
+                               if (error) {
+                                       page_cache_release(swappage);
                                        goto failed;
+                               }
+                               mem_cgroup_uncharge_page(swappage);
                        }
+                       page_cache_release(swappage);
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {