mm: memcg: shorten preempt-disabled section around event checks
author    Johannes Weiner <jweiner@redhat.com>
          Fri, 16 Dec 2011 04:50:35 +0000 (15:50 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 20 Dec 2011 07:44:00 +0000 (18:44 +1100)
Only the ratelimit checks themselves have to run with preemption disabled;
the resulting actions - checking for usage thresholds, updating the soft
limit tree - can and should run with preemption enabled.
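
To illustrate the shape of the change, here is a minimal userspace sketch
of the same pattern: sample the cheap ratelimit state inside the critical
section, record the verdicts in local booleans, leave the critical section,
and only then run the expensive follow-up work.  The helper and
work-function names are hypothetical stand-ins for the memcg code in the
diff below; plain globals stand in for per-CPU state, and the step values
are illustrative:

/* Minimal userspace sketch, NOT the kernel code: globals stand in for
 * per-CPU counters, and the comment markers stand in for
 * preempt_disable()/preempt_enable(). */
#include <stdbool.h>
#include <stdio.h>

static unsigned long events;            /* stand-in for the event counter */
static unsigned long thresh_target;     /* stand-in for targets[THRESH] */
static unsigned long softlimit_target;  /* stand-in for targets[SOFTLIMIT] */

/* Cheap check-and-rearm, analogous to mem_cgroup_event_ratelimit(). */
static bool ratelimit(unsigned long *target, unsigned long step)
{
        /* Wraparound-safe "counter passed target", as in time_after(). */
        if ((long)*target - (long)events < 0) {
                *target = events + step;
                return true;
        }
        return false;
}

/* Hypothetical stand-ins for mem_cgroup_threshold() and
 * mem_cgroup_update_tree(): the expensive, preemptible work. */
static void threshold_work(void) { puts("checking usage thresholds"); }
static void softlimit_work(void) { puts("updating the soft limit tree"); }

int main(void)
{
        events = 100;   /* pretend 100 events have been counted */

        /* --- preempt_disable() in the kernel: only cheap checks here --- */
        bool do_thresh = ratelimit(&thresh_target, 128);
        bool do_softlimit = do_thresh &&
                            ratelimit(&softlimit_target, 1024);
        /* --- preempt_enable(): the verdicts now live in locals --- */

        if (do_thresh)
                threshold_work();
        if (do_softlimit)
                softlimit_work();
        return 0;
}

In the kernel version the critical section must exist at all because
__this_cpu_read()/__this_cpu_write() are only safe against migration to
another CPU while preemption is off; everything that does not touch the
per-CPU state can run outside it.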

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Yong Zhang <yong.zhang0@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-by: Luis Henriques <henrix@camandro.org>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 85d799ca162fb145550e869a378956243ba5a321..a6342fb5f29b1987c92bf0641cec68967a1161bc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -753,37 +753,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
        return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
+static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+                                      enum mem_cgroup_events_target target)
 {
        unsigned long val, next;
 
        val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
        next = __this_cpu_read(memcg->stat->targets[target]);
        /* from time_after() in jiffies.h */
-       return ((long)next - (long)val < 0);
-}
-
-static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
-{
-       unsigned long val, next;
-
-       val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-
-       switch (target) {
-       case MEM_CGROUP_TARGET_THRESH:
-               next = val + THRESHOLDS_EVENTS_TARGET;
-               break;
-       case MEM_CGROUP_TARGET_SOFTLIMIT:
-               next = val + SOFTLIMIT_EVENTS_TARGET;
-               break;
-       case MEM_CGROUP_TARGET_NUMAINFO:
-               next = val + NUMAINFO_EVENTS_TARGET;
-               break;
-       default:
-               return;
+       if ((long)next - (long)val < 0) {
+               switch (target) {
+               case MEM_CGROUP_TARGET_THRESH:
+                       next = val + THRESHOLDS_EVENTS_TARGET;
+                       break;
+               case MEM_CGROUP_TARGET_SOFTLIMIT:
+                       next = val + SOFTLIMIT_EVENTS_TARGET;
+                       break;
+               case MEM_CGROUP_TARGET_NUMAINFO:
+                       next = val + NUMAINFO_EVENTS_TARGET;
+                       break;
+               default:
+                       break;
+               }
+               __this_cpu_write(memcg->stat->targets[target], next);
+               return true;
        }
-
-       __this_cpu_write(memcg->stat->targets[target], next);
+       return false;
 }
 
 /*
@@ -794,25 +789,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 {
        preempt_disable();
        /* threshold event is triggered in finer grain than soft limit */
-       if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
+       if (unlikely(mem_cgroup_event_ratelimit(memcg,
+                                               MEM_CGROUP_TARGET_THRESH))) {
+               bool do_softlimit, do_numainfo;
+
+               do_softlimit = mem_cgroup_event_ratelimit(memcg,
+                                               MEM_CGROUP_TARGET_SOFTLIMIT);
+#if MAX_NUMNODES > 1
+               do_numainfo = mem_cgroup_event_ratelimit(memcg,
+                                               MEM_CGROUP_TARGET_NUMAINFO);
+#endif
+               preempt_enable();
+
                mem_cgroup_threshold(memcg);
-               __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
-               if (unlikely(__memcg_event_check(memcg,
-                            MEM_CGROUP_TARGET_SOFTLIMIT))) {
+               if (unlikely(do_softlimit))
                        mem_cgroup_update_tree(memcg, page);
-                       __mem_cgroup_target_update(memcg,
-                                                  MEM_CGROUP_TARGET_SOFTLIMIT);
-               }
 #if MAX_NUMNODES > 1
-               if (unlikely(__memcg_event_check(memcg,
-                       MEM_CGROUP_TARGET_NUMAINFO))) {
+               if (unlikely(do_numainfo))
                        atomic_inc(&memcg->numainfo_events);
-                       __mem_cgroup_target_update(memcg,
-                               MEM_CGROUP_TARGET_NUMAINFO);
-               }
 #endif
-       }
-       preempt_enable();
+       } else
+               preempt_enable();
 }
 
 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
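
For reference, the "(long)next - (long)val < 0" test that the new helper
keeps is the wraparound-safe ordering comparison used by time_after() in
include/linux/jiffies.h.  A short standalone sketch (illustrative values
only) of why the signed difference keeps working when the unsigned target
wraps past zero:

#include <stdio.h>

/* Wraparound-safe "val has passed next", as in time_after(). */
static int passed(unsigned long val, unsigned long next)
{
        return (long)next - (long)val < 0;
}

int main(void)
{
        /* Plain case: counter 10 has passed target 5. */
        printf("%d\n", passed(10, 5));          /* prints 1 */

        /* Wrapped case: the target wrapped past zero, the counter has
         * not yet.  A naive "val > next" would wrongly report the
         * target as already passed. */
        unsigned long val = (unsigned long)-4;  /* i.e. ULONG_MAX - 3 */
        unsigned long next = 2;                 /* wrapped target */
        printf("%d\n", passed(val, next));      /* prints 0: 6 events to go */
        return 0;
}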