sched: cgroup: Implement different treatment for idle shares
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 3 Dec 2009 17:00:07 +0000 (18:00 +0100)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 10 Aug 2010 17:20:35 +0000 (10:20 -0700)
commit cd8ad40de36c2fe75f3b731bd70198b385895246 upstream.

When setting the weight for a per-cpu task-group, we have to put in a
phantom weight when there is no work on that cpu; otherwise we will not
service that cpu when new work gets placed there until we update the
per-cpu weights again.

We used to add these phantom weights to the total so that the idle
per-cpu shares would not get inflated; this, however, causes the non-idle
parts to get deflated, resulting in unexpected weight distributions.

Reverse this, so that the non-idle shares are correct but the idle
shares are inflated.

Reported-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Tested-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1257934048.23203.76.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
kernel/sched.c

index a0157c72c8f99ca9a6fd6e2c6c479716a8be7419..d0958da992d7d6d4ce247c98915d59acd5b19c62 100644
@@ -1623,7 +1623,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
  */
 static int tg_shares_up(struct task_group *tg, void *data)
 {
-       unsigned long weight, rq_weight = 0, shares = 0;
+       unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
        unsigned long *usd_rq_weight;
        struct sched_domain *sd = data;
        unsigned long flags;
@@ -1639,6 +1639,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
                weight = tg->cfs_rq[i]->load.weight;
                usd_rq_weight[i] = weight;
 
+               rq_weight += weight;
                /*
                 * If there are currently no tasks on the cpu pretend there
                 * is one of average load so that when a new task gets to
@@ -1647,10 +1648,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
                if (!weight)
                        weight = NICE_0_LOAD;
 
-               rq_weight += weight;
+               sum_weight += weight;
                shares += tg->cfs_rq[i]->shares;
        }
 
+       if (!rq_weight)
+               rq_weight = sum_weight;
+
        if ((!shares && rq_weight) || shares > tg->shares)
                shares = tg->shares;
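
As a rough illustration of the change above, here is a standalone user-space
sketch (not kernel code) that models the old and new accumulation with made-up
per-cpu loads. The NICE_0_LOAD value, the per-cpu load numbers, and the
proportional shares formula (tg->shares * weight / divisor, standing in for
what update_group_shares_cpu() does, which is not shown in this hunk) are
assumptions for demonstration only.

/*
 * Hedged sketch, not kernel code: model the divisor change with made-up
 * per-cpu loads.  NICE_0_LOAD and the proportional shares formula are
 * simplified stand-ins for what update_group_shares_cpu() computes.
 */
#include <stdio.h>

#define NICE_0_LOAD     1024UL
#define NR_CPUS         4

int main(void)
{
        /* hypothetical cfs_rq loads: two busy cpus, two idle ones */
        unsigned long load[NR_CPUS] = { 2048, 1024, 0, 0 };
        unsigned long tg_shares = 1024;
        unsigned long rq_weight = 0, sum_weight = 0;
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                unsigned long weight = load[i];

                rq_weight += weight;            /* new divisor: real load only */

                if (!weight)                    /* idle cpu: phantom weight so a */
                        weight = NICE_0_LOAD;   /* newly placed task gets serviced */

                sum_weight += weight;           /* old divisor: phantoms included */
        }

        if (!rq_weight)                         /* group idle everywhere: */
                rq_weight = sum_weight;         /* fall back to the phantom total */

        for (i = 0; i < NR_CPUS; i++) {
                unsigned long weight = load[i] ? load[i] : NICE_0_LOAD;

                printf("cpu%d: old share %lu, new share %lu\n", i,
                       tg_shares * weight / sum_weight,  /* deflates busy cpus */
                       tg_shares * weight / rq_weight);  /* inflates idle cpus */
        }
        return 0;
}

With these example numbers the old divisor (5120) hands the busy cpu0 only 409
of the group's 1024 shares instead of the ~682 its 2:1 load ratio calls for,
while the new divisor (3072) gets the busy cpus right at the cost of handing
each idle cpu an inflated 341.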