Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Oct 2015 13:31:39 +0000 (22:31 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Oct 2015 13:31:39 +0000 (22:31 +0900)
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes all around the map: an instrumentation fix, a nohz
  usability fix, a lockdep annotation fix and two task group scheduling
  fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Add missing lockdep_unpin() annotations
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  nohz: Revert "nohz: Set isolcpus when nohz_full is set"
  sched/fair: Update task group's load_avg after task migration
  sched/fair: Fix overly small weight for interactive group entities
  sched, tracing: Stop/start critical timings around the idle=poll idle loop

kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 10a8faa1b0d4a5f737bd9008eb8f9a7e817b6ec9..bcd214e4b4d630eb3304f6000a220dc881bafc24 100644
@@ -2366,8 +2366,15 @@ void wake_up_new_task(struct task_struct *p)
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Nothing relies on rq->lock after this, so it's fine to
+                * drop it.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
        task_rq_unlock(rq, p, &flags);
 }
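
For context: lockdep lock pinning was added this cycle so that lockdep
warns if a "pinned" lock is dropped. Callbacks such as ->task_woken()
may legitimately drop and retake rq->lock, so they have to be bracketed
by an unpin/pin pair. A minimal sketch of the discipline, with
rq_region() as a hypothetical stand-in for any scheduler path that pins
rq->lock on entry:

	#include <linux/lockdep.h>

	/* Hypothetical helper illustrating the pin/unpin discipline. */
	static void rq_region(struct rq *rq, struct task_struct *p)
	{
		raw_spin_lock(&rq->lock);
		lockdep_pin_lock(&rq->lock);	/* dropping rq->lock now would warn */

		/* ... work that must hold rq->lock throughout ... */

		if (p->sched_class->task_woken) {
			lockdep_unpin_lock(&rq->lock);	/* callback may drop/retake it */
			p->sched_class->task_woken(rq, p);
			lockdep_pin_lock(&rq->lock);	/* restore the pin */
		}

		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock(&rq->lock);
	}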
@@ -7238,9 +7245,6 @@ void __init sched_init_smp(void)
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
-       /* nohz_full won't take effect without isolating the cpus. */
-       tick_nohz_full_add_cpus_to(cpu_isolated_map);
-
        sched_init_numa();
 
        /*
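
This is the "nohz usability fix" from the merge message: nohz_full= no
longer implicitly adds those CPUs to cpu_isolated_map, since not every
nohz_full user wants them excluded from load balancing. Users who do
want both must again request isolation explicitly on the kernel command
line, e.g. (CPU list purely illustrative):

	nohz_full=1-3 isolcpus=1-3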
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fc8f01083527a570af73a8f8b4c6e586e7e332f9..8b0a15e285f9121ccd5540fa11eef49c94f017c1 100644
@@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
-       if (has_pushable_dl_tasks(rq))
+       if (has_pushable_dl_tasks(rq)) {
+               /*
+                * Nothing relies on rq->lock after this, so it's safe to drop
+                * rq->lock.
+                */
+               lockdep_unpin_lock(&rq->lock);
                push_dl_task(rq);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
 
 unlock:
@@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
                int target = find_later_rq(p);
 
                if (target != -1 &&
-                               dl_time_before(p->dl.deadline,
-                                       cpu_rq(target)->dl.earliest_dl.curr))
+                               (dl_time_before(p->dl.deadline,
+                                       cpu_rq(target)->dl.earliest_dl.curr) ||
+                               (cpu_rq(target)->dl.dl_nr_running == 0)))
                        cpu = target;
        }
        rcu_read_unlock();
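
The added condition also accepts a target rq that currently has no
-deadline tasks at all; in that case earliest_dl.curr holds a stale
value, which is the bug being fixed (and what the hunk below guards
against in find_lock_later_rq() as well). For reference,
dl_time_before() is the usual wrap-safe clock comparison from the
scheduler headers, effectively:

	static inline bool dl_time_before(u64 a, u64 b)
	{
		return (s64)(a - b) < 0;
	}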
@@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
                later_rq = cpu_rq(cpu);
 
-               if (!dl_time_before(task->dl.deadline,
+               if (later_rq->dl.dl_nr_running &&
+                   !dl_time_before(task->dl.deadline,
                                        later_rq->dl.earliest_dl.curr)) {
                        /*
                         * Target rq has tasks of equal or earlier deadline,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e2e3483b1ecff588e76103e0b7b7d673f16d2a5..9a5e60fe721a5e7e8036508f61950eeb0c8dea31 100644
@@ -2363,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
         */
        tg_weight = atomic_long_read(&tg->load_avg);
        tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq_load_avg(cfs_rq);
+       tg_weight += cfs_rq->load.weight;
 
        return tg_weight;
 }
@@ -2373,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        long tg_weight, load, shares;
 
        tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq_load_avg(cfs_rq);
+       load = cfs_rq->load.weight;
 
        shares = (tg->shares * load);
        if (tg_weight)
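
A group whose tasks sleep most of the time (the interactive case)
contributes an almost-zero load_avg, so the average-based inputs gave
its group entity a far smaller weight than its queued tasks deserved;
load.weight reflects what is enqueued right now. Worked example with
illustrative numbers (tg->shares = 1024, two nice-0 tasks queued on
this CPU so load = 2048, group-wide tg_weight = 4096):

	shares = tg->shares * load / tg_weight
	       = 1024 * 2048 / 4096
	       = 512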
@@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-       int decayed;
        struct sched_avg *sa = &cfs_rq->avg;
+       int decayed, removed = 0;
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
                sa->load_avg = max_t(long, sa->load_avg - r, 0);
                sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               removed = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
        cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       return decayed;
+       return decayed || removed;
 }
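
The return value tells the caller whether tg->load_avg needs
refreshing; before this change, a migration that only removed load
(with no decay in the same update) returned 0 and the group-wide sum
went stale. Abridged sketch of the caller in this file:

	static inline void update_load_avg(struct sched_entity *se, int update_tg)
	{
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		u64 now = cfs_rq_clock_task(cfs_rq);

		/* ... update the entity's own load average ... */

		if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
			update_tg_load_avg(cfs_rq, 0);
	}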
 
 /* Update task and its cfs_rq load average */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f177c73ae199ba41878fea8de9a5b0a7196620a..4a2ef5a02fd3f91d7c4228378c23d5606bb73812 100644
@@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void)
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
+       stop_critical_timings();
        while (!tif_need_resched() &&
                (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
+       start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
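
stop_critical_timings()/start_critical_timings() (linux/irqflags.h)
tell the irqsoff/preemptoff latency tracers to ignore a section whose
long critical timing is intentional; the cpuidle entry path in this
file already brackets arch_cpu_idle() the same way. Without this,
idle=poll made the tracers report the whole poll loop as a huge
latency. The pattern, as a sketch:

	stop_critical_timings();	/* latency tracers stop timing here */
	while (!need_resched())
		cpu_relax();		/* deliberate busy-wait, not a latency bug */
	start_critical_timings();	/* timing resumes */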