sched/core: Robustify preemption leak checks
author    Peter Zijlstra <peterz@infradead.org>
Mon, 28 Sep 2015 15:57:39 +0000 (17:57 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 6 Oct 2015 15:08:17 +0000 (17:08 +0200)
When we warn about a preempt_count leak, reset the preempt_count to
the known good value such that the problem does not ripple forward.

This is most important on x86, which has a per-CPU preempt_count that
is not saved/restored (after this series). So if you schedule with an
invalid (!= 2*PREEMPT_DISABLE_OFFSET) preempt_count, the next task is
messed up too.

Enforcing this invariant limits the borkage to just the one task.
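
A minimal user-space sketch of that pattern, assuming a single shared
counter in place of the per-CPU preempt_count (leak_count, EXPECTED and
run_task are made-up names for illustration, not kernel APIs):

#include <stdio.h>

#define EXPECTED 0			/* known good value, cf. PREEMPT_ENABLED */

static int leak_count;			/* shared counter, like x86's per-CPU count */

static void check_and_repair(const char *who)
{
	if (leak_count != EXPECTED) {
		printf("note: %s exited with count %d\n", who, leak_count);
		leak_count = EXPECTED;	/* reset so the leak does not ripple forward */
	}
}

static void run_task(const char *who, int leaks)
{
	leak_count += leaks;		/* a buggy task forgets to drop its count */
	check_and_repair(who);
}

int main(void)
{
	run_task("buggy", 2);		/* warns once, then repairs the counter */
	run_task("innocent", 0);	/* silent: the leak was not inherited */
	return 0;
}

Without the reset in check_and_repair(), the second call would report a
bogus count that "innocent" never created, which is exactly the ripple
the patch prevents.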

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/exit.c
kernel/sched/core.c

diff --git a/kernel/exit.c b/kernel/exit.c
index ea95ee1b5ef7038ec1ade3f5cd5d98181177f22f..443677c8efe6ec17ac11dc2fe699f0fcb61fa107 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -706,10 +706,12 @@ void do_exit(long code)
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);
 
-       if (unlikely(in_atomic()))
+       if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
                        preempt_count());
+               preempt_count_set(PREEMPT_ENABLED);
+       }
 
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6344d82a84f61d0ca62417ab6043515fe1eb74c3..d6989f85c641da84830c978a08b3af5fae4fa869 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2968,8 +2968,10 @@ static inline void schedule_debug(struct task_struct *prev)
        BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
 
-       if (unlikely(in_atomic_preempt_off()))
+       if (unlikely(in_atomic_preempt_off())) {
                __schedule_bug(prev);
+               preempt_count_set(PREEMPT_DISABLED);
+       }
        rcu_sleep_check();
 
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
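
A note on the two hunks: each call site repairs to its own known-good
value. do_exit() resets to PREEMPT_ENABLED after warning that the task
exited with a non-zero preempt_count, while schedule_debug() falls back
to PREEMPT_DISABLED inside the scheduler. The toy sketch below only
shows that per-site shape; the numeric values are invented for the
example and are not the kernel's:

#include <stdio.h>

/* Illustrative stand-ins, not the kernel's PREEMPT_* constants. */
#define GOOD_AT_EXIT		0	/* exit path: no leftover count expected */
#define GOOD_IN_SCHEDULE	1	/* scheduler path: its own expected count */

static int count;			/* stand-in for the preempt count */

static void check(const char *where, int good)
{
	if (count != good) {
		printf("%s: bad count %d, resetting to %d\n", where, count, good);
		count = good;		/* limit the damage to the offending task */
	}
}

int main(void)
{
	count = 3;				/* simulate a leaked disable before exit */
	check("do_exit", GOOD_AT_EXIT);
	count = 1;				/* entering the scheduler with the expected count */
	check("__schedule", GOOD_IN_SCHEDULE);	/* stays silent */
	return 0;
}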