/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/paca.h>

/*
 * The watchdog has a simple timer that runs on each CPU, once per timer
 * period. This is the heartbeat.
 *
 * Then there are checks to see if the heartbeat has not triggered on a CPU
 * for the panic timeout period. Currently the watchdog only supports an
 * SMP check, so the heartbeat only turns on when we have 2 or more CPUs.
 *
 * This is not an NMI watchdog, but Linux uses that name for a generic
 * watchdog in some cases, so NMI gets used in some places.
 */
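
/*
 * In outline: the per-CPU timer calls watchdog_timer_interrupt() to record
 * the heartbeat and clear this CPU's bit in the SMP pending mask, while the
 * soft-NMI path (soft_nmi_interrupt()) checks whether this CPU's own
 * heartbeat has been starved for wd_panic_timeout_tb ticks.
 */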

static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */

static DEFINE_PER_CPU(struct timer_list, wd_timer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/*
 * These are for the SMP checker. CPUs clear their pending bit in their
 * heartbeat. If the bitmask becomes empty, the time is noted and the
 * bitmask is refilled.
 *
 * All CPUs clear their bit in the pending mask every timer period.
 * Once all have cleared, the time is noted and the bits are reset.
 * If the time since all clear was greater than the panic timeout,
 * we can panic with the list of stuck CPUs.
 *
 * This will work best with NMI IPIs for crash code so the stuck CPUs
 * can be pulled out to get their backtraces.
 */

static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
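
/*
 * For example, with CPUs 0-3 enabled and none stuck: each CPU's heartbeat
 * clears its bit in wd_smp_cpus_pending; whichever CPU clears the last bit
 * notes the time in wd_smp_last_reset_tb and refills the mask from
 * wd_cpus_enabled minus wd_smp_cpus_stuck. If the mask has not emptied for
 * wd_smp_panic_timeout_tb ticks, the CPUs still pending are the suspects.
 * Bit 0 of __wd_smp_lock serves as a spinlock protecting these masks.
 */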

static inline void wd_smp_lock(unsigned long *flags)
{
        /*
         * Avoid locking layers if possible.
         * This may be called from low level interrupt handlers at some
         * point in future.
         */
        raw_local_irq_save(*flags);
        hard_irq_disable(); /* Make it soft-NMI safe */
        while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
                raw_local_irq_restore(*flags);
                spin_until_cond(!test_bit(0, &__wd_smp_lock));
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
}

static inline void wd_smp_unlock(unsigned long *flags)
{
        clear_bit_unlock(0, &__wd_smp_lock);
        raw_local_irq_restore(*flags);
}
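
/*
 * Callers bracket updates of the masks above with this lock pair, e.g.
 * (as wd_smp_clear_cpu_pending() does for a CPU that became unstuck):
 *
 *        unsigned long flags;
 *
 *        wd_smp_lock(&flags);
 *        cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
 *        wd_smp_unlock(&flags);
 */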

static void wd_lockup_ipi(struct pt_regs *regs)
{
        pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
        print_modules();
        print_irqtrace_events(current);
        if (regs)
                show_regs(regs);
        else
                dump_stack();

        if (hardlockup_panic)
                nmi_panic(regs, "Hard LOCKUP");
}

static void set_cpu_stuck(int cpu, u64 tb)
{
        cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
        cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                wd_smp_last_reset_tb = tb;
                cpumask_andnot(&wd_smp_cpus_pending,
                               &wd_cpus_enabled,
                               &wd_smp_cpus_stuck);
        }
}

static void watchdog_smp_panic(int cpu, u64 tb)
{
        unsigned long flags;
        int c;

        wd_smp_lock(&flags);
        /* Double check some things under lock */
        if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
                goto out;
        if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
                goto out;
        if (cpumask_weight(&wd_smp_cpus_pending) == 0)
                goto out;

        pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n",
                 cpu, cpumask_pr_args(&wd_smp_cpus_pending));

        /* Try to trigger the stuck CPUs. */
        for_each_cpu(c, &wd_smp_cpus_pending) {
                if (c == cpu)
                        continue;
                smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
        }
        smp_flush_nmi_ipi(1000000);

        /* Take the stuck CPUs out of the watch group */
        for_each_cpu(c, &wd_smp_cpus_pending)
                set_cpu_stuck(c, tb);

out:
        wd_smp_unlock(&flags);

        printk_safe_flush();
        /*
         * printk_safe_flush() seems to require another print
         * before anything actually goes out to console.
         */

        if (sysctl_hardlockup_all_cpu_backtrace)
                trigger_allbutself_cpu_backtrace();
        if (hardlockup_panic)
                nmi_panic(NULL, "Hard LOCKUP");
}
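
/*
 * Note that watchdog_smp_panic() passes regs == NULL to nmi_panic():
 * detection happens in the checking CPU's own timer context, so the stuck
 * CPUs' register state is only reachable via the NMI IPIs sent above.
 */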

static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
        if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
                if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
                        unsigned long flags;

                        pr_emerg("Watchdog CPU:%d became unstuck\n", cpu);
                        wd_smp_lock(&flags);
                        cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
                        wd_smp_unlock(&flags);
                }
                return;
        }
        cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
        if (cpumask_empty(&wd_smp_cpus_pending)) {
                unsigned long flags;

                wd_smp_lock(&flags);
                if (cpumask_empty(&wd_smp_cpus_pending)) {
                        wd_smp_last_reset_tb = tb;
                        cpumask_andnot(&wd_smp_cpus_pending,
                                       &wd_cpus_enabled,
                                       &wd_smp_cpus_stuck);
                }
                wd_smp_unlock(&flags);
        }
}

static void watchdog_timer_interrupt(int cpu)
{
        u64 tb = get_tb();

        per_cpu(wd_timer_tb, cpu) = tb;

        wd_smp_clear_cpu_pending(cpu, tb);

        if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
                watchdog_smp_panic(cpu, tb);
}
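
/*
 * watchdog_timer_interrupt() is the heartbeat: it is reached from the
 * per-CPU timer (wd_timer_fn() below) and from explicit touches via
 * arch_touch_nmi_watchdog().
 */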

void soft_nmi_interrupt(struct pt_regs *regs)
{
        unsigned long flags;
        int cpu = raw_smp_processor_id();
        u64 tb;

        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return;

        nmi_enter();
        tb = get_tb();
        if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
                per_cpu(wd_timer_tb, cpu) = tb;

                wd_smp_lock(&flags);
                if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
                        wd_smp_unlock(&flags);
                        goto out;
                }
                set_cpu_stuck(cpu, tb);

                pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                wd_smp_unlock(&flags);

                if (sysctl_hardlockup_all_cpu_backtrace)
                        trigger_allbutself_cpu_backtrace();
                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");
        }
        if (wd_panic_timeout_tb < 0x7fffffff)
                mtspr(SPRN_DEC, wd_panic_timeout_tb);
out:
        nmi_exit();
}
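
/*
 * soft_nmi_interrupt() is reached from the masked decrementer interrupt
 * path, so it runs even while normal interrupts are soft-disabled; that is
 * what makes the per-CPU check NMI-like. The mtspr(SPRN_DEC, ...) above
 * re-arms the decrementer for another full timeout, and is skipped when
 * wd_panic_timeout_tb does not fit in the signed 32-bit decrementer.
 */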

static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
{
        t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
        if (wd_timer_period_ms > 1000)
                t->expires = __round_jiffies_up(t->expires, cpu);
        add_timer_on(t, cpu);
}

static void wd_timer_fn(unsigned long data)
{
        struct timer_list *t = this_cpu_ptr(&wd_timer);
        int cpu = smp_processor_id();

        watchdog_timer_interrupt(cpu);

        wd_timer_reset(cpu, t);
}

void arch_touch_nmi_watchdog(void)
{
        int cpu = smp_processor_id();

        watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
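
/*
 * Sketch of a (hypothetical) caller: long-running loops that legitimately
 * starve the timer call the generic touch_nmi_watchdog(), which lands here:
 *
 *        while (!device_done(dev))        // made-up busy-wait
 *                touch_nmi_watchdog();
 */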

static void start_watchdog_timer_on(unsigned int cpu)
{
        struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

        per_cpu(wd_timer_tb, cpu) = get_tb();

        setup_pinned_timer(t, wd_timer_fn, 0);
        wd_timer_reset(cpu, t);
}

static void stop_watchdog_timer_on(unsigned int cpu)
{
        struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

        del_timer_sync(t);
}

static int start_wd_on_cpu(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
                WARN_ON(1);
                return 0;
        }

        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                return 0;
        if (watchdog_suspended)
                return 0;
        if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
                return 0;

        cpumask_set_cpu(cpu, &wd_cpus_enabled);
        if (cpumask_weight(&wd_cpus_enabled) == 1) {
                cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
                wd_smp_last_reset_tb = get_tb();
        }
        start_watchdog_timer_on(cpu);

        return 0;
}

static int stop_wd_on_cpu(unsigned int cpu)
{
        if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
                return 0; /* Can happen in CPU unplug case */

        stop_watchdog_timer_on(cpu);

        cpumask_clear_cpu(cpu, &wd_cpus_enabled);
        wd_smp_clear_cpu_pending(cpu, get_tb());

        return 0;
}

static void watchdog_calc_timeouts(void)
{
        wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

        /* Have the SMP detector trigger a bit later */
        wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

        /* 2/5 is the factor that the perf based detector uses */
        wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}
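
/*
 * Worked example, assuming the default watchdog_thresh of 10 seconds and
 * the common 512 MHz timebase (ppc_tb_freq = 512000000):
 *
 *        wd_panic_timeout_tb     = 10 * 512000000     = 5120000000 ticks (10s)
 *        wd_smp_panic_timeout_tb = 5120000000 * 3 / 2 = 7680000000 ticks (15s)
 *        wd_timer_period_ms      = 10 * 1000 * 2 / 5  = 4000ms heartbeat
 */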

void watchdog_nmi_reconfigure(void)
{
        int cpu;

        watchdog_calc_timeouts();

        for_each_cpu(cpu, &wd_cpus_enabled)
                stop_wd_on_cpu(cpu);

        for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
                start_wd_on_cpu(cpu);
}

/*
 * This runs after lockup_detector_init() which sets up watchdog_cpumask.
 */
static int __init powerpc_watchdog_init(void)
{
        int err;

        watchdog_calc_timeouts();

        err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
                                start_wd_on_cpu, stop_wd_on_cpu);
        if (err < 0)
                pr_warn("Watchdog could not be initialized\n");

        return 0;
}
arch_initcall(powerpc_watchdog_init);
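
/*
 * Note the dynamic hotplug state: cpuhp_setup_state() with
 * CPUHP_AP_ONLINE_DYN invokes start_wd_on_cpu() on every CPU that is
 * already online and then on each CPU as it comes up, with
 * stop_wd_on_cpu() run as the teardown on unplug.
 */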

static void handle_backtrace_ipi(struct pt_regs *regs)
{
        nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu == smp_processor_id())
                        handle_backtrace_ipi(NULL);
                else
                        smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000);
        }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}