/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>

/*
 * The watchdog has a simple timer that runs on each CPU, once per timer
 * period. This is the heartbeat.
 *
 * Then there are checks to see if the heartbeat has not triggered on a CPU
 * for the panic timeout period. Currently the watchdog only supports an
 * SMP check, so the heartbeat only turns on when we have 2 or more CPUs.
 *
 * This is not an NMI watchdog, but Linux uses that name for a generic
 * watchdog in some cases, so NMI gets used in some places.
 */
static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */

static DEFINE_PER_CPU(struct timer_list, wd_timer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/*
 * These are for the SMP checker. CPUs clear their pending bit in their
 * heartbeat. If the bitmask becomes empty, the time is noted and the
 * bitmask is refilled.
 *
 * All CPUs clear their bit in the pending mask every timer period.
 * Once all have cleared, the time is noted and the bits are reset.
 * If the time since all clear was greater than the panic timeout,
 * we can panic with the list of stuck CPUs.
 *
 * This will work best with NMI IPIs for crash code so the stuck CPUs
 * can be pulled out to get their backtraces.
 */
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;

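/*
 * __wd_smp_lock is a simple test-and-set bit lock rather than a spinlock,
 * so it can be taken with interrupts hard-disabled from the soft-NMI path
 * without going through the normal locking layers. It protects the pending
 * and stuck masks and wd_smp_last_reset_tb above.
 */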
static inline void wd_smp_lock(unsigned long *flags)
{
	/*
	 * Avoid locking layers if possible.
	 * This may be called from low level interrupt handlers at some
	 * point in the future.
	 */
	raw_local_irq_save(*flags);
	hard_irq_disable(); /* Make it soft-NMI safe */
	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
		raw_local_irq_restore(*flags);
		spin_until_cond(!test_bit(0, &__wd_smp_lock));
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
	raw_local_irq_restore(*flags);
}

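/*
 * NMI IPI handler sent to a CPU that the SMP checker believes is stuck.
 * It runs on the stuck CPU, dumps that CPU's state, and panics if
 * hardlockup_panic is set.
 */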
static void wd_lockup_ipi(struct pt_regs *regs)
{
	pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();

	if (hardlockup_panic)
		nmi_panic(regs, "Hard LOCKUP");
}

static void set_cpu_stuck(int cpu, u64 tb)
{
	cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
				&wd_cpus_enabled,
				&wd_smp_cpus_stuck);
	}
}

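/*
 * Called when this CPU notices that other CPUs have not cleared their
 * pending bits for longer than the SMP panic timeout. The conditions are
 * re-checked under the lock, the apparently stuck CPUs are sent an NMI IPI
 * to print their backtraces, and they are then moved to the stuck mask so
 * they are no longer watched.
 */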
static void watchdog_smp_panic(int cpu, u64 tb)
{
	unsigned long flags;
	int c;

	wd_smp_lock(&flags);
	/* Double check some things under lock */
	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
		goto out;
	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
		goto out;
	if (cpumask_weight(&wd_smp_cpus_pending) == 0)
		goto out;

	pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n",
			cpu, cpumask_pr_args(&wd_smp_cpus_pending));

	/* Try to trigger the stuck CPUs. */
	for_each_cpu(c, &wd_smp_cpus_pending) {
		if (c == cpu)
			continue;
		smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
	}
	smp_flush_nmi_ipi(1000000);

	/* Take the stuck CPUs out of the watch group */
	for_each_cpu(c, &wd_smp_cpus_pending)
		set_cpu_stuck(c, tb);

	wd_smp_unlock(&flags);

	printk_safe_flush();
	/*
	 * printk_safe_flush() seems to require another print
	 * before anything actually goes out to console.
	 */
	if (sysctl_hardlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");
	return;

out:
	wd_smp_unlock(&flags);
}

static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
			unsigned long flags;

			pr_emerg("Watchdog CPU:%d became unstuck\n", cpu);
			wd_smp_lock(&flags);
			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
			wd_smp_unlock(&flags);
		}
		return;
	}
	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		unsigned long flags;

		wd_smp_lock(&flags);
		if (cpumask_empty(&wd_smp_cpus_pending)) {
			wd_smp_last_reset_tb = tb;
			cpumask_andnot(&wd_smp_cpus_pending,
					&wd_cpus_enabled,
					&wd_smp_cpus_stuck);
		}
		wd_smp_unlock(&flags);
	}
}

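/*
 * Per-CPU heartbeat: record the current timebase, clear this CPU's pending
 * bit, and run the SMP check against the other CPUs.
 */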
static void watchdog_timer_interrupt(int cpu)
{
	u64 tb = get_tb();

	per_cpu(wd_timer_tb, cpu) = tb;

	wd_smp_clear_cpu_pending(cpu, tb);

	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		watchdog_smp_panic(cpu, tb);
}

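/*
 * Soft-NMI handler: the decrementer acts as the NMI source here, so this is
 * reached when a decrementer interrupt arrives while this CPU has interrupts
 * soft-disabled. If our own heartbeat is older than the panic timeout,
 * report a hard lockup on ourselves. SPRN_DEC is reprogrammed at the end so
 * the soft-NMI keeps firing while the condition persists.
 */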
void soft_nmi_interrupt(struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = raw_smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return;

	nmi_enter();
	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
		per_cpu(wd_timer_tb, cpu) = tb;

		wd_smp_lock(&flags);
		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
			wd_smp_unlock(&flags);
			goto out;
		}
		set_cpu_stuck(cpu, tb);

		pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		wd_smp_unlock(&flags);

		if (sysctl_hardlockup_all_cpu_backtrace)
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");
	}
	if (wd_panic_timeout_tb < 0x7fffffff)
		mtspr(SPRN_DEC, wd_panic_timeout_tb);

out:
	nmi_exit();
}

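/*
 * The heartbeat itself is an ordinary pinned per-CPU timer; wd_timer_fn()
 * re-arms it every wd_timer_period_ms via wd_timer_reset().
 */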
static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
{
	t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
	if (wd_timer_period_ms > 1000)
		t->expires = __round_jiffies_up(t->expires, cpu);
	add_timer_on(t, cpu);
}

static void wd_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&wd_timer);
	int cpu = smp_processor_id();

	watchdog_timer_interrupt(cpu);

	wd_timer_reset(cpu, t);
}

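/*
 * Called via touch_nmi_watchdog() by code that legitimately runs for a long
 * time with interrupts disabled. If a full timer period has already passed,
 * run the heartbeat by hand so this CPU is not flagged as stuck.
 */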
void arch_touch_nmi_watchdog(void)
{
	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();

	if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
		watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

static void start_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	per_cpu(wd_timer_tb, cpu) = get_tb();

	setup_pinned_timer(t, wd_timer_fn, 0);
	wd_timer_reset(cpu, t);
}

static void stop_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	del_timer_sync(t);
}

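/*
 * CPU hotplug online/offline callbacks, also used by
 * watchdog_nmi_reconfigure() below to enable and disable the watchdog on
 * individual CPUs.
 */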
static int start_wd_on_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return 0;
	}
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return 0;
	if (watchdog_suspended)
		return 0;
	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return 0;

	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
	start_watchdog_timer_on(cpu);
	return 0;
}

static int stop_wd_on_cpu(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return 0; /* Can happen in CPU unplug case */

	stop_watchdog_timer_on(cpu);

	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
	wd_smp_clear_cpu_pending(cpu, get_tb());

	return 0;
}

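/*
 * Derive the timeouts from watchdog_thresh (in seconds). For example, with
 * the default watchdog_thresh of 10, wd_panic_timeout_tb is 10 seconds
 * worth of timebase ticks, the SMP detector triggers after about 15
 * seconds, and the heartbeat timer runs every 4000ms.
 */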
static void watchdog_calc_timeouts(void)
{
	wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

	/* Have the SMP detector trigger a bit later */
	wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

	/* 2/5 is the factor that the perf based detector uses */
	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}

void watchdog_nmi_reconfigure(void)
{
	int cpu;

	watchdog_calc_timeouts();
	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_wd_on_cpu(cpu);
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_wd_on_cpu(cpu);
}

/*
 * This runs after lockup_detector_init() which sets up watchdog_cpumask.
 */
static int __init powerpc_watchdog_init(void)
{
	int err;

	watchdog_calc_timeouts();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
				start_wd_on_cpu, stop_wd_on_cpu);
	if (err < 0)
		pr_warn("Watchdog could not be initialized");

	return 0;
}
arch_initcall(powerpc_watchdog_init);

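/*
 * NMI IPI based backtrace support: this lets nmi_trigger_cpumask_backtrace()
 * pull stack traces out of CPUs that are stuck with interrupts disabled.
 */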
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}