x86, nmi: Wire up NMI handlers to new routines
arch/x86/kernel/nmi.c
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011  Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>

#define NMI_MAX_NAMELEN 16
struct nmiaction {
        struct list_head list;
        nmi_handler_t handler;
        unsigned int flags;
        char *name;
};

struct nmi_desc {
        spinlock_t lock;
        struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
                .head = LIST_HEAD_INIT(nmi_desc[0].head),
        },
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
                .head = LIST_HEAD_INIT(nmi_desc[1].head),
        },

};

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *a;
        int handled = 0;

        rcu_read_lock();

        /*
         * NMIs are edge-triggered, which means if you have enough
         * of them concurrently, you can lose some because only one
         * can be latched at any given time.  Walk the whole list
         * to handle those situations.
         */
        list_for_each_entry_rcu(a, &desc->head, list) {

                handled += a->handler(type, regs);

        }

        rcu_read_unlock();

        /* return total number of NMI events handled */
        return handled;
}

static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        /*
         * some handlers need to be executed first otherwise a fake
         * event confuses some handlers (kdump uses this flag)
         */
        if (action->flags & NMI_FLAG_FIRST)
                list_add_rcu(&action->list, &desc->head);
        else
                list_add_tail_rcu(&action->list, &desc->head);

        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *n;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        list_for_each_entry_rcu(n, &desc->head, list) {
                /*
                 * the name passed in to describe the nmi handler
                 * is used as the lookup key
                 */
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
                        list_del_rcu(&n->list);
                        break;
                }
        }

        spin_unlock_irqrestore(&desc->lock, flags);
        synchronize_rcu();
        return (n);
}

int register_nmi_handler(unsigned int type, nmi_handler_t handler,
                        unsigned long nmiflags, const char *devname)
{
        struct nmiaction *action;
        int retval = -ENOMEM;

        if (!handler)
                return -EINVAL;

        action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
        if (!action)
                goto fail_action;

        action->handler = handler;
        action->flags = nmiflags;
        action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
        if (!action->name)
                goto fail_action_name;

        retval = __setup_nmi(type, action);

        if (retval)
                goto fail_setup_nmi;

        return retval;

fail_setup_nmi:
        kfree(action->name);
fail_action_name:
        kfree(action);
fail_action:

        return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
        struct nmiaction *a;

        a = __free_nmi(type, name);
        if (a) {
                kfree(a->name);
                kfree(a);
        }
}

EXPORT_SYMBOL_GPL(unregister_nmi_handler);
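
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver could claim CPU-local NMIs through this interface roughly as
 * follows.  The handler name "my_dev_nmi", the helpers my_dev_raised_nmi()
 * and my_dev_ack_nmi(), and the device string "my_dev" are made up for the
 * example; a handler returns non-zero when the NMI belonged to its device,
 * so nmi_handle() can count it as handled.
 *
 *	static int my_dev_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_dev_raised_nmi())	// hypothetical device check
 *			return 0;		// not ours, let others look at it
 *		my_dev_ack_nmi();		// hypothetical: quiesce the source
 *		return 1;			// one NMI event handled
 *	}
 *
 *	// registration, e.g. from the driver's probe path:
 *	err = register_nmi_handler(NMI_LOCAL, my_dev_nmi, 0, "my_dev");
 *
 *	// teardown: the same string is the lookup key used by __free_nmi()
 *	unregister_nmi_handler(NMI_LOCAL, "my_dev");
 *
 * Passing NMI_FLAG_FIRST instead of 0 would place the handler at the head
 * of the list (see __setup_nmi() above), which kdump-style users rely on.
 */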

static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        /*
         * On some machines, PCI SERR line is used to report memory
         * errors. EDAC makes use of it.
         */
#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        int handled;

        handled = nmi_handle(NMI_UNKNOWN, regs);
        if (handled)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int handled;

        /*
         * CPU-specific NMI must be processed before non-CPU-specific
         * NMI, otherwise we may lose it, because the CPU-specific
         * NMI can not be detected/processed on other CPUs.
         */
        handled = nmi_handle(NMI_LOCAL, regs);
        if (handled)
                return;

        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
        reason = get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        unknown_nmi_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}
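
/*
 * Usage sketch (illustrative only, not part of this file): stop_nmi() and
 * restart_nmi() simply bracket a region in which do_nmi() still takes the
 * NMI but skips default_do_nmi().  A caller elsewhere in the kernel that
 * must not have NMI handlers run concurrently could, in principle, do:
 *
 *	stop_nmi();
 *	// ...critical work during which the handler chain must stay idle...
 *	restart_nmi();
 *
 * Note that the NMI itself is still delivered and counted via
 * inc_irq_stat(__nmi_count); only the dispatch into default_do_nmi() is
 * suppressed while ignore_nmis is non-zero.
 */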