/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>

#define NMI_MAX_NAMELEN 16
struct nmiaction {
        struct list_head list;
        nmi_handler_t handler;
        unsigned int flags;
        char *name;
};

struct nmi_desc {
        spinlock_t lock;
        struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
                .head = LIST_HEAD_INIT(nmi_desc[0].head),
        },
        {
                .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
                .head = LIST_HEAD_INIT(nmi_desc[1].head),
        },
};

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed
 * simultaneously; this lock may only be taken from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
        unknown_nmi_panic = 1;
        return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
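
/*
 * Passing "unknown_nmi_panic" on the kernel command line makes the
 * kernel panic on NMIs that no handler claims, instead of merely
 * logging them (see unknown_nmi_error() below).
 */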

#define nmi_to_desc(type) (&nmi_desc[type])

static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *a;
        int handled = 0;

        rcu_read_lock();

        /*
         * NMIs are edge-triggered, which means if you have enough
         * of them concurrently, you can lose some because only one
         * can be latched at any given time.  Walk the whole list
         * to handle those situations.
         */
        list_for_each_entry_rcu(a, &desc->head, list)
                handled += a->handler(type, regs);

        rcu_read_unlock();

        /* return total number of NMI events handled */
        return handled;
}

static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        /*
         * Some handlers need to be executed first, otherwise a fake
         * event could confuse the handlers registered behind them
         * (kdump uses this flag).
         */
        if (action->flags & NMI_FLAG_FIRST)
                list_add_rcu(&action->list, &desc->head);
        else
                list_add_tail_rcu(&action->list, &desc->head);

        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
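
/*
 * For example, a kdump-style handler that must see every NMI before
 * anything else runs could be registered with NMI_FLAG_FIRST. A
 * minimal sketch, assuming the NMI_LOCAL type from <asm/nmi.h>; the
 * handler and name below are illustrative only:
 *
 *      register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
 *                           NMI_FLAG_FIRST, "crash");
 *
 * __setup_nmi() then places the action at the head of the list, so
 * nmi_handle() calls it first.
 */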

static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
        struct nmi_desc *desc = nmi_to_desc(type);
        struct nmiaction *n, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);

        list_for_each_entry_rcu(n, &desc->head, list) {
                /*
                 * the name passed in to describe the nmi handler
                 * is used as the lookup key
                 */
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
                        list_del_rcu(&n->list);
                        found = n;
                        break;
                }
        }

        spin_unlock_irqrestore(&desc->lock, flags);
        synchronize_rcu();
        /* NULL if no handler registered under this name was found */
        return found;
}

int register_nmi_handler(unsigned int type, nmi_handler_t handler,
                        unsigned long nmiflags, const char *devname)
{
        struct nmiaction *action;
        int retval = -ENOMEM;

        if (!handler)
                return -EINVAL;

        action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
        if (!action)
                goto fail_action;

        action->handler = handler;
        action->flags = nmiflags;
        action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
        if (!action->name)
                goto fail_action_name;

        retval = __setup_nmi(type, action);
        if (retval)
                goto fail_setup_nmi;

        return retval;

fail_setup_nmi:
        kfree(action->name);
fail_action_name:
        kfree(action);
fail_action:
        return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);
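
/*
 * Example usage: a driver claiming the NMIs its device raises. This is
 * a minimal sketch, assuming the NMI_LOCAL type from <asm/nmi.h>;
 * "mydev" and mydev_this_nmi_is_mine() are hypothetical stand-ins:
 *
 *      static int mydev_nmi(unsigned int type, struct pt_regs *regs)
 *      {
 *              if (!mydev_this_nmi_is_mine())
 *                      return 0;       (not ours, keep walking the list)
 *              quiesce_mydev();
 *              return 1;               (one NMI event handled)
 *      }
 *
 *      err = register_nmi_handler(NMI_LOCAL, mydev_nmi, 0, "mydev");
 */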

void unregister_nmi_handler(unsigned int type, const char *name)
{
        struct nmiaction *a;

        a = __free_nmi(type, name);
        if (a) {
                kfree(a->name);
                kfree(a);
        }
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
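
/*
 * Teardown uses the same name that was passed at registration time,
 * e.g. for the hypothetical handler sketched above:
 *
 *      unregister_nmi_handler(NMI_LOCAL, "mydev");
 *
 * The synchronize_rcu() in __free_nmi() ensures no CPU is still inside
 * the handler when it returns, so freeing the action is safe.
 */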

static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
        pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        /*
         * On some machines, the PCI SERR line is used to report memory
         * errors. EDAC makes use of it.
         */
#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");

        /* Clear and disable the PCI SERR error line. */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        pr_emerg(
        "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
                 reason, smp_processor_id());
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Mask the IOCK error line and give it time to settle */
        reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);

        /* 20000 iterations of 100us is roughly two seconds */
        i = 20000;
        while (--i) {
                touch_nmi_watchdog();
                udelay(100);
        }

        /* Re-enable the IOCK error line */
        reason &= ~NMI_REASON_CLEAR_IOCHK;
        outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
                        NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                 reason, smp_processor_id());

        pr_emerg("Do you have a strange power saving mode enabled?\n");
        if (unknown_nmi_panic || panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        pr_emerg("Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;

        /*
         * CPU-specific NMIs must be processed before non-CPU-specific
         * NMIs, otherwise we may lose one, because a CPU-specific NMI
         * cannot be detected/processed on other CPUs.
         */
        if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
        raw_spin_lock(&nmi_reason_lock);
        reason = get_nmi_reason();

        if (reason & NMI_REASON_MASK) {
                if (reason & NMI_REASON_SERR)
                        pci_serr_error(reason, regs);
                else if (reason & NMI_REASON_IOCHK)
                        io_check_error(reason, regs);
#ifdef CONFIG_X86_32
                /*
                 * Reassert NMI in case it became active
                 * meanwhile as it's edge-triggered:
                 */
                reassert_nmi();
#endif
                raw_spin_unlock(&nmi_reason_lock);
                return;
        }
        raw_spin_unlock(&nmi_reason_lock);

        unknown_nmi_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
}
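
/*
 * stop_nmi() and restart_nmi() nest: ignore_nmis is a plain counter,
 * so every stop_nmi() must be paired with a restart_nmi(). While the
 * count is nonzero, do_nmi() still runs (and __nmi_count still
 * advances) but default_do_nmi() is skipped. A hypothetical caller:
 *
 *      stop_nmi();
 *      ...touch hardware that would otherwise raise spurious NMIs...
 *      restart_nmi();
 */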