/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>
#define MISC_MCELOG_MINOR 227
#define NR_SYSFS_BANKS 6
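
/*
 * Note: only the first NR_SYSFS_BANKS banks get individual sysfs control
 * files; any banks beyond that are unconditionally enabled with an
 * all-ones mask in mce_init().
 */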
atomic_t mce_entry;

static int mce_dont_init;
/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
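
/* Readers of /dev/mcelog sleep on mce_wait; mce_notify_user() wakes them. */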
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
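
/*
 * The protocol: a writer reserves a slot by advancing mcelog.next with
 * cmpxchg(), copies its record in, and only then sets ->finished under
 * write barriers; readers must treat any entry without ->finished set
 * as still being written.
 */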
static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	atomic_inc(&mce_events);
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}

static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
		m->cs = 0;
	}
}

/*
 * The actual machine check handler
 */
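/*
 * In short: every enabled bank with a valid status is read and logged;
 * PCC or an overflowed uncorrectable error forces no_way_out, any
 * uncorrectable error sets kill_it, and tolerant then decides between
 * panic, SIGBUS and just logging.
 */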
void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if ((regs
	     && notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
	    || !banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS && !bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble.  If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done.  Try to kill as little as possible.  If we can kill just
	 * one task, do that.  If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS.  Otherwise, panic if tolerance is low.
		 *
		 * force_sig() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			force_sig(SIGBUS, current);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				&panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

 out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
	atomic_dec(&mce_entry);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	rdtscll(m.tsc);
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
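/*
 * With the default check_interval of 5 minutes the delay thus decays
 * towards a floor of HZ/100 jiffies (10ms) while errors keep coming in,
 * and doubles back up to the full 5 minutes once the machine is quiet.
 */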
static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}
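
/*
 * Note that the poller passes regs == NULL, so do_machine_check() takes
 * the "never do anything final in the polling timer" exit: found errors
 * are logged but never escalated to SIGBUS or panic from here.
 */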
static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1);

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (trigger[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);

/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > MCE_EXTENDED_BANK) {
		banks = MCE_EXTENDED_BANK;
		printk(KERN_INFO "MCE: warning: using only %d banks\n",
		       banks);
	}
	/* Use accurate RIP reporting if available. */
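	/*
	 * In MCG_CAP, bit 9 (MCG_EXT_P) advertises the extended register
	 * bank and bits 16-23 its size; the check below assumes MCG_EIP
	 * is implemented once at least 9 extended registers are reported.
	 */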
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS)
			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		else
			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);

		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15)
			/* disable GART TBL walk error reporting, which trips off
			   incorrectly with the IOMMU & 3ware & Cerberus. */
			clear_bit(10, &bank[4]);
		if (c->x86 <= 17 && mce_bootlog < 0)
			/* Lots of broken BIOS around that don't clear them
			   by default and leave crap in there. Don't log. */
			mce_bootlog = 0;
	}
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	lock_kernel();
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		unlock_kernel();
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);
	unlock_kernel();

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);
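	/*
	 * An entry that raced with the reset above carries a TSC at or
	 * after the snapshot taken on its CPU, so only entries stamped
	 * strictly before the snapshot are safe to copy out and clear.
	 */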
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check. Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
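/* For example, booting with "mce=2" sets tolerant to 2: uncorrected errors
   raise SIGBUS or are merely logged where possible instead of panicking. */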
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys\n", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable(void)
{
	int i;

	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
	return 0;
}

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
	return mce_disable();
}

static int mce_shutdown(struct sys_device *dev)
{
	return mce_disable();
}

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get readded later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	mce_cpu_features(&current_cpu_data);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1);
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
	.suspend = mce_suspend,
	.shutdown = mce_shutdown,
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s,		\
				     struct sysdev_attribute *attr,	\
				     char *buf) {			\
		return sprintf(buf, "%lx\n", (unsigned long)var);	\
	}								\
	static ssize_t set_ ## name(struct sys_device *s,		\
				    struct sysdev_attribute *attr,	\
				    const char *buf, size_t siz) {	\
		char *end;						\
		unsigned long new = simple_strtoul(buf, &end, 0);	\
		if (end == buf) return -EINVAL;				\
		var = new;						\
		start;							\
		return end-buf;						\
	}								\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
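
/*
 * E.g. ACCESSOR(bank0ctl,bank[0],mce_restart()) expands to a
 * show_bank0ctl()/set_bank0ctl() pair plus an attr_bank0ctl attribute;
 * writing a new mask to the sysfs file updates bank[0] and then
 * reinitializes machine checks everywhere via mce_restart().
 */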
/*
 * TBD should generate these dynamically based on number of available banks.
 * Have only 6 control banks in /sysfs until then.
 */
ACCESSOR(bank0ctl,bank[0],mce_restart())
ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
ACCESSOR(bank5ctl,bank[5],mce_restart())

static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
				char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;
	int len;

	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');
	if (p)
		*p = 0;
	return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval,check_interval,mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
	NULL
};

static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce,cpu).id = cpu;
	per_cpu(device_mce,cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce,cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce,cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--) {
		sysdev_remove_file(&per_cpu(device_mce,cpu),
				   mce_attributes[i]);
	}
	sysdev_unregister(&per_cpu(device_mce,cpu));

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce,cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce,cpu));
	cpu_clear(cpu, mce_device_initialized);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);