]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - arch/powerpc/kvm/book3s_hv_ras.c
Merge commit '6bb27d7349db51b50c40534710fe164ca0d58902' into omap-timer-for-v3.10
[karo-tx-linux.git] / arch / powerpc / kvm / book3s_hv_ras.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7  */
8
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 #include <linux/kernel.h>
14 #include <asm/opal.h>
15
/*
 * SRR1 bits for machine check on POWER7.
 * Bit positions use IBM (big-endian, MSB=0) numbering, hence the
 * (63 - n) conversions to a normal shift count.
 */
#define SRR1_MC_LDSTERR         (1ul << (63-42))  /* set => error was on a load/store (see DSISR) */
#define SRR1_MC_IFETCH_SH       (63-45)           /* shift for the instruction-fetch error field */
#define SRR1_MC_IFETCH_MASK     0x7               /* 3-bit instruction-fetch error code */
#define SRR1_MC_IFETCH_SLBPAR           2       /* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI         3       /* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI      4       /* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI         5       /* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 (valid when SRR1_MC_LDSTERR set) */
#define DSISR_MC_DERAT_MULTI    0x800           /* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI      0x400           /* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY     0x100           /* SLB parity error */
#define DSISR_MC_SLB_MULTI      0x080           /* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI   0x040           /* SLB parity + multi-hit */
31
/*
 * POWER7 SLB flush and reload.
 *
 * Invalidate the entire SLB, then repopulate it from the guest's
 * registered SLB shadow buffer (if any).  Called in real mode from the
 * machine-check path to recover from SLB parity / multi-hit errors.
 * The caller notes that the slbia also flushes the D-ERAT.
 */
static void reload_slb(struct kvm_vcpu *vcpu)
{
        struct slb_shadow *slb;
        unsigned long i, n;

        /*
         * First clear out SLB: write a null entry 0, then invalidate
         * all entries.
         */
        asm volatile("slbmte %0,%0; slbia" : : "r" (0));

        /* Do they have an SLB shadow buffer registered? */
        slb = vcpu->arch.slb_shadow.pinned_addr;
        if (!slb)
                return;

        /*
         * Sanity check: cap the entry count at SLB_MIN_SIZE and make
         * sure the save area fits inside the pinned shadow region --
         * the buffer contents come from the guest and can't be trusted.
         */
        n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
        if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
                return;

        /* Load up the SLB from that */
        for (i = 0; i < n; ++i) {
                unsigned long rb = slb->save_area[i].esid;
                unsigned long rs = slb->save_area[i].vsid;

                /*
                 * The low 12 bits of RB carry the SLB index for slbmte;
                 * force them to this entry's number.
                 */
                rb = (rb & ~0xFFFul) | i;       /* insert entry number */
                asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
        }
}
60
/*
 * POWER7 TLB flush: invalidate every TLB congruence class for this
 * LPAR using tlbiel (local, no broadcast), stepping the set-index
 * field of RB through all POWER7_TLB_SETS sets.
 *
 * NOTE(review): @vcpu is currently unused here; presumably kept for
 * symmetry with reload_slb() -- confirm before removing.
 */
static void flush_tlb_power7(struct kvm_vcpu *vcpu)
{
        unsigned long i, rb;

        /* Start with the LPID-scoped invalidate-set encoding */
        rb = TLBIEL_INVAL_SET_LPID;
        for (i = 0; i < POWER7_TLB_SETS; ++i) {
                asm volatile("tlbiel %0" : : "r" (rb));
                rb += 1 << TLBIEL_INVAL_SET_SHIFT;      /* next congruence class */
        }
}
72
/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 *
 * Recoverable causes (SLB parity/multi-hit, TLB multi-hit) are fixed up
 * here by flushing and reloading the affected structure; anything else
 * forces an exit to the host.
 *
 * Returns: 0 => exit guest, 1 => deliver machine check to guest
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
        /* SRR1 at machine check is saved in the shared regs MSR slot */
        unsigned long srr1 = vcpu->arch.shregs.msr;
#ifdef CONFIG_PPC_POWERNV
        struct opal_machine_check_event *opal_evt;
#endif
        long handled = 1;

        if (srr1 & SRR1_MC_LDSTERR) {
                /* error on load/store; DSISR says which kind */
                unsigned long dsisr = vcpu->arch.shregs.dsisr;

                if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
                             DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
                        /* flush and reload SLB; flushes D-ERAT too */
                        reload_slb(vcpu);
                        /* clear the bits we just dealt with */
                        dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
                                   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
                }
                if (dsisr & DSISR_MC_TLB_MULTI) {
                        flush_tlb_power7(vcpu);
                        dsisr &= ~DSISR_MC_TLB_MULTI;
                }
                /* Any other errors we don't understand? */
                if (dsisr & 0xffffffffUL)
                        handled = 0;
        }

        /* Instruction-fetch errors are encoded as a 3-bit field in SRR1 */
        switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
        case 0:
                /* no ifetch error */
                break;
        case SRR1_MC_IFETCH_SLBPAR:
        case SRR1_MC_IFETCH_SLBMULTI:
        case SRR1_MC_IFETCH_SLBPARMULTI:
                reload_slb(vcpu);
                break;
        case SRR1_MC_IFETCH_TLBMULTI:
                flush_tlb_power7(vcpu);
                break;
        default:
                handled = 0;
        }

#ifdef CONFIG_PPC_POWERNV
        /*
         * See if OPAL has already handled the condition.
         * We assume that if the condition is recovered then OPAL
         * will have generated an error log event that we will pick
         * up and log later.
         */
        opal_evt = local_paca->opal_mc_evt;
        if (opal_evt->version == OpalMCE_V1 &&
            (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
             opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
                handled = 1;

        /* Release the event slot so OPAL can reuse it */
        if (handled)
                opal_evt->in_use = 0;
#endif

        return handled;
}
141
142 long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
143 {
144         if (cpu_has_feature(CPU_FTR_ARCH_206))
145                 return kvmppc_realmode_mc_power7(vcpu);
146
147         return 0;
148 }