]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - arch/powerpc/kvm/book3s_hv_ras.c
regulator: max8997: Convert max8997_safeout_ops to set_voltage_sel and list_voltage_table
[karo-tx-linux.git] / arch / powerpc / kvm / book3s_hv_ras.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7  */
8
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 #include <linux/kernel.h>
14 #include <asm/opal.h>
15
16 /* SRR1 bits for machine check on POWER7 */
17 #define SRR1_MC_LDSTERR         (1ul << (63-42))
18 #define SRR1_MC_IFETCH_SH       (63-45)
19 #define SRR1_MC_IFETCH_MASK     0x7
20 #define SRR1_MC_IFETCH_SLBPAR           2       /* SLB parity error */
21 #define SRR1_MC_IFETCH_SLBMULTI         3       /* SLB multi-hit */
22 #define SRR1_MC_IFETCH_SLBPARMULTI      4       /* SLB parity + multi-hit */
23 #define SRR1_MC_IFETCH_TLBMULTI         5       /* I-TLB multi-hit */
24
25 /* DSISR bits for machine check on POWER7 */
26 #define DSISR_MC_DERAT_MULTI    0x800           /* D-ERAT multi-hit */
27 #define DSISR_MC_TLB_MULTI      0x400           /* D-TLB multi-hit */
28 #define DSISR_MC_SLB_PARITY     0x100           /* SLB parity error */
29 #define DSISR_MC_SLB_MULTI      0x080           /* SLB multi-hit */
30 #define DSISR_MC_SLB_PARMULTI   0x040           /* SLB parity + multi-hit */
31
32 /* POWER7 SLB flush and reload */
33 static void reload_slb(struct kvm_vcpu *vcpu)
34 {
35         struct slb_shadow *slb;
36         unsigned long i, n;
37
38         /* First clear out SLB */
39         asm volatile("slbmte %0,%0; slbia" : : "r" (0));
40
41         /* Do they have an SLB shadow buffer registered? */
42         slb = vcpu->arch.slb_shadow.pinned_addr;
43         if (!slb)
44                 return;
45
46         /* Sanity check */
47         n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
48         if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
49                 return;
50
51         /* Load up the SLB from that */
52         for (i = 0; i < n; ++i) {
53                 unsigned long rb = slb->save_area[i].esid;
54                 unsigned long rs = slb->save_area[i].vsid;
55
56                 rb = (rb & ~0xFFFul) | i;       /* insert entry number */
57                 asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
58         }
59 }
60
61 /* POWER7 TLB flush */
62 static void flush_tlb_power7(struct kvm_vcpu *vcpu)
63 {
64         unsigned long i, rb;
65
66         rb = TLBIEL_INVAL_SET_LPID;
67         for (i = 0; i < POWER7_TLB_SETS; ++i) {
68                 asm volatile("tlbiel %0" : : "r" (rb));
69                 rb += 1 << TLBIEL_INVAL_SET_SHIFT;
70         }
71 }
72
73 /*
74  * On POWER7, see if we can handle a machine check that occurred inside
75  * the guest in real mode, without switching to the host partition.
76  *
77  * Returns: 0 => exit guest, 1 => deliver machine check to guest
78  */
/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 *
 * Strategy: decode the machine-check cause from the guest's SRR1 (and
 * DSISR for load/store errors), fix up the causes we know how to fix
 * (SLB and TLB multi-hit / parity errors) by flushing and reloading the
 * affected structures, then consult the OPAL machine-check event in the
 * paca to see if firmware already recovered anything we couldn't.
 *
 * Returns: 0 => exit guest, 1 => deliver machine check to guest
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	/* Guest SRR1 holds the machine-check reason bits on entry */
	unsigned long srr1 = vcpu->arch.shregs.msr;
	struct opal_machine_check_event *opal_evt;
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store; the detail is in the guest's DSISR */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			/* Clear the bits we just dealt with */
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			flush_tlb_power7(vcpu);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

	/* Instruction-fetch machine checks are encoded in an SRR1 field */
	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:
		/* No ifetch error reported */
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		/* All three SLB error flavors are cured by a reload */
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		flush_tlb_power7(vcpu);
		break;
	default:
		/* Unknown ifetch error; let the host handle it */
		handled = 0;
	}

	/*
	 * See if OPAL has already handled the condition.
	 * We assume that if the condition is recovered then OPAL
	 * will have generated an error log event that we will pick
	 * up and log later.
	 */
	opal_evt = local_paca->opal_mc_evt;
	if (opal_evt->version == OpalMCE_V1 &&
	    (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
	     opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
		handled = 1;

	/* Release the OPAL event slot once we're done with it */
	if (handled)
		opal_evt->in_use = 0;

	return handled;
}
137
138 long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
139 {
140         if (cpu_has_feature(CPU_FTR_ARCH_206))
141                 return kvmppc_realmode_mc_power7(vcpu);
142
143         return 0;
144 }