/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/types.h>
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
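/*
 * Most operations in this file come in an nVHE and a VHE flavour:
 * hyp_alternate_select() returns a function pointer that the
 * alternatives framework patches at boot, depending on whether the
 * CPU implements ARM64_HAS_VIRT_HOST_EXTN (the Virtualization Host
 * Extensions). This is why the callers below use "()()"-style
 * double calls.
 */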
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
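/*
 * __fpsimd_enabled() is sampled on guest exit: if the guest never
 * touched the FP/SIMD registers, the FP trap configured below was
 * never taken, and __kvm_vcpu_run() can skip the FP register
 * save/restore entirely.
 */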
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(__kvm_hyp_vector, vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}
static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/* Make sure we trap PMU access from EL0 to EL2 */
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
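/*
 * kvm->arch.vttbr encodes both the VMID and the stage 2 page table
 * base address, so a zero VTTBR_EL2 doubles as a "no guest context
 * loaded" marker. __hyp_panic() below relies on this to decide
 * whether it needs to tear down guest state before panicking.
 */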
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
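/*
 * Cortex-A57 erratum 834220 means HPFAR_EL2 can be invalid for some
 * stage 2 faults (see the comment in __populate_fault_info() below).
 * On affected CPUs, __check_arm_834220()() patches to __true_value,
 * forcing the slower AT-based IPA resolution instead.
 */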
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format: PA[47:12] moves to FIPA[39:4] */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
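/*
 * The world switch proper: save host state, install the guest's
 * traps, VM, vgic, timer and sysreg state, run the guest, then undo
 * everything in reverse order on exit.
 */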
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;
	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);
	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */
	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}
	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	/* Save guest debug state before restoring the host's */
	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr,  elr,
		       read_sysreg(esr_el2),   read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr,  elr,
	      read_sysreg_el2(esr),   read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}
static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
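/*
 * Panic entry point from EL2. A non-zero VTTBR_EL2 tells us we
 * crashed with a guest loaded, in which case we must restore enough
 * host context for the panic machinery to run.
 */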
void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}