/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}
static void do_dc_cisw(u32 val)
{
        asm volatile("dc cisw, %x0" : : "r" (val));
        dsb(ish);
}

static void do_dc_csw(u32 val)
{
        asm volatile("dc csw, %x0" : : "r" (val));
        dsb(ish);
}
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        unsigned long val;
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        cpu = get_cpu();

        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
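
        /*
         * At this point every other CPU is marked as needing a dcache
         * flush for this vcpu (presumably consumed the next time the
         * vcpu is loaded there), while the current CPU's flush is
         * performed directly below.
         */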
        /* If we were already preempted, take the long way around */
        if (cpu != vcpu->arch.last_pcpu) {
                flush_cache_all();
                goto done;
        }

        val = *vcpu_reg(vcpu, p->Rt);

        switch (p->CRm) {
        case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
        case 14:                /* DCCISW */
                do_dc_cisw(val);
                break;

        case 10:                /* DCCSW */
                do_dc_csw(val);
                break;
        }

done:
        put_cpu();

        return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        unsigned long val;

        BUG_ON(!p->is_write);

        val = *vcpu_reg(vcpu, p->Rt);
        if (!p->is_aarch32) {
                vcpu_sys_reg(vcpu, r->reg) = val;
        } else {
                if (!p->is_32bit)
                        vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
        }

        return true;
}
/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        access_vm_reg(vcpu, p, r);

        if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
                vcpu->arch.hcr_el2 &= ~HCR_TVM;
                stage2_flush_vm(vcpu->kvm);
        }

        return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 val;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        val = *vcpu_reg(vcpu, p->Rt);
        vgic_v3_dispatch_sgi(vcpu, val);

        return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                *vcpu_reg(vcpu, p->Rt) = (1 << 3);
                return true;
        }
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   const struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u32 val;
                asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
                *vcpu_reg(vcpu, p->Rt) = val;
                return true;
        }
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            const struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
        }

        return true;
}
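
/*
 * This is the trap side of the dirty-bit scheme described above: a guest
 * write updates the shadow register and sets KVM_ARM64_DEBUG_DIRTY, while
 * a read is served entirely from the shadow copy.
 */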
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair;

        asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
        vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;

        /*
         * Map the vcpu_id into the first three affinity level fields of
         * the MPIDR. We limit the number of VCPUs in level 0 due to a
         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
         * of the GICv3 to be able to address each CPU directly when
         * sending IPIs.
         */
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
        vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
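
/*
 * For example, vcpu_id 20 (0b10100) maps to Aff0 = 4 and Aff1 = 1,
 * with bit 31 (RES1) always set in the resulting MPIDR_EL1 value.
 */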
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
        /* DBGBVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
          trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },         \
        /* DBGBCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
          trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },         \
        /* DBGWVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
          trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },         \
        /* DBGWCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
          trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
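
/*
 * Each expansion of the macro above emits four consecutive table entries
 * (the BVR/BCR/WVR/WCR set for breakpoint/watchpoint pair n). Since CRm
 * encodes n and Op2 runs from 0b100 to 0b111, instantiating it with
 * increasing n keeps sys_reg_descs[] sorted as required below.
 */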
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        /* DC ISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
          access_dcsw },
        /* DC CSW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
          access_dcsw },
        /* DC CISW */
        { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
          access_dcsw },

        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
        /* MDCCINT_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
        /* MDSCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          trap_debug_regs, reset_val, MDSCR_EL1, 0 },
        DBG_BCR_BVR_WCR_WVR_EL1(2),
        DBG_BCR_BVR_WCR_WVR_EL1(3),
        DBG_BCR_BVR_WCR_WVR_EL1(4),
        DBG_BCR_BVR_WCR_WVR_EL1(5),
        DBG_BCR_BVR_WCR_WVR_EL1(6),
        DBG_BCR_BVR_WCR_WVR_EL1(7),
        DBG_BCR_BVR_WCR_WVR_EL1(8),
        DBG_BCR_BVR_WCR_WVR_EL1(9),
        DBG_BCR_BVR_WCR_WVR_EL1(10),
        DBG_BCR_BVR_WCR_WVR_EL1(11),
        DBG_BCR_BVR_WCR_WVR_EL1(12),
        DBG_BCR_BVR_WCR_WVR_EL1(13),
        DBG_BCR_BVR_WCR_WVR_EL1(14),
        DBG_BCR_BVR_WCR_WVR_EL1(15),
        /* MDRAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          trap_raz_wi },
        /* OSLAR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
          trap_raz_wi },
        /* OSLSR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
          trap_oslsr_el1 },
        /* OSDLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
          trap_raz_wi },
        /* DBGPRCR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
          trap_raz_wi },
        /* DBGCLAIMSET_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
          trap_raz_wi },
        /* DBGCLAIMCLR_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
          trap_raz_wi },
        /* DBGAUTHSTATUS_EL1 */
        { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
          trap_dbgauthstatus_el1 },

        /* TEECR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEECR32_EL1, 0 },
        /* TEEHBR32_EL1 */
        { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
          NULL, reset_val, TEEHBR32_EL1, 0 },

        /* MDCCSR_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
          trap_raz_wi },
        /* DBGDTR[TR]X_EL0 */
        { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
          trap_raz_wi },

        /* DBGVCR32_EL2 */
        { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
          NULL, reset_val, DBGVCR32_EL2, 0 },
        /* MPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
          access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
        /* TTBR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, TTBR0_EL1 },
        /* TTBR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_unknown, TTBR1_EL1 },
        /* TCR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
          access_vm_reg, reset_val, TCR_EL1, 0 },

        /* AFSR0_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
          access_vm_reg, reset_unknown, AFSR0_EL1 },
        /* AFSR1_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
          access_vm_reg, reset_unknown, AFSR1_EL1 },
        /* ESR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, ESR_EL1 },
        /* FAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
          access_vm_reg, reset_unknown, FAR_EL1 },
        /* PAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
          NULL, reset_unknown, PAR_EL1 },

        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
          trap_raz_wi },
        /* PMINTENCLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
          trap_raz_wi },

        /* MAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
          access_vm_reg, reset_unknown, MAIR_EL1 },
        /* AMAIR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
          access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        /* VBAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
          NULL, reset_val, VBAR_EL1, 0 },

        /* ICC_SGI1R_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
          access_gic_sgi },
        /* ICC_SRE_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
          trap_raz_wi },

        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
          access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        /* TPIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
          NULL, reset_unknown, TPIDR_EL1 },

        /* CNTKCTL_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
          NULL, reset_val, CNTKCTL_EL1, 0},
        /* CSSELR_EL1 */
        { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, CSSELR_EL1 },

        /* PMCR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
          trap_raz_wi },
        /* PMCNTENSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
          trap_raz_wi },
        /* PMCNTENCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
          trap_raz_wi },
        /* PMOVSCLR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
          trap_raz_wi },
        /* PMSWINC_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
          trap_raz_wi },
        /* PMSELR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
          trap_raz_wi },
        /* PMCEID0_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
          trap_raz_wi },
        /* PMCEID1_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
          trap_raz_wi },
        /* PMCCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
          trap_raz_wi },
        /* PMXEVTYPER_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
          trap_raz_wi },
        /* PMXEVCNTR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
          trap_raz_wi },
        /* PMUSERENR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
          trap_raz_wi },
        /* PMOVSSET_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
          trap_raz_wi },

        /* TPIDR_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
          NULL, reset_unknown, TPIDR_EL0 },
        /* TPIDRRO_EL0 */
        { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
          NULL, reset_unknown, TPIDRRO_EL0 },

        /* DACR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, DACR32_EL2 },
        /* IFSR32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
          NULL, reset_unknown, IFSR32_EL2 },
        /* FPEXC32_EL2 */
        { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
          NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
                u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
                u32 el3 = !!((pfr >> 12) & 0xf);

                *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
                                          (((dfr >> 12) & 0xf) << 24) |
                                          (((dfr >> 28) & 0xf) << 20) |
                                          (6 << 16) | (el3 << 14) | (el3 << 12));
                return true;
        }
}
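
/*
 * The constructed DBGDIDR value maps ID_AA64DFR0_EL1.WRPs into
 * DBGDIDR[31:28], BRPs into [27:24] and CTX_CMPs into [23:20], hardwires
 * the debug architecture version field to 6 (ARMv8 debug), and reports
 * the security extension indication bits based on the presence of EL3.
 */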
static bool trap_debug32(struct kvm_vcpu *vcpu,
                         const struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
        } else {
                *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
        }

        return true;
}
#define DBG_BCR_BVR_WCR_WVR(n)                                  \
        /* DBGBVRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,    \
          NULL, (cp14_DBGBVR0 + (n) * 2) },                     \
        /* DBGBCRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,    \
          NULL, (cp14_DBGBCR0 + (n) * 2) },                     \
        /* DBGWVRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,    \
          NULL, (cp14_DBGWVR0 + (n) * 2) },                     \
        /* DBGWCRn */                                           \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,    \
          NULL, (cp14_DBGWCR0 + (n) * 2) }
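
/*
 * The "(n) * 2" spacing appears to leave two 32-bit slots per debug
 * register, so that each cp14 register aliases the low half of the
 * corresponding 64-bit AArch64 view in the shadow copro array (an
 * assumption based on how the cp14_* indices are laid out).
 */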
#define DBGBXVR(n)                                              \
        { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,    \
          NULL, cp14_DBGBXVR0 + n * 2 }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
        /* DBGDIDR */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

        DBG_BCR_BVR_WCR_WVR(0),
        /* DBGDSCRint */
        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(1),
        /* DBGDCCINT */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
        /* DBGDSCRext */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(2),
        /* DBGDTR[RT]Xint */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
        /* DBGDTR[RT]Xext */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(3),
        DBG_BCR_BVR_WCR_WVR(4),
        DBG_BCR_BVR_WCR_WVR(5),
        /* DBGWFAR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
        /* DBGOSECCR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(6),
        /* DBGVCR */
        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
        DBG_BCR_BVR_WCR_WVR(7),
        DBG_BCR_BVR_WCR_WVR(8),
        DBG_BCR_BVR_WCR_WVR(9),
        DBG_BCR_BVR_WCR_WVR(10),
        DBG_BCR_BVR_WCR_WVR(11),
        DBG_BCR_BVR_WCR_WVR(12),
        DBG_BCR_BVR_WCR_WVR(13),
        DBG_BCR_BVR_WCR_WVR(14),
        DBG_BCR_BVR_WCR_WVR(15),

        /* DBGDRAR (32bit) */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
        DBGBXVR(0),
        /* DBGOSLAR */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
        DBGBXVR(1),
        /* DBGOSLSR */
        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
        DBGBXVR(2),
        DBGBXVR(3),
        /* DBGOSDLR */
        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
        DBGBXVR(4),
        /* DBGPRCR */
        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
        DBGBXVR(5),
        DBGBXVR(6),
        DBGBXVR(7),
        DBGBXVR(8),
        DBGBXVR(9),
        DBGBXVR(10),
        DBGBXVR(11),
        DBGBXVR(12),
        DBGBXVR(13),
        DBGBXVR(14),
        DBGBXVR(15),

        /* DBGDSAR (32bit) */
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

        /* DBGDEVID2 */
        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
        /* DBGDEVID1 */
        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
        /* DBGDEVID */
        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
        /* DBGCLAIMSET */
        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
        /* DBGCLAIMCLR */
        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
        /* DBGAUTHSTATUS */
        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
        /* DBGDRAR (64bit) */
        { Op1( 0), CRm( 1), .access = trap_raz_wi },

        /* DBGDSAR (64bit) */
        { Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
        { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

        /* PMU */
        { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
        { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

        { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
        { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
        { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

        /* ICC_SRE */
        { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};
static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
                                       struct kvm_sys_reg_target_table *table)
{
        target_tables[target] = table;
}
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
                                                   bool mode_is_64,
                                                   size_t *num)
{
        struct kvm_sys_reg_target_table *table;

        table = target_tables[target];
        if (mode_is_64) {
                *num = table->table64.num;
                return table->table64.table;
        } else {
                *num = table->table32.num;
                return table->table32.table;
        }
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                           const struct sys_reg_desc table[],
                                           unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                const struct sys_reg_desc *r = &table[i];

                if (params->Op0 != r->Op0)
                        continue;
                if (params->Op1 != r->Op1)
                        continue;
                if (params->CRn != r->CRn)
                        continue;
                if (params->CRm != r->CRm)
                        continue;
                if (params->Op2 != r->Op2)
                        continue;

                return r;
        }
        return NULL;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
                      const struct sys_reg_params *params,
                      const struct sys_reg_desc *table,
                      size_t num)
{
        const struct sys_reg_desc *r;

        if (!table)
                return -1;      /* Not handled */

        r = find_reg(params, table, num);

        if (r) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                }

                /* Handled */
                return 0;
        }

        /* Not handled */
        return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                                struct sys_reg_params *params)
{
        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        int cp;

        switch (hsr_ec) {
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                cp = 15;
                break;
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_64:
                cp = 14;
                break;
        default:
                WARN_ON(1);
                cp = -1;
        }

        kvm_err("Unsupported guest CP%d access at: %08lx\n",
                cp, *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int Rt2 = (hsr >> 10) & 0xf;

        params.is_aarch32 = true;
        params.is_32bit = false;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);

        params.Op0 = 0;
        params.Op1 = (hsr >> 16) & 0xf;
        params.Op2 = 0;
        params.CRn = 0;

        /*
         * Massive hack here. Store Rt2 in the top 32bits so we only
         * have one register to deal with. As we use the same trap
         * backends between AArch32 and AArch64, we get away with it.
         */
        if (params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val &= 0xffffffff;
                val |= *vcpu_reg(vcpu, Rt2) << 32;
                *vcpu_reg(vcpu, params.Rt) = val;
        }
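
        /*
         * For example, an "mcrr p15, 0, r2, r3, c2" trap gives Rt = 2 and
         * Rt2 = 3, so the handler sees the single value (r3 << 32) | r2.
         */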
        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
                goto out;
        if (!emulate_cp(vcpu, &params, global, nr_global))
                goto out;

        unhandled_cp_access(vcpu, &params);

out:
        /* Do the opposite hack for the read side */
        if (!params.is_write) {
                u64 val = *vcpu_reg(vcpu, params.Rt);
                val >>= 32;
                *vcpu_reg(vcpu, Rt2) = val;
        }

        return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
                            const struct sys_reg_desc *global,
                            size_t nr_global,
                            const struct sys_reg_desc *target_specific,
                            size_t nr_specific)
{
        struct sys_reg_params params;
        u32 hsr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = true;
        params.is_32bit = true;
        params.CRm = (hsr >> 1) & 0xf;
        params.Rt = (hsr >> 5) & 0xf;
        params.is_write = ((hsr & 1) == 0);
        params.CRn = (hsr >> 10) & 0xf;
        params.Op0 = 0;
        params.Op1 = (hsr >> 14) & 0x7;
        params.Op2 = (hsr >> 17) & 0x7;
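
        /*
         * The MCR/MRC ISS therefore decodes as: Op2 in HSR[19:17], Op1 in
         * [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1], and the
         * direction (0 = write) in bit 0.
         */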
        if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
                return 1;
        if (!emulate_cp(vcpu, &params, global, nr_global))
                return 1;

        unhandled_cp_access(vcpu, &params);
        return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_64(vcpu,
                                cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
                                target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        const struct sys_reg_desc *target_specific;
        size_t num;

        target_specific = get_target_table(vcpu->arch.target, false, &num);
        return kvm_handle_cp_32(vcpu,
                                cp15_regs, ARRAY_SIZE(cp15_regs),
                                target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_64(vcpu,
                                cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
                                NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return kvm_handle_cp_32(vcpu,
                                cp14_regs, ARRAY_SIZE(cp14_regs),
                                NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           const struct sys_reg_params *params)
{
        size_t num;
        const struct sys_reg_desc *table, *r;

        table = get_target_table(vcpu->arch.target, true, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        if (likely(r)) {
                /*
                 * Not having an accessor means that we have
                 * configured a trap that we don't know how to
                 * handle. This certainly qualifies as a gross bug
                 * that should be fixed right away.
                 */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }
                /* If access function fails, it should complain. */
        } else {
                kvm_err("Unsupported guest sys_reg access at: %lx\n",
                        *vcpu_pc(vcpu));
                print_sys_reg_instr(params);
        }
        kvm_inject_undefined(vcpu);
        return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.Rt = (esr >> 5) & 0x1f;
        params.is_write = !(esr & 1);
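
        /*
         * Same ISS layout as the 32-bit case above, plus Op0 in ESR[21:20]
         * and a five-bit Rt field in [9:5] for the AArch64 GP registers.
         */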
        return emulate_sys_reg(vcpu, &params);
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits means it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                              | KVM_REG_ARM_COPROC_MASK
                              | KVM_REG_ARM64_SYSREG_OP0_MASK
                              | KVM_REG_ARM64_SYSREG_OP1_MASK
                              | KVM_REG_ARM64_SYSREG_CRN_MASK
                              | KVM_REG_ARM64_SYSREG_CRM_MASK
                              | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                      \
                              const struct sys_reg_desc *r)            \
        {                                                               \
                u64 val;                                                \
                                                                        \
                asm volatile("mrs %0, " __stringify(reg) "\n"           \
                             : "=r" (val));                             \
                ((struct sys_reg_desc *)r)->val = val;                  \
        }
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, get_midr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
          NULL, get_revidr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
          NULL, get_id_pfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
          NULL, get_id_pfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
          NULL, get_id_dfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
          NULL, get_id_afr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
          NULL, get_id_mmfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
          NULL, get_id_mmfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
          NULL, get_id_mmfr2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
          NULL, get_id_mmfr3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          NULL, get_id_isar0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
          NULL, get_id_isar1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          NULL, get_id_isar2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
          NULL, get_id_isar3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
          NULL, get_id_isar4_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
          NULL, get_id_isar5_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_clidr_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
          NULL, get_aidr_el1 },
        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}
static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}
static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
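
/*
 * This is the inverse of index_to_params() above: the Op0/Op1/CRn/CRm/Op2
 * tuple is packed into the low bits of the register ID, alongside the
 * architecture, size and coprocessor class bits.
 */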
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}
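
/*
 * Passing a NULL uindices pointer makes walk_sys_regs() merely count the
 * registers it would have copied, without writing anything to userspace.
 */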
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}
void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
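
/*
 * For example, if Ctype3 (i == 2) is the first zero field, the mask
 * becomes (1 << 6) - 1, keeping only the Ctype1 and Ctype2 bits.
 */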
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
                        panic("Didn't reset vcpu_sys_reg(%zi)", num);
}