/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2		0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);

static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

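/*
 * Layout example (illustrative, not from the original source):
 * irq_sgi_sources is a flat array of nr_cpus * VGIC_NR_SGIS bytes, one per
 * (vcpu, SGI) pair, each byte a bitmask of requesting CPUs. With
 * VGIC_NR_SGIS = 16, the byte recording which CPUs have SGI 2 pending for
 * VCPU 1 lives at index 1 * 16 + 2 = 18.
 */
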
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

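/*
 * Worked example (illustrative, not from the original source): with 4
 * online VCPUs and 128 IRQs configured, GICD_TYPER reads
 * ((4 - 1) << 5) | ((128 >> 5) - 1) = 0x63, i.e. CPUNumber = 3 (4 CPUs)
 * and ITLinesNumber = 3 (32 * (3 + 1) = 128 interrupt lines), matching
 * the GICv2 architected encoding.
 */
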
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

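/*
 * Worked example (illustrative, not from the original source): if SPIs
 * 32-35 target VCPUs 0, 1, 2 and 0 respectively, the ITARGETSR word
 * covering them reads 0x01040201 - one CPU-mask byte per interrupt,
 * least significant byte first.
 */
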
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * vcpu 0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

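/*
 * Worked example (illustrative, not from the original source): a guest
 * write of 0x0c (CPUs 2 and 3) to a target byte resolves to VCPU 2,
 * since ffs(0x0c) = 3 and the code picks target = 3 - 1 = 2; a zero
 * byte falls back to VCPU 0.
 */
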
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

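/*
 * Worked example (illustrative, not from the original source): if SGI 1
 * on this VCPU is pending from source CPUs 0 and 2, its source byte is
 * 0x05, so the word covering SGIs 0-3 reads back as 0x00000500.
 */
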
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			/* Set pending SGIs */
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			/* Clear pending SGIs */
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

static const struct vgic_io_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_active_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_active_reg,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};

static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base,
			 KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* GICv2 does not support accesses wider than 32 bits */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;
	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;
	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

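/*
 * Worked example (illustrative, not from the original source): a guest
 * write of 0x00020003 to GICD_SGIR encodes SGI 3 (bits [3:0]), target
 * list 0x02 (bits [23:16]) and filter mode 0 (bits [25:24]), i.e.
 * "send SGI 3 to CPU1". Mode 1 targets all CPUs but the sender, mode 2
 * only the sender itself.
 */
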
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

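/*
 * Worked example (illustrative, not from the original source): if SGI 0
 * is pending from sources {1, 3} but only one list register is free,
 * the source bit for CPU1 is queued and cleared while the bit for CPU3
 * stays set, so the SGI remains pending in the distributor until a
 * later exit frees a list register.
 */
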
/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 * @params: GIC parameters, including the physical base of the GIC virtual
 *	    CPU interface
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}

void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}

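/*
 * Worked example (illustrative, assuming the vgic.h values
 * PRODUCT_ID_KVM = 0x4b ('K') and IMPLEMENTER_ARM = 0x43b): GICC_IIDR
 * reads (0x4b << 20) | (0x2 << 16) | 0x43b = 0x04b2043b.
 */
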
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};

static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct vgic_io_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

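/*
 * Usage sketch (illustrative, not from the original source): userspace
 * reaches this path via KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR on the
 * vgic device, encoding the register in attr->attr as
 * (cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | offset and selecting
 * distributor vs. CPU interface state via the attribute group, e.g.
 * KVM_DEV_ARM_VGIC_GRP_CPU_REGS with offset GIC_CPU_PRIMASK to save a
 * VCPU's priority mask.
 */
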
static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_v2_create,
	.destroy = vgic_v2_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};