/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
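/*
 * In pseudo-code (the names below are illustrative shorthand, not actual
 * fields), the oracle described above is recomputed per cpu as:
 *
 *	pend_percpu = pending[cpu] & enabled[cpu];		(SGIs/PPIs)
 *	pend_shared = pending & enabled & spi_target[cpu];	(SPIs)
 *	irq_pending_on_cpu[cpu] = pend_percpu || pend_shared;
 *
 * compute_pending_for_cpu() below implements exactly this with
 * bitmap_and() and find_first_bit().
 */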
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
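/*
 * The read mode (bit 0) and the write mode (bits 2:1) are independent
 * fields, so a handler picks one of each and ORs them together. For
 * example, the GICD_ISENABLERn handler below uses:
 *
 *	vgic_reg_access(mmio, reg, offset,
 *			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
 *
 * i.e. reads return the backing value, and writes set the written bits.
 */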
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
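/*
 * Worked example: on a 64-bit BE host, the two u32 registers backed by
 * one unsigned long are stored "swapped", so vgic_bitmap_get_reg() below
 * flips the low bit of the word index (idx ^ 1): shared word 0 is fetched
 * from u32 slot 1, word 1 from slot 0, word 2 from slot 3, and so on. On
 * LE (or 32-bit) hosts the XOR with 0 is a no-op.
 */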
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1
static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
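/*
 * Worked example (illustrative): a 2-byte guest write to byte offset 2 of
 * a register gives word_offset = 16 and mask = 0xffff, so an
 * ACCESS_WRITE_VALUE access rewrites only the upper half-word:
 *
 *	regval = (regval & ~(0xffff << 16)) | (data << 16);
 */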
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
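/*
 * Worked example for the GICD_TYPER encoding above: with 4 online vcpus
 * and VGIC_NR_IRQS == 256, reg = (4 - 1) << 5 | (256 >> 5) - 1 = 0x67,
 * i.e. CPUNumber = 3 ("4 processors") and ITLinesNumber = 7
 * (32 * (7 + 1) = 256 interrupt lines).
 */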
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * vcpu0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
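/*
 * Worked example (illustrative): a guest write of 0x08040201 to a
 * GICD_ITARGETSRn word covering SPIs n..n+3 yields, byte by byte,
 * ffs(0x01)-1 = 0, ffs(0x02)-1 = 1, ffs(0x04)-1 = 2 and ffs(0x08)-1 = 3,
 * so the four SPIs target vcpu0..vcpu3 respectively; a zero byte falls
 * back to vcpu0. The matching read rebuilds the word from irq_spi_cpu[]
 * as val |= 1 << (cpu + i * 8).
 */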
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
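/*
 * Worked example: vgic_cfg_expand(0x0003) sets bits 1 and 3 of the result
 * (0x0000000a), i.e. both interrupts are marked edge-triggered in the
 * 2-bits-per-IRQ hardware layout; vgic_cfg_compress(0x0000000a) folds it
 * back to 0x0003.
 */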
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
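/*
 * Layout reminder (illustrative): each byte of GICD_SPENDSGIRn /
 * GICD_CPENDSGIRn is the per-source-CPU pending mask of one SGI, so one
 * word covers four SGIs. Writing 0x02 into the byte for SGI1 via the set
 * register marks SGI1 pending "from CPU1"; the same write to the clear
 * register drops exactly that source bit.
 */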
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it for good!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};
static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
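/*
 * GICD_SGIR decoding used above (illustrative): sgi = reg & 0xf,
 * target_cpus = (reg >> 16) & 0xff, mode = (reg >> 24) & 3. For example,
 * reg = 0x000a0003 written by vcpu0 sends SGI3 to CPUs 1 and 3 (mode 0,
 * explicit target list); mode 1 broadcasts to all CPUs but the sender,
 * and mode 2 targets the sender itself.
 */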
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
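/*
 * Bookkeeping sketch: vgic_irq_lr_map[irq] remembers which LR (if any)
 * already holds this interrupt, so a second injection with the same SGI
 * source just sets LR_STATE_PENDING again ("piggyback"), while lr_used
 * tracks which LRs are allocated. When no free LR remains, the caller
 * sees 'false' and the interrupt stays pending in the distributor until
 * a later flush.
 */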
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active state.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
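/*
 * Worked example: for an edge-triggered IRQ that is not yet pending
 * (state 0), level = 1 validates (a rising edge) and level = 0 does not;
 * for a level-triggered IRQ whose line is already high (state 1),
 * level = 1 is a no-op and only level = 0 (a level change) validates.
 */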
static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOIed.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
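/*
 * Typical use (illustrative): an in-kernel device such as the arch timer
 * raises its PPI on a given vcpu with something like
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, irq_num, 1);
 *
 * and lowers a level-sensitive line again by passing level = 0.
 */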
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;

	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	return 0;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init_maps(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i;

	nr_cpus = dist->nr_cpus = VGIC_MAX_CPUS;
	nr_irqs = dist->nr_irqs = VGIC_NR_IRQS;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

	ret = vgic_init_maps(kvm);
	if (ret)
		kvm_err("Unable to allocate maps\n");

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}
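/*
 * Worked example (illustrative addresses): with the distributor at
 * 0x2c001000 and the CPU interface at 0x2c001800, the CPU interface
 * starts inside the KVM_VGIC_V2_DIST_SIZE window of the distributor, so
 * the check above returns -EBUSY; disjoint windows return 0.
 */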
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}
	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}