/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	/* RAZ: read-as-zero */
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	/* RAO: read-as-one */
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* WI: write-ignored */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);
	}

	return value;
}

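/*
 * A worked example of the offset-to-INTID arithmetic above (a sketch;
 * the exact VGIC_ADDR_TO_INTID() macro lives in vgic-mmio.h): with one
 * bit of state per interrupt, each byte of register space covers eight
 * INTIDs, so a 4-byte access at byte offset 4 into the ISENABLER range
 * starts at intid = 4 * 8 = 32 and covers INTIDs 32..63, i.e. the
 * first bank of SPIs.
 */
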
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->enabled = false;

		spin_unlock(&irq->irq_lock);
	}
}

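/*
 * Note the asymmetry with vgic_mmio_write_senable() above: enabling an
 * interrupt can make an already-pending IRQ deliverable, so the set
 * path goes through vgic_queue_irq_unlock() to (re)evaluate queueing,
 * while the clear path only needs to update the flag and drop the lock.
 */
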
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending)
			value |= (1U << i);
	}

	return value;
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		if (irq->config == VGIC_CONFIG_LEVEL)
			irq->soft_pending = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		if (irq->config == VGIC_CONFIG_LEVEL) {
			irq->soft_pending = false;
			irq->pending = irq->line_level;
		} else {
			irq->pending = false;
		}

		spin_unlock(&irq->irq_lock);
	}
}

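/*
 * For level-triggered interrupts the pending state is effectively the
 * logical OR of the hardware line level and the soft_pending latch set
 * by a GICD_ISPENDR write. Clearing via GICD_ICPENDR therefore only
 * drops the latch and re-samples the line: it cannot deassert a line
 * that the device still holds high.
 */
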
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);
	}

	return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	spin_lock(&irq->irq_lock);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.  We can
	 * only know this for sure when either this IRQ is not assigned
	 * to anyone's AP list anymore, or the VCPU thread is not
	 * running on any CPU.
	 *
	 * In the opposite case, we know the VCPU thread may be on its
	 * way back from the guest and still has to sync back this
	 * IRQ, so we release and re-acquire the spin_lock to let the
	 * other thread sync back the IRQ.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	irq->active = new_active_state;
	if (new_active_state)
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	else
		spin_unlock(&irq->irq_lock);
}

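/*
 * The loop above is a polite busy-wait: cond_resched_lock() may drop
 * irq_lock, reschedule, and re-acquire it, giving the VCPU thread a
 * window in which to fold the LR state back into the struct vgic_irq
 * before we overwrite the active state.
 */
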
/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts, we only have to make sure the single and only VCPU
 * that can potentially queue the IRQ is stopped.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid < VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_vcpu(vcpu);
	else
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare() */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid < VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_vcpu(vcpu);
	else
		kvm_arm_resume_guest(vcpu->kvm);
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	vgic_change_active_prepare(vcpu, intid);
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, false);
	}
	vgic_change_active_finish(vcpu, intid);
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	vgic_change_active_prepare(vcpu, intid);
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
	}
	vgic_change_active_finish(vcpu, intid);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock(&irq->irq_lock);
	}
}

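/*
 * A worked example of the narrowing above, assuming VGIC_PRI_BITS is 5:
 * GENMASK(7, 3) is 0xf8, so a guest write of 0xab is stored as 0xa8 and
 * the low three bits read back as zero, matching a hardware GIC that
 * implements only the upper bits of the priority field.
 */
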
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));
	}

	return value;
}

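/*
 * GICD_ICFGR encodes two bits per interrupt, of which only the upper
 * bit is meaningful: 1 means edge-triggered, 0 means level-triggered.
 * Hence the read above sets bit (i * 2 + 1), i.e. 2U << (i * 2), and
 * the write below tests that same bit.
 */
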
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		spin_lock(&irq->irq_lock);
		if (test_bit(i * 2 + 1, &val)) {
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			irq->config = VGIC_CONFIG_LEVEL;
			irq->pending = irq->line_level | irq->soft_pending;
		}
		spin_unlock(&irq->irq_lock);
	}
}

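/*
 * When the write above switches an interrupt to level-triggered, the
 * pending state is recomputed from the current line level and any
 * latched soft_pending bit, since for level interrupts "pending" must
 * track the source state rather than a previously latched edge.
 */
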
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;
	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

/* Find the proper register handler entry given a certain address offset. */
static const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
		      unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, region, nr_regions,
		       sizeof(region[0]), match_region);
}

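/*
 * bsearch() requires the region array to be sorted by reg_offset and
 * the regions to be non-overlapping. match_region() treats each entry
 * as the half-open range [reg_offset, reg_offset + len), returning 0
 * on a hit so that any offset inside a region resolves to that
 * region's handlers.
 */
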
/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

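/*
 * Taken together, the two helpers above make the register handlers
 * endianness-agnostic: dispatch_mmio_write() below converts the
 * guest's little-endian byte stream into a host-order value before
 * calling a region's write handler, and dispatch_mmio_read() converts
 * the handler's host-order result back to LE for the guest.
 */
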
static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
		return true;
	if ((region->access_flags & VGIC_ACCESS_32bit) &&
	    len == sizeof(u32) && !(addr & 3))
		return true;
	if ((region->access_flags & VGIC_ACCESS_64bit) &&
	    len == sizeof(u64) && !(addr & 7))
		return true;

	return false;
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(region, addr, len)) {
		memset(val, 0, len);
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	data = region->read(r_vcpu, addr, len);
	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region)
		return 0;

	if (!check_region(region, addr, len))
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	region->write(r_vcpu, addr, len, data);
	return 0;
}

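/*
 * Both dispatchers return 0 even for accesses that miss the region
 * table or fail the width/alignment check: the access is treated as
 * RAZ/WI instead of being reported as an MMIO error, mirroring how a
 * hardware GIC ignores ill-formed accesses to its register frame.
 */
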
struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;
	int ret;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
#endif
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}