/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2            0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
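/*
 * Each (target VCPU, SGI number) pair has one byte in irq_sgi_sources
 * recording which source CPUs currently have that SGI pending.
 */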
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
        return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

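/*
 * Emulate the "miscellaneous" distributor registers: GICD_CTLR,
 * GICD_TYPER and GICD_IIDR. Only GICD_CTLR is writable; a write that
 * updates the distributor enable bit forces a distributor state update,
 * signalled by returning true.
 */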
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        u32 word_offset = offset & 3;

        switch (offset & ~3) {
        case 0:                 /* GICD_CTLR */
                reg = vcpu->kvm->arch.vgic.enabled;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
                if (mmio->is_write) {
                        vcpu->kvm->arch.vgic.enabled = reg & 1;
                        vgic_update_state(vcpu->kvm);
                        return true;
                }
                break;

        case 4:                 /* GICD_TYPER */
                reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;

        case 8:                 /* GICD_IIDR */
                reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;
        }

        return false;
}

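/*
 * The ISENABLER/ICENABLER, ISPENDR/ICPENDR and ISACTIVER/ICACTIVER
 * handlers below are thin wrappers around the common VGIC accessors;
 * they only supply the GICv2-specific notion of the accessing VCPU.
 */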
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             vcpu->vcpu_id);
}

static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
                                          vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
                                            vcpu->vcpu_id);
}

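/* GICD_IPRIORITYRn: one priority byte per interrupt, in a bytemap. */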
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                        vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

#define GICD_ITARGETSR_SIZE     32
#define GICD_CPUTARGETS_BITS    8
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
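/*
 * Assemble the 32-bit GICD_ITARGETSR value covering the four SPIs
 * starting at @irq from the per-IRQ target CPUs in irq_spi_cpu[].
 */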
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
        u32 val = 0;

        irq -= VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
                val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

        return val;
}

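/*
 * Apply a GICD_ITARGETSR write: record the new target CPU for each of
 * the four covered SPIs and keep the per-VCPU irq_spi_target bitmaps
 * consistent with it.
 */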
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 target;

        irq -= VGIC_NR_PRIVATE_IRQS;

        /*
         * Pick the LSB in each byte. This ensures we target exactly
         * one vcpu per IRQ. If the byte is zero, assume we target
         * CPU0.
         */
        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
                int shift = i * GICD_CPUTARGETS_BITS;

                target = ffs((val >> shift) & 0xffU);
                target = target ? (target - 1) : 0;
                dist->irq_spi_cpu[irq + i] = target;
                kvm_for_each_vcpu(c, vcpu, kvm) {
                        bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                        if (c == target)
                                set_bit(irq + i, bmap);
                        else
                                clear_bit(irq + i, bmap);
                }
        }
}

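/*
 * GICD_ITARGETSRn: writes to SPI target registers feed
 * vgic_set_target_reg() and trigger a distributor state update.
 */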
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset)
{
        u32 reg;

        /* We treat the banked interrupt targets as read-only */
        if (offset < 32) {
                u32 roreg;

                roreg = 1 << vcpu->vcpu_id;
                roreg |= roreg << 8;
                roreg |= roreg << 16;

                vgic_reg_access(mmio, &roreg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

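/* GICD_ICFGRn: edge/level configuration, two bits per interrupt. */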
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 *reg;

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

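/*
 * GICD_SGIR: reads-as-zero; a write requests an SGI and always forces a
 * distributor state update.
 */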
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_dispatch_sgi(vcpu, reg);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg = 0;

        /* Copy source SGIs from distributor side */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

                reg |= ((u32)sources) << (8 * (sgi - min_sgi));
        }

        mmio_data_write(mmio, ~0, reg);
        return false;
}

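/*
 * Handle writes to GICD_SPENDSGIRn (@set == true) and GICD_CPENDSGIRn
 * (@set == false). Each written byte is a mask of source CPUs whose
 * pending state for the corresponding SGI is set or cleared.
 */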
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset, bool set)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int sgi;
        int min_sgi = (offset & ~0x3);
        int max_sgi = min_sgi + 3;
        int vcpu_id = vcpu->vcpu_id;
        u32 reg;
        bool updated = false;

        reg = mmio_data_read(mmio, ~0);

        /* Set or clear pending SGI sources on the distributor */
        for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
                u8 mask = reg >> (8 * (sgi - min_sgi));
                u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

                if (set) {
                        if ((*src & mask) != mask)
                                updated = true;
                        *src |= mask;
                } else {
                        if (*src & mask)
                                updated = true;
                        *src &= ~mask;
                }
        }

        if (updated)
                vgic_update_state(vcpu->kvm);

        return updated;
}

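/*
 * GICD_SPENDSGIRn and GICD_CPENDSGIRn share the read path; writes
 * differ only in whether the byte mask sets or clears pending sources.
 */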
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio,
                                phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        if (!mmio->is_write)
                return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
        else
                return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

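/*
 * Register map of the emulated GICv2 distributor: each entry ties a
 * register range (offset from the distributor base) to its handler.
 * bits_per_irq lets the common code scale a range with nr_irqs.
 */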
static const struct vgic_io_range vgic_dist_ranges[] = {
        {
                .base           = GIC_DIST_CTRL,
                .len            = 12,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_misc,
        },
        {
                .base           = GIC_DIST_IGROUP,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_DIST_ENABLE_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_enable_reg,
        },
        {
                .base           = GIC_DIST_ENABLE_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_enable_reg,
        },
        {
                .base           = GIC_DIST_PENDING_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_pending_reg,
        },
        {
                .base           = GIC_DIST_PENDING_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_pending_reg,
        },
        {
                .base           = GIC_DIST_ACTIVE_SET,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_active_reg,
        },
        {
                .base           = GIC_DIST_ACTIVE_CLEAR,
                .len            = VGIC_MAX_IRQS / 8,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_active_reg,
        },
        {
                .base           = GIC_DIST_PRI,
                .len            = VGIC_MAX_IRQS,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_priority_reg,
        },
        {
                .base           = GIC_DIST_TARGET,
                .len            = VGIC_MAX_IRQS,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_target_reg,
        },
        {
                .base           = GIC_DIST_CONFIG,
                .len            = VGIC_MAX_IRQS / 4,
                .bits_per_irq   = 2,
                .handle_mmio    = handle_mmio_cfg_reg,
        },
        {
                .base           = GIC_DIST_SOFTINT,
                .len            = 4,
                .handle_mmio    = handle_mmio_sgi_reg,
        },
        {
                .base           = GIC_DIST_SGI_PENDING_CLEAR,
                .len            = VGIC_NR_SGIS,
                .handle_mmio    = handle_mmio_sgi_clear,
        },
        {
                .base           = GIC_DIST_SGI_PENDING_SET,
                .len            = VGIC_NR_SGIS,
                .handle_mmio    = handle_mmio_sgi_set,
        },
        {}
};

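/*
 * Top-level MMIO dispatcher for the distributor: only claim accesses
 * that fall inside the distributor region, and fault accesses wider
 * than the 32 bits GICv2 allows.
 */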
static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                struct kvm_exit_mmio *mmio)
{
        unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

        if (!is_in_range(mmio->phys_addr, mmio->len, base,
                         KVM_VGIC_V2_DIST_SIZE))
                return false;

        /* GICv2 does not support accesses wider than 32 bits */
        if (mmio->len > 4) {
                kvm_inject_dabt(vcpu, mmio->phys_addr);
                return true;
        }

        return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

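/*
 * Decode a GICD_SGIR write and mark the requested SGI pending on every
 * targeted VCPU, recording the sending CPU in the SGI sources. The
 * target list filter (bits [25:24]) selects the given CPU mask, all
 * CPUs but the sender, or the sender itself.
 */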
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        u8 target_cpus;
        int sgi, mode, c, vcpu_id;

        vcpu_id = vcpu->vcpu_id;

        sgi = reg & 0xf;
        target_cpus = (reg >> 16) & 0xff;
        mode = (reg >> 24) & 3;

        switch (mode) {
        case 0:
                if (!target_cpus)
                        return;
                break;

        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
                break;

        case 2:
                target_cpus = 1 << vcpu_id;
                break;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (target_cpus & 1) {
                        /* Flag the SGI as pending */
                        vgic_dist_irq_set_pending(vcpu, sgi);
                        *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
                        kvm_debug("SGI%d from CPU%d to CPU%d\n",
                                  sgi, vcpu_id, c);
                }

                target_cpus >>= 1;
        }
}

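/*
 * Try to queue a pending SGI on its target VCPU, once per source CPU.
 * Returns true when every source could be queued onto a list register,
 * so the SGI can be dropped from the emulated distributor state.
 */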
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long sources;
        int vcpu_id = vcpu->vcpu_id;
        int c;

        sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

        for_each_set_bit(c, &sources, dist->nr_cpus) {
                if (vgic_queue_irq(vcpu, c, irq))
                        clear_bit(c, &sources);
        }

        *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

        /*
         * If the sources bitmap has been cleared it means that we
         * could queue all the SGIs onto link registers (see the
         * clear_bit above), and therefore we are done with them in
         * our emulated gic and can get rid of them.
         */
        if (!sources) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 * @params: pointer to the global vgic parameters
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * Initialize the vgic if this hasn't already been done on demand by
         * accessing the vgic state from userspace.
         */
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to allocate maps\n");
                goto out;
        }

        ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
                                    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
                                    true);
        if (ret) {
                kvm_err("Unable to remap VGIC CPU to VCPU\n");
                goto out;
        }

        kvm->arch.vgic.ready = true;
out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}

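/* Record @source as a pending originator of SGI @irq on this VCPU. */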
static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

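/* Reset all SPI target registers so that every SPI targets CPU0. */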
static int vgic_v2_init_model(struct kvm *kvm)
{
        int i;

        for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
                vgic_set_target_reg(kvm, 0, i);

        return 0;
}

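/* Wire up the GICv2-specific emulation callbacks for this VM. */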
void vgic_v2_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
        dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
        dist->vm_ops.init_model = vgic_v2_init_model;
        dist->vm_ops.map_resources = vgic_v2_map_resources;

        kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

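/*
 * Emulate the basic CPU interface registers (GICC_CTLR, GICC_PMR,
 * GICC_BPR, GICC_ABPR) by mapping them onto the corresponding VMCR
 * fields. Returns true when a write changed the VMCR.
 */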
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        bool updated = false;
        struct vgic_vmcr vmcr;
        u32 *vmcr_field;
        u32 reg;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (offset & ~0x3) {
        case GIC_CPU_CTRL:
                vmcr_field = &vmcr.ctlr;
                break;
        case GIC_CPU_PRIMASK:
                vmcr_field = &vmcr.pmr;
                break;
        case GIC_CPU_BINPOINT:
                vmcr_field = &vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr_field = &vmcr.abpr;
                break;
        default:
                BUG();
        }

        if (!mmio->is_write) {
                reg = *vmcr_field;
                mmio_data_write(mmio, ~0, reg);
        } else {
                reg = mmio_data_read(mmio, ~0);
                if (reg != *vmcr_field) {
                        *vmcr_field = reg;
                        vgic_set_vmcr(vcpu, &vmcr);
                        updated = true;
                }
        }
        return updated;
}

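/* Route GICC_ABPR accesses to the misc handler at its fixed offset. */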
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

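/* GICC_IIDR: read-only identification register; writes are ignored. */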
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        u32 reg;

        if (mmio->is_write)
                return false;

        /* GICC_IIDR */
        reg = (PRODUCT_ID_KVM << 20) |
              (GICC_ARCH_VERSION_V2 << 16) |
              (IMPLEMENTER_ARM << 0);
        mmio_data_write(mmio, ~0, reg);
        return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
        {
                .base           = GIC_CPU_CTRL,
                .len            = 12,
                .handle_mmio    = handle_cpu_mmio_misc,
        },
        {
                .base           = GIC_CPU_ALIAS_BINPOINT,
                .len            = 4,
                .handle_mmio    = handle_mmio_abpr,
        },
        {
                .base           = GIC_CPU_ACTIVEPRIO,
                .len            = 16,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GIC_CPU_IDENT,
                .len            = 4,
                .handle_mmio    = handle_cpu_mmio_ident,
        },
};

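/*
 * Common helper for the KVM_GET/SET_DEVICE_ATTR register groups: find
 * the register's MMIO handler and invoke it on behalf of user space,
 * with all VCPUs stopped and the list registers drained first.
 */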
static int vgic_attr_regs_access(struct kvm_device *dev,
                                 struct kvm_device_attr *attr,
                                 u32 *reg, bool is_write)
{
        const struct vgic_io_range *r = NULL, *ranges;
        phys_addr_t offset;
        int ret, cpuid, c;
        struct kvm_vcpu *vcpu, *tmp_vcpu;
        struct vgic_dist *vgic;
        struct kvm_exit_mmio mmio;

        offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
                ret = -EINVAL;
                goto out;
        }

        vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        vgic = &dev->kvm->arch.vgic;

        mmio.len = 4;
        mmio.is_write = is_write;
        if (is_write)
                mmio_data_write(&mmio, ~0, *reg);
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                mmio.phys_addr = vgic->vgic_dist_base + offset;
                ranges = vgic_dist_ranges;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                mmio.phys_addr = vgic->vgic_cpu_base + offset;
                ranges = vgic_cpu_ranges;
                break;
        default:
                BUG();
        }
        r = vgic_find_range(ranges, &mmio, offset);

        if (unlikely(!r || !r->handle_mmio)) {
                ret = -ENXIO;
                goto out;
        }

        spin_lock(&vgic->lock);

        /*
         * Ensure that no other VCPU is running by checking the vcpu->cpu
         * field.  If no other VCPUs are running we can safely access the VGIC
         * state, because even if another VCPU is run after this point, that
         * VCPU will not touch the vgic state, because it will block on
         * getting the vgic->lock in kvm_vgic_sync_hwstate().
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
                if (unlikely(tmp_vcpu->cpu != -1)) {
                        ret = -EBUSY;
                        goto out_vgic_unlock;
                }
        }

        /*
         * Move all pending IRQs from the LRs on all VCPUs so the pending
         * state can be properly represented in the register state accessible
         * through this API.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
                vgic_unqueue_irqs(tmp_vcpu);

        offset -= r->base;
        r->handle_mmio(vcpu, &mmio, offset);

        if (!is_write)
                *reg = mmio_data_read(&mmio, ~0);

        ret = 0;
out_vgic_unlock:
        spin_unlock(&vgic->lock);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

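/*
 * Device attribute accessors: let the common vgic code handle shared
 * attributes first; only the GICv2 register groups are handled here.
 */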
static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_attr_regs_access(dev, attr, &reg, true);
        }

        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }

        }

        return -ENXIO;
}

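/* Report which device attributes this GICv2 device model implements. */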
static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        phys_addr_t offset;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_dist_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_cpu_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_v2_create,
        .destroy = vgic_v2_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};