/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * it works on hardware both with and without the optional GICv2
 * backwards compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again, Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0xffffffff;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg = 0;

        /*
         * Force ARE and DS to 1, the guest cannot change this.
         * For the time being we only support Group1 interrupts.
         */
        if (vcpu->kvm->arch.vgic.enabled)
                reg = GICD_CTLR_ENABLE_SS_G1;
        reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (reg & GICD_CTLR_ENABLE_SS_G0)
                        kvm_info("guest tried to enable unsupported Group0 interrupts\n");
                vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
                vgic_update_state(vcpu->kvm);
                return true;
        }
        return false;
}

/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
 * LPIs and MBIs are not supported, so we set the respective bits to 0.
 * We report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
                              struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

        reg |= (INTERRUPT_ID_BITS - 1) << 19;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
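
/*
 * Worked example (illustrative, not in the original source): with
 * nr_irqs == 256 the ITLinesNumber field becomes (256 >> 5) - 1 == 7,
 * advertising 32 * (7 + 1) == 256 interrupt IDs, while the IDbits field
 * (bits [23:19]) reads as INTERRUPT_ID_BITS - 1 == 9, i.e. interrupt
 * IDs 0..1023.
 */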

static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;

        reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}

static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_SETBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                              vcpu->vcpu_id,
                                              ACCESS_WRITE_CLEARBIT);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
                                             struct kvm_exit_mmio *mmio,
                                             phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                                   vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
                return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                                     vcpu->vcpu_id);

        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg;

        if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                  vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
 */
static u32 compress_mpidr(unsigned long mpidr)
{
        u32 ret;

        ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
        ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

        return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
        unsigned long mpidr;

        mpidr  = ((value >>  0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((value >>  8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
        mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

        return mpidr;
}
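
/*
 * Worked example (illustrative, assuming the arm64 register layout where
 * Aff3 lives in MPIDR bits [39:32]): an MPIDR with Aff3=2, Aff2=3, Aff1=1,
 * Aff0=5 compresses to 0x02030105, and uncompress_mpidr() restores the
 * original affinity fields from that 32-bit value.
 */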

/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int spi;
        u32 reg;
        int vcpu_id;
        unsigned long *bmap, mpidr;

        /*
         * The upper 32 bits of each 64 bit register are zero,
         * as we don't support Aff3.
         */
        if ((offset & 4)) {
                vgic_reg_access(mmio, NULL, offset,
                                ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
                return false;
        }

        /* This region only covers SPIs, so no handling of private IRQs here. */
        spi = offset / 8;

        /* get the stored MPIDR for this IRQ */
        mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
        reg = mpidr;

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

        if (!mmio->is_write)
                return false;

        /*
         * Now clear the currently assigned vCPU from the map, making room
         * for the new one to be written below.
         */
        vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __clear_bit(spi, bmap);
        }

        dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
        vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

        /*
         * The spec says that non-existent MPIDR values should not be
         * forwarded to any existent (v)CPU, but should be able to become
         * pending anyway. We simply keep the irq_spi_target[] array empty, so
         * the interrupt will never be injected.
         * irq_spi_cpu[irq] gets a magic value in this case.
         */
        if (likely(vcpu)) {
                vcpu_id = vcpu->vcpu_id;
                dist->irq_spi_cpu[spi] = vcpu_id;
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
                __set_bit(spi, bmap);
        } else {
                dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
        }

        vgic_update_state(kvm);

        return true;
}
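
/*
 * Illustrative example (not in the original source): this range is
 * registered at GICD_IROUTER + 0x100, i.e. at the router register of the
 * first SPI (INTID 32). An access at distributor offset GICD_IROUTER +
 * 0x140 therefore arrives here with offset == 0x40, so spi == 0x40 / 8
 * == 8, which corresponds to INTID 40.
 */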

/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio,
                               phys_addr_t offset)
{
        u32 reg = 0;

        switch (offset + GICD_IDREGS) {
        case GICD_PIDR2:
                reg = 0x3b;
                break;
        }

        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

        return false;
}
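
/*
 * Note (illustrative, not in the original source): GICD_PIDR2 encodes the
 * GIC architecture revision in bits [7:4], so the value 0x3b reads back
 * with ArchRev == 0x3, identifying the distributor as GICv3.
 */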

static const struct vgic_io_range vgic_v3_dist_ranges[] = {
        {
                .base           = GICD_CTLR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_ctlr,
        },
        {
                .base           = GICD_TYPER,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_typer,
        },
        {
                .base           = GICD_IIDR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_iidr,
        },
        {
                /* this register is optional, it is RAZ/WI if not implemented */
                .base           = GICD_STATUSR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this write-only register is WI when TYPER.MBIS=0 */
                .base           = GICD_SETSPI_NSR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this write-only register is WI when TYPER.MBIS=0 */
                .base           = GICD_CLRSPI_NSR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base           = GICD_SETSPI_SR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base           = GICD_CLRSPI_SR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICD_IGROUPR,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_rao_wi,
        },
        {
                .base           = GICD_ISENABLER,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_enable_reg_dist,
        },
        {
                .base           = GICD_ICENABLER,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_enable_reg_dist,
        },
        {
                .base           = GICD_ISPENDR,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_pending_reg_dist,
        },
        {
                .base           = GICD_ICPENDR,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_pending_reg_dist,
        },
        {
                .base           = GICD_ISACTIVER,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICD_ICACTIVER,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICD_IPRIORITYR,
                .len            = 0x400,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_priority_reg_dist,
        },
        {
                /* ITARGETSRn is RES0 when ARE=1 */
                .base           = GICD_ITARGETSR,
                .len            = 0x400,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICD_ICFGR,
                .len            = 0x100,
                .bits_per_irq   = 2,
                .handle_mmio    = handle_mmio_cfg_reg_dist,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base           = GICD_IGRPMODR,
                .len            = 0x80,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when DS=1 */
                .base           = GICD_NSACR,
                .len            = 0x100,
                .bits_per_irq   = 2,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base           = GICD_SGIR,
                .len            = 0x04,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base           = GICD_CPENDSGIR,
                .len            = 0x10,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                /* this is RAZ/WI when ARE=1 */
                .base           = GICD_SPENDSGIR,
                .len            = 0x10,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICD_IROUTER + 0x100,
                .len            = 0x1ee0,
                .bits_per_irq   = 64,
                .handle_mmio    = handle_mmio_route_reg,
        },
        {
                .base           = GICD_IDREGS,
                .len            = 0x30,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_idregs,
        },
        {},
};

static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
                                              struct kvm_exit_mmio *mmio,
                                              phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
                                                struct kvm_exit_mmio *mmio,
                                                phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
                                      redist_vcpu->vcpu_id,
                                      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
                                               struct kvm_exit_mmio *mmio,
                                               phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
                                           redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
                                                 struct kvm_exit_mmio *mmio,
                                                 phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
                                             redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
                                            struct kvm_exit_mmio *mmio,
                                            phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;
        u32 *reg;

        reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                   redist_vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        struct kvm_vcpu *redist_vcpu = mmio->private;

        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       redist_vcpu->vcpu_id, offset >> 1);

        return vgic_handle_cfg_reg(reg, mmio, offset);
}

static const struct vgic_io_range vgic_redist_sgi_ranges[] = {
        {
                .base           = GICR_IGROUPR0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_rao_wi,
        },
        {
                .base           = GICR_ISENABLER0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_enable_reg_redist,
        },
        {
                .base           = GICR_ICENABLER0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_enable_reg_redist,
        },
        {
                .base           = GICR_ISPENDR0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_set_pending_reg_redist,
        },
        {
                .base           = GICR_ICPENDR0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_clear_pending_reg_redist,
        },
        {
                .base           = GICR_ISACTIVER0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICR_ICACTIVER0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICR_IPRIORITYR0,
                .len            = 0x20,
                .bits_per_irq   = 8,
                .handle_mmio    = handle_mmio_priority_reg_redist,
        },
        {
                .base           = GICR_ICFGR0,
                .len            = 0x08,
                .bits_per_irq   = 2,
                .handle_mmio    = handle_mmio_cfg_reg_redist,
        },
        {
                .base           = GICR_IGRPMODR0,
                .len            = 0x04,
                .bits_per_irq   = 1,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICR_NSACR,
                .len            = 0x04,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
                                    struct kvm_exit_mmio *mmio,
                                    phys_addr_t offset)
{
        /* since we don't support LPIs, this register is zero for now */
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 reg;
        u64 mpidr;
        struct kvm_vcpu *redist_vcpu = mmio->private;
        int target_vcpu_id = redist_vcpu->vcpu_id;

        /* the upper 32 bits contain the affinity value */
        if ((offset & ~3) == 4) {
                mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
                reg = compress_mpidr(mpidr);

                vgic_reg_access(mmio, &reg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = redist_vcpu->vcpu_id << 8;
        if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
                reg |= GICR_TYPER_LAST;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
        return false;
}
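
/*
 * Illustrative example (not in the original source): with two VCPUs, the
 * lower word of VCPU 1's GICR_TYPER reads as (1 << 8) | GICR_TYPER_LAST,
 * i.e. Processor_Number == 1 plus the Last bit marking the final
 * redistributor in the region.
 */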

static const struct vgic_io_range vgic_redist_ranges[] = {
        {
                .base           = GICR_CTLR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_ctlr_redist,
        },
        {
                .base           = GICR_TYPER,
                .len            = 0x08,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_typer_redist,
        },
        {
                .base           = GICR_IIDR,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_iidr,
        },
        {
                .base           = GICR_WAKER,
                .len            = 0x04,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_raz_wi,
        },
        {
                .base           = GICR_IDREGS,
                .len            = 0x30,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_idregs,
        },
        {},
};

/*
 * This function splits accesses between the distributor and the two
 * redistributor parts (private/SPI). As each redistributor is accessible
 * from any CPU, we have to determine the affected VCPU by taking the faulting
 * address into account. We then pass this VCPU to the handler function via
 * the private parameter.
 */
#define SGI_BASE_OFFSET SZ_64K
static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                struct kvm_exit_mmio *mmio)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long dbase = dist->vgic_dist_base;
        unsigned long rdbase = dist->vgic_redist_base;
        int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
        int vcpu_id;
        const struct vgic_io_range *mmio_range;

        if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
                return vgic_handle_mmio_range(vcpu, run, mmio,
                                              vgic_v3_dist_ranges, dbase);
        }

        if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
            GIC_V3_REDIST_SIZE * nrcpus))
                return false;

        vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
        rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
        mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);

        if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
                rdbase += SGI_BASE_OFFSET;
                mmio_range = vgic_redist_sgi_ranges;
        } else {
                mmio_range = vgic_redist_ranges;
        }
        return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
}
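
/*
 * Worked example (illustrative, assuming GIC_V3_REDIST_SIZE covers the two
 * 64K frames of one redistributor, i.e. 0x20000): a fault at
 * rdbase + 0x30100 yields vcpu_id == 0x30100 / 0x20000 == 1; the remaining
 * offset 0x10100 lies in VCPU 1's SGI frame, where 0x100 is GICR_ISENABLER0.
 */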

static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_queue_irq(vcpu, 0, irq)) {
                vgic_dist_irq_clear_pending(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

static int vgic_v3_map_resources(struct kvm *kvm,
                                 const struct vgic_params *params)
{
        int ret = 0;
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        kvm->arch.vgic.ready = true;
out:
        if (ret)
                kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
}

static int vgic_v3_init_model(struct kvm *kvm)
{
        int i;
        u32 mpidr;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

        dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
                                      GFP_KERNEL);

        if (!dist->irq_spi_mpidr)
                return -ENOMEM;

        /* Initialize the target VCPUs for each IRQ to VCPU 0 */
        mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
        for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
                dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
                dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
                vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
        }

        return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
        dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
        dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
        dist->vm_ops.init_model = vgic_v3_init_model;
        dist->vm_ops.map_resources = vgic_v3_map_resources;

        kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

/*
 * Compare a given affinity (levels 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}
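
/*
 * Illustrative example (not in the original source): for a VCPU with
 * MPIDR Aff1=1, Aff0=2, a request targeting the Aff1=1 cluster with
 * sgi_cpu_mask 0b0100 matches and returns 2; a mask of 0b0001 (Aff0=0
 * only) returns -1.
 */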

#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
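
/*
 * Illustrative expansion (not in the original source):
 * SGI_AFFINITY_LEVEL(reg, 1) extracts the Aff1 field from the written
 * ICC_SGI1R_EL1 value and shifts it to MPIDR bit position [15:8], so the
 * three invocations below rebuild a comparable MPIDR from the register's
 * scattered affinity fields.
 */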

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting an SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all VCPUs except
 * the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        struct vgic_dist *dist = &kvm->arch.vgic;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        int updated = 0;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We take the dist lock here, because we come from the sysregs
         * code path and not from the MMIO one (which already takes the lock).
         */
        spin_lock(&dist->lock);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {

                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                /* Flag the SGI as pending */
                vgic_dist_irq_set_pending(c_vcpu, sgi);
                updated = 1;
                kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
        }
        if (updated)
                vgic_update_state(vcpu->kvm);
        spin_unlock(&dist->lock);
        if (updated)
                vgic_kick_vcpus(vcpu->kvm);
}
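
/*
 * Worked example (illustrative, not in the original source): a guest write
 * of (9UL << ICC_SGI1R_SGI_ID_SHIFT) | (1UL << ICC_SGI1R_AFFINITY_1_SHIFT)
 * | 0b0101 requests SGI 9 for the CPUs with Aff0 == 0 and Aff0 == 2 inside
 * the Aff1 == 1 cluster; with the routing mode bit set instead, the same
 * SGI would go to every VCPU except the sender.
 */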

static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        }

        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return -ENXIO;
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return -ENXIO;
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_v3_create,
        .destroy = vgic_v3_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};