 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};

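/*
 * A brief sketch of how these structures hang together: each its_device
 * owns an interrupt translation table (itt_head) of its_ite entries, keyed
 * by event_id. Each its_ite points at one shared, refcounted struct
 * vgic_irq (the LPI itself) and at the its_collection that decides which
 * redistributor - and thus which VCPU - the LPI is delivered to.
 */
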
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

int vgic_its_set_abi(struct vgic_its *its, int rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (!device)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator, this macro omits the enclosing parentheses. */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

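/*
 * Note: per the GICv3 architecture, LPIs occupy the INTID space from 8192
 * upwards, which is why guest offsets into the property and pending tables
 * below are computed relative to GIC_LPI_OFFSET.
 */
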
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

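/*
 * For reference: each LPI has one configuration byte in the guest's
 * property table, with bits[7:2] holding the priority and bit[0] the
 * enable bit, which is exactly what the two masks above extract.
 */
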
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}

/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return irq_count;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

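/*
 * Worked example: PROPBASER.IDbits (bits[4:0]) encodes "number of INTID
 * bits minus one", so a value of 13 allows 2^14 = 16384 interrupt IDs, of
 * which everything from GIC_LPI_OFFSET (8192) up is usable for LPIs. The
 * min() above additionally caps this at INTERRUPT_ID_BITS_ITS.
 */
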
/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&ite->irq->irq_lock);
	ite->irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, ite->irq);

	return 0;
}

static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)

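/*
 * For reference, the accessors above decode an ITS command as four
 * little-endian 64-bit doublewords (32 bytes total). For MAPTI, for
 * example: DW0[7:0] is the command number, DW0[63:32] the DeviceID,
 * DW1[31:0] the EventID, DW1[63:32] the physical INTID and DW2[15:0]
 * the collection ID; the V(alid) bit of MAPC/MAPD lives in DW2[63].
 */
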
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr;
	gfn_t gfn;
	int index;
	int esz = GITS_BASER_ENTRY_SIZE(baser);

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}

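/*
 * Worked example for the indirect case: with an entry size of 8 bytes, one
 * 64K second-level page holds 65536 / 8 = 8192 entries. For id 10000 this
 * gives a first-level index of 10000 / 8192 = 1, and an offset of
 * (10000 % 8192) * 8 = 14464 bytes into the second-level page pointed to
 * by that first-level entry.
 */
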
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * easily get away with it as well.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;
	struct vgic_irq *irq;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = kzalloc(sizeof(struct its_ite), GFP_KERNEL);
	if (!ite) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return -ENOMEM;
	}

	ite->event_id = event_id;
	list_add_tail(&ite->ite_list, &device->itt_head);

	ite->collection = collection;
	ite->lpi = lpi_nr;

	irq = vgic_add_lpi(kvm, lpi_nr);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	update_affinity_ite(kvm, ite);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	update_lpi_config(kvm, ite->irq, NULL);

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

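/*
 * Note that target_addr here is a linear VCPU number rather than a
 * redistributor base address: vgic_mmio_read_its_typer() above reports
 * GITS_TYPER.PTA as 0, so the guest is expected to program exactly these
 * processor numbers into its MAPC commands.
 */
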
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

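/*
 * Sizing example: CBASER.Size (bits[7:0]) encodes the number of 4K pages
 * minus one, so Size = 0 yields a 4K command queue holding 4096 / 32 = 128
 * of the 32-byte commands, while Size = 255 allows the architectural
 * maximum of 1MB.
 */
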
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

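/*
 * GICR_PENDBASER.PTZ is the guest's promise that the pending table is all
 * zeroes, so the (potentially expensive) scan of the table performed by
 * its_sync_lpi_pending_table() can be skipped in that case.
 */
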
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_ite *ite;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			ite = (container_of(cur, struct its_ite, ite_list));
			its_free_ite(kvm, ite);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}

int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}

int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers.
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	return -ENXIO;
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	return -ENXIO;
}

static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	return 0;
}

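/*
 * Entry-size example, assuming GIC_ENCODE_SZ() keeps its usual "value
 * minus one" encoding: the BASER-style size fields store bytes minus one,
 * so GIC_ENCODE_SZ(8, 5) turns the 8-byte v0 entry size into the value 7
 * in a 5-bit wide field.
 */
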
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}

/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			/*
			 * We don't need to care about tearing down previously
			 * registered ITSes, as the kvm_io_bus framework
			 * removes them for us if the VM gets destroyed.
			 */
			break;
	}

	return ret;
}