Merge remote-tracking branch 'kvm/linux-next'
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 30489181922d28d9749feefb552e7b8f0fa97f52..533538385d5d294ce16bae446cc7e2b1578d9b7d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -34,6 +34,9 @@
 #include <asm/kvm.h>
 #include <kvm/iodev.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 /*
  * How the whole thing works (courtesy of Christoffer Dall):
  *
 #include "vgic.h"
 
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
                                                int virt_irq);
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
 
 static const struct vgic_ops *vgic_ops;
 static const struct vgic_params *vgic;
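
[Annotation] The two added lines above follow the standard kernel tracepoint pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including the trace header, so the tracepoint bodies are emitted here while every other includer only gets declarations. Below is a minimal sketch of what the local trace.h plausibly contains for the trace_vgic_update_irq_pending() call added further down in this patch; the event group name, field layout, and include path are assumptions, not taken from this diff. Once built in, the event can be enabled at runtime through /sys/kernel/debug/tracing/events/<group>/vgic_update_irq_pending/enable.

    /* Hedged sketch of a local trace.h; names and layout are assumed. */
    #if !defined(_TRACE_VGIC_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_VGIC_H

    #include <linux/tracepoint.h>

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM kvm

    TRACE_EVENT(vgic_update_irq_pending,
            TP_PROTO(unsigned long vcpu_id, __u32 irq, bool level),
            TP_ARGS(vcpu_id, irq, level),

            TP_STRUCT__entry(
                    __field(unsigned long, vcpu_id)
                    __field(__u32,         irq)
                    __field(bool,          level)
            ),

            TP_fast_assign(
                    __entry->vcpu_id = vcpu_id;
                    __entry->irq     = irq;
                    __entry->level   = level;
            ),

            TP_printk("VCPU: %lu, IRQ %d, level: %d",
                      __entry->vcpu_id, __entry->irq, __entry->level)
    );

    #endif /* _TRACE_VGIC_H */

    /* This part must be outside the include guard; the path depends on
     * the Makefile's -I flags. */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE trace

    #include <trace/define_trace.h>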
@@ -357,6 +362,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
        vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
+       if (!vgic_dist_irq_get_level(vcpu, irq)) {
+               vgic_dist_irq_clear_pending(vcpu, irq);
+               if (!compute_pending_for_cpu(vcpu))
+                       clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
+       }
 }
 
 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
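
[Annotation] Clearing the soft-pending state of a level interrupt can now retire the last pending interrupt for this VCPU, so the cached summary bit in dist->irq_pending_on_cpu is recomputed via compute_pending_for_cpu(). That bitmap caches "VCPU n has at least one pending interrupt" so the run loop can test a single bit instead of rescanning the whole distributor state. A hypothetical debug helper stating the invariant being maintained (not part of the patch):

    /* Hypothetical helper; expresses the invariant kept by the code above. */
    static void vgic_assert_pending_cache(struct kvm_vcpu *vcpu)
    {
            struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

            WARN_ON(!!test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu) !=
                    !!compute_pending_for_cpu(vcpu));
    }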
@@ -531,34 +541,6 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm,
        return false;
 }
 
-/*
- * If a mapped interrupt's state has been modified by the guest such that it
- * is no longer active or pending, without it have gone through the sync path,
- * then the map->active field must be cleared so the interrupt can be taken
- * again.
- */
-static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
-{
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       struct list_head *root;
-       struct irq_phys_map_entry *entry;
-       struct irq_phys_map *map;
-
-       rcu_read_lock();
-
-       /* Check for PPIs */
-       root = &vgic_cpu->irq_phys_map_list;
-       list_for_each_entry_rcu(entry, root, entry) {
-               map = &entry->map;
-
-               if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
-                   !vgic_irq_is_active(vcpu, map->virt_irq))
-                       map->active = false;
-       }
-
-       rcu_read_unlock();
-}
-
 bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset, int vcpu_id)
@@ -589,7 +571,6 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                          vcpu_id, offset);
                vgic_reg_access(mmio, reg, offset, mode);
 
-               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@ -627,7 +608,6 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
 
        if (mmio->is_write) {
-               vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@ -684,10 +664,9 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
-               if (offset < 8) {
-                       *reg = ~0U; /* Force PPIs/SGIs to 1 */
+               /* Ignore writes to read-only SGI and PPI bits */
+               if (offset < 8)
                        return false;
-               }
 
                val = vgic_cfg_compress(val);
                if (offset & 4) {
@@ -713,9 +692,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       u64 elrsr = vgic_get_elrsr(vcpu);
+       unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
        int i;
 
-       for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+       for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
                struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
                /*
@@ -736,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
                 * interrupt then move the active state to the
                 * distributor tracking bit.
                 */
-               if (lr.state & LR_STATE_ACTIVE) {
+               if (lr.state & LR_STATE_ACTIVE)
                        vgic_irq_set_active(vcpu, lr.irq);
-                       lr.state &= ~LR_STATE_ACTIVE;
-               }
 
                /*
                 * Reestablish the pending state on the distributor and the
-                * CPU interface.  It may have already been pending, but that
-                * is fine, then we are only setting a few bits that were
-                * already set.
+                * CPU interface and mark the LR as free for other use.
                 */
-               if (lr.state & LR_STATE_PENDING) {
-                       vgic_dist_irq_set_pending(vcpu, lr.irq);
-                       lr.state &= ~LR_STATE_PENDING;
-               }
-
-               vgic_set_lr(vcpu, i, lr);
-
-               /*
-                * Mark the LR as free for other use.
-                */
-               BUG_ON(lr.state & LR_STATE_MASK);
-               vgic_retire_lr(i, lr.irq, vcpu);
-               vgic_irq_clear_queued(vcpu, lr.irq);
+               vgic_retire_lr(i, vcpu);
 
                /* Finally update the VGIC state. */
                vgic_update_state(vcpu->kvm);
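
[Annotation] This hunk and the ones below replace the software lr_used bitmap with the hardware ELRSR (empty LR status register), where a set bit means the corresponding List Register is empty; occupied LRs are therefore walked with for_each_clear_bit(). The conversion leans on the u64_to_bitmask() helper that already lives in this file; from memory it looks roughly like the sketch below (the swap lets a u64 be viewed as an unsigned long bitmap on 32-bit big-endian), but check the file for the exact form.

    /* Sketch of the existing helper this code depends on. */
    static inline unsigned long *u64_to_bitmask(u64 *val)
    {
    #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
            *val = (*val >> 32) | (*val << 32);     /* swap 32-bit halves */
    #endif
            return (unsigned long *)val;
    }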
@@ -1067,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
        vgic_ops->set_lr(vcpu, lr, vlr);
 }
 
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-                              struct vgic_lr vlr)
-{
-       vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
 {
        return vgic_ops->get_elrsr(vcpu);
@@ -1118,25 +1077,23 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
        vgic_ops->enable(vcpu);
 }
 
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+       vgic_irq_clear_queued(vcpu, vlr.irq);
+
        /*
         * We must transfer the pending state back to the distributor before
         * retiring the LR, otherwise we may lose edge-triggered interrupts.
         */
        if (vlr.state & LR_STATE_PENDING) {
-               vgic_dist_irq_set_pending(vcpu, irq);
+               vgic_dist_irq_set_pending(vcpu, vlr.irq);
                vlr.hwirq = 0;
        }
 
        vlr.state = 0;
        vgic_set_lr(vcpu, lr_nr, vlr);
-       clear_bit(lr_nr, vgic_cpu->lr_used);
-       vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-       vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1150,17 +1107,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
  */
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       u64 elrsr = vgic_get_elrsr(vcpu);
+       unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
        int lr;
 
-       for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+       for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
                struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-               if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
-                       vgic_retire_lr(lr, vlr.irq, vcpu);
-                       if (vgic_irq_is_queued(vcpu, vlr.irq))
-                               vgic_irq_clear_queued(vcpu, vlr.irq);
-               }
+               if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+                       vgic_retire_lr(lr, vcpu);
        }
 }
 
@@ -1200,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
        }
 
        vgic_set_lr(vcpu, lr_nr, vlr);
-       vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1210,8 +1164,9 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
  */
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       u64 elrsr = vgic_get_elrsr(vcpu);
+       unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
        struct vgic_lr vlr;
        int lr;
 
@@ -1222,28 +1177,22 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 
        kvm_debug("Queue IRQ%d\n", irq);
 
-       lr = vgic_cpu->vgic_irq_lr_map[irq];
-
        /* Do we have an active interrupt for the same CPUID? */
-       if (lr != LR_EMPTY) {
+       for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
                vlr = vgic_get_lr(vcpu, lr);
-               if (vlr.source == sgi_source_id) {
+               if (vlr.irq == irq && vlr.source == sgi_source_id) {
                        kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
-                       BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                        vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
                        return true;
                }
        }
 
        /* Try to use another LR for this interrupt */
-       lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
-                              vgic->nr_lr);
+       lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
        if (lr >= vgic->nr_lr)
                return false;
 
        kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-       vgic_cpu->vgic_irq_lr_map[irq] = lr;
-       set_bit(lr, vgic_cpu->lr_used);
 
        vlr.irq = irq;
        vlr.source = sgi_source_id;
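
[Annotation] With vgic_irq_lr_map gone, LR allocation becomes a two-step scan of the same ELRSR bitmap: first look through the occupied LRs for one already carrying this (irq, source) pair to piggyback on, then take the first empty slot. The allocation step in isolation, as a hypothetical helper (the name is invented for illustration):

    /* Hypothetical helper mirroring the allocation done above. */
    static int vgic_find_free_lr(unsigned long *elrsr_ptr, int nr_lr)
    {
            /* In the ELRSR, a set bit means "this List Register is empty". */
            int lr = find_first_bit(elrsr_ptr, nr_lr);

            return lr < nr_lr ? lr : -1;    /* -1: all LRs in use */
    }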
@@ -1338,12 +1287,60 @@ epilog:
        }
 }
 
+static int process_queued_irq(struct kvm_vcpu *vcpu,
+                                  int lr, struct vgic_lr vlr)
+{
+       int pending = 0;
+
+       /*
+        * If the IRQ was EOIed (called from vgic_process_maintenance) or it
+        * went from active to non-active (called from vgic_sync_hwirq) it was
+        * also ACKed and we therefore assume we can clear the soft pending
+        * state (should it have been set) for this interrupt.
+        *
+        * Note: if the IRQ soft pending state was set after the IRQ was
+        * acked, it actually shouldn't be cleared, but we have no way of
+        * knowing that unless we start trapping ACKs when the soft-pending
+        * state is set.
+        */
+       vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
+
+       /*
+        * Tell the gic to start sampling this interrupt again.
+        */
+       vgic_irq_clear_queued(vcpu, vlr.irq);
+
+       /* Any additional pending interrupt? */
+       if (vgic_irq_is_edge(vcpu, vlr.irq)) {
+               BUG_ON(!(vlr.state & LR_HW));
+               pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
+       } else {
+               if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+                       vgic_cpu_irq_set(vcpu, vlr.irq);
+                       pending = 1;
+               } else {
+                       vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+                       vgic_cpu_irq_clear(vcpu, vlr.irq);
+               }
+       }
+
+       /*
+        * Despite being EOIed, the LR may not have
+        * been marked as empty.
+        */
+       vlr.state = 0;
+       vlr.hwirq = 0;
+       vgic_set_lr(vcpu, lr, vlr);
+
+       return pending;
+}
+
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
        u32 status = vgic_get_interrupt_status(vcpu);
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-       bool level_pending = false;
        struct kvm *kvm = vcpu->kvm;
+       int level_pending = 0;
 
        kvm_debug("STATUS = %08x\n", status);
 
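[Annotation] process_queued_irq() above unifies what used to be two diverging epilogues (EOI maintenance and HW-interrupt sync): drop the soft-pending and queued state, decide whether the interrupt must fire again, and free the LR. The re-fire decision differs by trigger type; a hypothetical helper restating just that decision (the full function above additionally updates the distributor pending and CPU-interface bits):

    /* Hypothetical restatement of the re-fire decision above. */
    static int irq_should_refire(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
    {
            /* Edge: re-fires iff a new pending bit was latched in flight. */
            if (vgic_irq_is_edge(vcpu, vlr.irq))
                    return vgic_dist_irq_is_pending(vcpu, vlr.irq);

            /* Level: simply follows the current line level. */
            return vgic_dist_irq_get_level(vcpu, vlr.irq);
    }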
@@ -1358,54 +1355,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
                for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
                        struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
-                       WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
-                       spin_lock(&dist->lock);
-                       vgic_irq_clear_queued(vcpu, vlr.irq);
+                       WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
                        WARN_ON(vlr.state & LR_STATE_MASK);
-                       vlr.state = 0;
-                       vgic_set_lr(vcpu, lr, vlr);
 
-                       /*
-                        * If the IRQ was EOIed it was also ACKed and we we
-                        * therefore assume we can clear the soft pending
-                        * state (should it had been set) for this interrupt.
-                        *
-                        * Note: if the IRQ soft pending state was set after
-                        * the IRQ was acked, it actually shouldn't be
-                        * cleared, but we have no way of knowing that unless
-                        * we start trapping ACKs when the soft-pending state
-                        * is set.
-                        */
-                       vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
 
                        /*
                         * kvm_notify_acked_irq calls kvm_set_irq()
-                        * to reset the IRQ level. Need to release the
-                        * lock for kvm_set_irq to grab it.
+                        * to reset the IRQ level, which grabs the dist->lock
+                        * so we call this before taking the dist->lock.
                         */
-                       spin_unlock(&dist->lock);
-
                        kvm_notify_acked_irq(kvm, 0,
                                             vlr.irq - VGIC_NR_PRIVATE_IRQS);
-                       spin_lock(&dist->lock);
-
-                       /* Any additional pending interrupt? */
-                       if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
-                               vgic_cpu_irq_set(vcpu, vlr.irq);
-                               level_pending = true;
-                       } else {
-                               vgic_dist_irq_clear_pending(vcpu, vlr.irq);
-                               vgic_cpu_irq_clear(vcpu, vlr.irq);
-                       }
 
+                       spin_lock(&dist->lock);
+                       level_pending |= process_queued_irq(vcpu, lr, vlr);
                        spin_unlock(&dist->lock);
-
-                       /*
-                        * Despite being EOIed, the LR may not have
-                        * been marked as empty.
-                        */
-                       vgic_sync_lr_elrsr(vcpu, lr, vlr);
                }
        }
 
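[Annotation] The maintenance loop also untangles the old lock dance: kvm_notify_acked_irq() ends up calling kvm_set_irq(), which takes dist->lock itself, so the code now notifies first and only then takes the lock around process_queued_irq(), instead of dropping and retaking it. The resulting ordering, sketched in isolation from the loop body above:

    /* Lock ordering after this patch (sketch of the loop body above). */
    kvm_notify_acked_irq(kvm, 0, vlr.irq - VGIC_NR_PRIVATE_IRQS);

    spin_lock(&dist->lock);
    level_pending |= process_queued_irq(vcpu, lr, vlr);
    spin_unlock(&dist->lock);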
@@ -1426,35 +1391,40 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 /*
  * Save the physical active state, and reset it to inactive.
  *
- * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ * Return true if there's a pending forwarded interrupt to queue.
  */
-static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct irq_phys_map *map;
+       bool phys_active;
+       bool level_pending;
        int ret;
 
        if (!(vlr.state & LR_HW))
-               return 0;
+               return false;
 
        map = vgic_irq_map_search(vcpu, vlr.irq);
        BUG_ON(!map);
 
        ret = irq_get_irqchip_state(map->irq,
                                    IRQCHIP_STATE_ACTIVE,
-                                   &map->active);
+                                   &phys_active);
 
        WARN_ON(ret);
 
-       if (map->active)
-               return 0;
+       if (phys_active)
+               return false;
 
-       return 1;
+       spin_lock(&dist->lock);
+       level_pending = process_queued_irq(vcpu, lr, vlr);
+       spin_unlock(&dist->lock);
+       return level_pending;
 }
 
 /* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        u64 elrsr;
        unsigned long *elrsr_ptr;
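
[Annotation] vgic_sync_hwirq() now samples the physical active state into a local variable rather than caching it in map->active; the physical GIC becomes the single source of truth for forwarded interrupts, which is why the map->active accessors are deleted later in this patch. The query goes through the generic irqchip-state API; a hedged sketch of the sampling step as a stand-alone helper:

    /* Hedged sketch: sample the physical active state of a forwarded IRQ. */
    static bool vgic_phys_irq_active(int host_irq)
    {
            bool phys_active = false;

            WARN_ON(irq_get_irqchip_state(host_irq, IRQCHIP_STATE_ACTIVE,
                                          &phys_active));
            return phys_active;
    }

The flush side uses the complementary irq_set_irqchip_state() to rearm the physical active state before entering the guest.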
@@ -1462,40 +1432,18 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
        bool level_pending;
 
        level_pending = vgic_process_maintenance(vcpu);
-       elrsr = vgic_get_elrsr(vcpu);
-       elrsr_ptr = u64_to_bitmask(&elrsr);
 
        /* Deal with HW interrupts, and clear mappings for empty LRs */
        for (lr = 0; lr < vgic->nr_lr; lr++) {
-               struct vgic_lr vlr;
-
-               if (!test_bit(lr, vgic_cpu->lr_used))
-                       continue;
-
-               vlr = vgic_get_lr(vcpu, lr);
-               if (vgic_sync_hwirq(vcpu, vlr)) {
-                       /*
-                        * So this is a HW interrupt that the guest
-                        * EOI-ed. Clean the LR state and allow the
-                        * interrupt to be sampled again.
-                        */
-                       vlr.state = 0;
-                       vlr.hwirq = 0;
-                       vgic_set_lr(vcpu, lr, vlr);
-                       vgic_irq_clear_queued(vcpu, vlr.irq);
-                       set_bit(lr, elrsr_ptr);
-               }
-
-               if (!test_bit(lr, elrsr_ptr))
-                       continue;
-
-               clear_bit(lr, vgic_cpu->lr_used);
+               struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
+               level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
                BUG_ON(vlr.irq >= dist->nr_irqs);
-               vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
        }
 
        /* Check if we still have something up our sleeve... */
+       elrsr = vgic_get_elrsr(vcpu);
+       elrsr_ptr = u64_to_bitmask(&elrsr);
        pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
        if (level_pending || pending < vgic->nr_lr)
                set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
@@ -1585,6 +1533,8 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        int enabled;
        bool ret = true, can_inject = true;
 
+       trace_vgic_update_irq_pending(cpuid, irq_num, level);
+
        if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
                return -EINVAL;
 
@@ -1863,30 +1813,6 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
        kfree(entry);
 }
 
-/**
- * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
- *
- * Return the logical active state of a mapped interrupt. This doesn't
- * necessarily reflects the current HW state.
- */
-bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
-{
-       BUG_ON(!map);
-       return map->active;
-}
-
-/**
- * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
- *
- * Set the logical active state of a mapped interrupt. This doesn't
- * immediately affects the HW state.
- */
-void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
-{
-       BUG_ON(!map);
-       map->active = active;
-}
-
 /**
  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
  * @vcpu: The VCPU pointer
@@ -1942,12 +1868,10 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        kfree(vgic_cpu->pending_shared);
        kfree(vgic_cpu->active_shared);
        kfree(vgic_cpu->pend_act_shared);
-       kfree(vgic_cpu->vgic_irq_lr_map);
        vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
        vgic_cpu->pending_shared = NULL;
        vgic_cpu->active_shared = NULL;
        vgic_cpu->pend_act_shared = NULL;
-       vgic_cpu->vgic_irq_lr_map = NULL;
 }
 
 static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
@@ -1958,18 +1882,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
        vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
-       vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
        if (!vgic_cpu->pending_shared
                || !vgic_cpu->active_shared
-               || !vgic_cpu->pend_act_shared
-               || !vgic_cpu->vgic_irq_lr_map) {
+               || !vgic_cpu->pend_act_shared) {
                kvm_vgic_vcpu_destroy(vcpu);
                return -ENOMEM;
        }
 
-       memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
-
        /*
         * Store the number of LRs per vcpu, so we don't have to go
         * all the way to the distributor structure to find out. Only
@@ -2111,14 +2031,24 @@ int vgic_init(struct kvm *kvm)
                        break;
                }
 
-               for (i = 0; i < dist->nr_irqs; i++) {
-                       if (i < VGIC_NR_PPIS)
+               /*
+                * Enable and configure all SGIs to be edge-triggered and
+                * configure all PPIs as level-triggered.
+                */
+               for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+                       if (i < VGIC_NR_SGIS) {
+                               /* SGIs */
                                vgic_bitmap_set_irq_val(&dist->irq_enabled,
                                                        vcpu->vcpu_id, i, 1);
-                       if (i < VGIC_NR_PRIVATE_IRQS)
                                vgic_bitmap_set_irq_val(&dist->irq_cfg,
                                                        vcpu->vcpu_id, i,
                                                        VGIC_CFG_EDGE);
+                       } else if (i < VGIC_NR_PRIVATE_IRQS) {
+                               /* PPIs */
+                               vgic_bitmap_set_irq_val(&dist->irq_cfg,
+                                                       vcpu->vcpu_id, i,
+                                                       VGIC_CFG_LEVEL);
+                       }
                }
 
                vgic_enable(vcpu);
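
[Annotation] The rewritten init loop encodes the GICv2 split of the 32 private interrupts: SGIs 0..15 are always enabled and edge-triggered, while PPIs 16..31 are level-triggered. For reference, the constants used above are defined along these lines in include/kvm/arm_vgic.h (quoted from memory; double-check against the header):

    #define VGIC_NR_SGIS            16      /* IRQs 0..15: software-generated */
    #define VGIC_NR_PPIS            16      /* IRQs 16..31: private peripheral */
    #define VGIC_NR_PRIVATE_IRQS    (VGIC_NR_SGIS + VGIC_NR_PPIS)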