KVM: PPC: Book3S: Facilities to save/restore XICS presentation ctrler state
author	Paul Mackerras <paulus@samba.org>
Wed, 17 Apr 2013 20:32:26 +0000 (20:32 +0000)
committer	Alexander Graf <agraf@suse.de>
Fri, 26 Apr 2013 18:27:34 +0000 (20:27 +0200)
This adds the ability for userspace to save and restore the state
of the XICS interrupt presentation controllers (ICPs) via the
KVM_GET/SET_ONE_REG interface.  Since there is one ICP per vcpu, we
simply define a new 64-bit register in the ONE_REG space for the ICP
state.  The state includes the CPU priority setting, the pending IPI
priority, and the priority and source number of any pending external
interrupt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
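
[Usage sketch, not part of the patch] A VMM can save and restore the new register with the existing KVM_GET_ONE_REG / KVM_SET_ONE_REG vcpu ioctls. The helper names and the vcpu_fd parameter below are illustrative, and the example assumes uapi headers from a kernel that carries this patch:

/*
 * Illustrative only: save/restore a vcpu's ICP state through ONE_REG.
 * vcpu_fd is an open KVM vcpu file descriptor; error handling is minimal.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int icp_state_get(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ICP_STATE,
		.addr = (uintptr_t)val,		/* kernel writes the 64-bit state here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int icp_state_set(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_ICP_STATE,
		.addr = (uintptr_t)&val,	/* kernel reads the new state from here */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

A get followed immediately by a set of the same value should be a semantic no-op, which makes this a convenient smoke test for migration support.
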
Documentation/virtual/kvm/api.txt
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_xics.c

index fb308be8521bf76fe74f22ae7499e38bed35c245..c09d1832e9357566310f67c69c42074a35d9da77 100644
@@ -1808,6 +1808,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TLB2PS   | 32
   PPC   | KVM_REG_PPC_TLB3PS   | 32
   PPC   | KVM_REG_PPC_EPTCFG   | 32
+  PPC   | KVM_REG_PPC_ICP_STATE | 64
 
 ARM registers are mapped using the lower 32 bits.  The upper 16 of that
 is the register group type, or coprocessor number:
index cfaa47995c0e830d41f56b88a87cd8376bb3b70e..d7339df19259cb5ce23a32d7e7bf0b23d0f62f9a 100644
@@ -313,6 +313,8 @@ extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
 extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
 extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
 extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
 #else
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
index eb9e25c194adb7bf6e97e3ee2fe6b6f29a35ad57..427b9aca2a0fc4adda202fa040a3828416a96ac9 100644
@@ -390,6 +390,18 @@ struct kvm_get_htab_header {
        __u16   n_invalid;
 };
 
+/* Per-vcpu XICS interrupt controller state */
+#define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
+
+#define  KVM_REG_PPC_ICP_CPPR_SHIFT    56      /* current proc priority */
+#define  KVM_REG_PPC_ICP_CPPR_MASK     0xff
+#define  KVM_REG_PPC_ICP_XISR_SHIFT    32      /* interrupt status field */
+#define  KVM_REG_PPC_ICP_XISR_MASK     0xffffff
+#define  KVM_REG_PPC_ICP_MFRR_SHIFT    24      /* pending IPI priority */
+#define  KVM_REG_PPC_ICP_MFRR_MASK     0xff
+#define  KVM_REG_PPC_ICP_PPRI_SHIFT    16      /* pending irq priority */
+#define  KVM_REG_PPC_ICP_PPRI_MASK     0xff
+
 /* Device control API: PPC-specific devices */
 #define KVM_DEV_MPIC_GRP_MISC          1
 #define   KVM_DEV_MPIC_BASE_ADDR       0       /* 64-bit */
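
[Illustration only, not part of the patch] Given the layout above, userspace could unpack the four fields from the 64-bit register value with helpers along these lines (assuming <stdint.h> and the uapi header defining the macros are included):

/* Illustrative decode of the KVM_REG_PPC_ICP_STATE value. */
static inline uint8_t icp_cppr(uint64_t v)	/* current processor priority */
{
	return (v >> KVM_REG_PPC_ICP_CPPR_SHIFT) & KVM_REG_PPC_ICP_CPPR_MASK;
}

static inline uint32_t icp_xisr(uint64_t v)	/* pending external interrupt source */
{
	return (v >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK;
}

static inline uint8_t icp_mfrr(uint64_t v)	/* pending IPI priority */
{
	return (v >> KVM_REG_PPC_ICP_MFRR_SHIFT) & KVM_REG_PPC_ICP_MFRR_MASK;
}

static inline uint8_t icp_ppri(uint64_t v)	/* priority of pending interrupt */
{
	return (v >> KVM_REG_PPC_ICP_PPRI_SHIFT) & KVM_REG_PPC_ICP_PPRI_MASK;
}
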
index 1a4d787df507f4bb5d21d2c8e11cf54fe989f50f..700df6f1d32c5a6ad88542db1a0c8ccd1bbe26d4 100644
@@ -535,6 +535,15 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                         &opcode, sizeof(u32));
                        break;
                }
+#ifdef CONFIG_KVM_XICS
+               case KVM_REG_PPC_ICP_STATE:
+                       if (!vcpu->arch.icp) {
+                               r = -ENXIO;
+                               break;
+                       }
+                       val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
+                       break;
+#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
@@ -597,6 +606,16 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                        vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_KVM_XICS
+               case KVM_REG_PPC_ICP_STATE:
+                       if (!vcpu->arch.icp) {
+                               r = -ENXIO;
+                               break;
+                       }
+                       r = kvmppc_xics_set_icp(vcpu,
+                                               set_reg_val(reg->id, val));
+                       break;
+#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
index 9fb2d3909c463bf501d59d5d3e1bcf3181db86d0..ee841ed8a69025a1228ae061f3de55e4353c86d6 100644
@@ -954,6 +954,96 @@ int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
        return 0;
 }
 
+u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
+{
+       struct kvmppc_icp *icp = vcpu->arch.icp;
+       union kvmppc_icp_state state;
+
+       if (!icp)
+               return 0;
+       state = icp->state;
+       return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
+               ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
+               ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
+               ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
+}
+
+int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+       struct kvmppc_icp *icp = vcpu->arch.icp;
+       struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
+       union kvmppc_icp_state old_state, new_state;
+       struct kvmppc_ics *ics;
+       u8 cppr, mfrr, pending_pri;
+       u32 xisr;
+       u16 src;
+       bool resend;
+
+       if (!icp || !xics)
+               return -ENOENT;
+
+       cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+       xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+               KVM_REG_PPC_ICP_XISR_MASK;
+       mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+       pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
+
+       /* Require the new state to be internally consistent */
+       if (xisr == 0) {
+               if (pending_pri != 0xff)
+                       return -EINVAL;
+       } else if (xisr == XICS_IPI) {
+               if (pending_pri != mfrr || pending_pri >= cppr)
+                       return -EINVAL;
+       } else {
+               if (pending_pri >= mfrr || pending_pri >= cppr)
+                       return -EINVAL;
+               ics = kvmppc_xics_find_ics(xics, xisr, &src);
+               if (!ics)
+                       return -EINVAL;
+       }
+
+       new_state.raw = 0;
+       new_state.cppr = cppr;
+       new_state.xisr = xisr;
+       new_state.mfrr = mfrr;
+       new_state.pending_pri = pending_pri;
+
+       /*
+        * Deassert the CPU interrupt request.
+        * icp_try_update will reassert it if necessary.
+        */
+       kvmppc_book3s_dequeue_irqprio(icp->vcpu,
+                                     BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+       /*
+        * Note that if we displace an interrupt from old_state.xisr,
+        * we don't mark it as rejected.  We expect userspace to set
+        * the state of the interrupt sources to be consistent with
+        * the ICP states (either before or afterwards, which doesn't
+        * matter).  We do handle resends due to CPPR becoming less
+        * favoured because that is necessary to end up with a
+        * consistent state in the situation where userspace restores
+        * the ICS states before the ICP states.
+        */
+       do {
+               old_state = ACCESS_ONCE(icp->state);
+
+               if (new_state.mfrr <= old_state.mfrr) {
+                       resend = false;
+                       new_state.need_resend = old_state.need_resend;
+               } else {
+                       resend = old_state.need_resend;
+                       new_state.need_resend = 0;
+               }
+       } while (!icp_try_update(icp, old_state, new_state, false));
+
+       if (resend)
+               icp_check_resend(xics, icp);
+
+       return 0;
+}
+
 /* -- ioctls -- */
 
 int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args)