2 * Copyright 2012 Michael Ellerman, IBM Corporation.
3 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
10 #include <linux/kernel.h>
11 #include <linux/kvm_host.h>
12 #include <linux/err.h>
13 #include <linux/gfp.h>
15 #include <asm/uaccess.h>
16 #include <asm/kvm_book3s.h>
17 #include <asm/kvm_ppc.h>
18 #include <asm/hvcall.h>
20 #include <asm/debug.h>
22 #include <linux/debugfs.h>
23 #include <linux/seq_file.h>
25 #include "book3s_xics.h"
/*
 * Debug trace macro: the two alternative XICS_DBG bodies below were
 * presumably selected by an #ifdef (debug vs. production build) that is
 * not visible in this extract -- TODO confirm against the full source.
 */
28 #define XICS_DBG(fmt...) do { } while (0)
30 #define XICS_DBG(fmt...) trace_printk(fmt)
/* Compile-time knobs for the HV real-mode fast path (see kvm_xics_create). */
33 #define ENABLE_REALMODE true
34 #define DEBUG_REALMODE false
40 * Each ICS has a mutex protecting the information about the IRQ
41 * sources and avoiding simultaneous deliveries of the same interrupt.
43 * ICP operations are done via a single compare & swap transaction
44 * (most ICP state fits in the union kvmppc_icp_state)
51 * - To speed up resends, keep a bitmap of "resend" set bits in the
54 * - Speed up server# -> ICP lookup (array ? hash table ?)
56 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
57 * locks array to improve scalability
59 * - ioctl's to save/restore the entire state for snapshot & migration
62 /* -- ICS routines -- */
64 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
/*
 * Inject an external interrupt into its ICS source and attempt delivery.
 * For level-triggered semantics the "asserted" latch is set/cleared
 * locklessly (we are the only setter), then icp_deliver_irq() is called
 * to try to present the interrupt to the target ICP.
 * NOTE(review): this extract is sparse -- the error return for a missing
 * ICS and the asserted-flag assignments fall on lines not shown here.
 */
67 static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
69 	struct ics_irq_state *state;
70 	struct kvmppc_ics *ics;
73 	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);
/* Map the global irq number to its ICS and source index within it. */
75 	ics = kvmppc_xics_find_ics(xics, irq, &src);
77 		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
80 	state = &ics->irq_state[src];
85 	 * We set state->asserted locklessly. This should be fine as
86 	 * we are the only setter, thus concurrent access is undefined
89 	if (level == KVM_INTERRUPT_SET_LEVEL)
91 	else if (level == KVM_INTERRUPT_UNSET) {
96 	/* Attempt delivery */
97 	icp_deliver_irq(xics, NULL, irq);
/*
 * Re-attempt delivery of every source in this ICS whose "resend" flag is
 * set.  The ICS mutex is dropped around each icp_deliver_irq() call:
 * delivery re-takes the ICS lock itself, so holding it here would deadlock.
 */
102 static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
103 struct kvmppc_icp *icp)
107 	mutex_lock(&ics->lock);
109 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
110 		struct ics_irq_state *state = &ics->irq_state[i];
/* The resend-flag test guarding this path sits on lines not shown here. */
115 		XICS_DBG("resend %#x prio %#x\n", state->number,
118 		mutex_unlock(&ics->lock);
119 		icp_deliver_irq(xics, icp, state->number);
120 		mutex_lock(&ics->lock);
123 	mutex_unlock(&ics->lock);
/*
 * Update a source's XIVE (routing) information -- target server, current
 * priority and the priority to restore on unmask -- under the ICS mutex.
 * If the source had a masked-pending or resend condition and the new
 * priority is not MASKED, the pending latch is cleared so the caller can
 * attempt a fresh delivery.  Returns bool; the return statements fall on
 * lines not visible in this extract -- presumably true means "caller must
 * deliver" (see the callers) -- TODO confirm.
 */
126 static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
127 	struct ics_irq_state *state,
128 	u32 server, u32 priority, u32 saved_priority)
132 	mutex_lock(&ics->lock);
134 	state->server = server;
135 	state->priority = priority;
136 	state->saved_priority = saved_priority;
138 	if ((state->masked_pending || state->resend) && priority != MASKED) {
139 		state->masked_pending = 0;
143 	mutex_unlock(&ics->lock);
/*
 * H_SET_XIVE backend: route interrupt "irq" to "server" at "priority".
 * Looks up the ICS source and target ICP, writes the new routing via
 * write_xive(), and if that reports a pending condition was released,
 * immediately attempts delivery.
 */
148 int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
150 	struct kvmppc_xics *xics = kvm->arch.xics;
151 	struct kvmppc_icp *icp;
152 	struct kvmppc_ics *ics;
153 	struct ics_irq_state *state;
/* Error returns for a missing ICS/server are on lines not shown here. */
159 	ics = kvmppc_xics_find_ics(xics, irq, &src);
162 	state = &ics->irq_state[src];
164 	icp = kvmppc_xics_find_server(kvm, server);
168 	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
169 		irq, server, priority,
170 		state->masked_pending, state->resend);
/* Unmasking with a pending latch set requires a delivery attempt now. */
172 	if (write_xive(xics, ics, state, server, priority, priority))
173 		icp_deliver_irq(xics, icp, irq);
/*
 * H_GET_XIVE backend: read back an interrupt's current routing
 * (target server and priority) under the ICS mutex so the pair is
 * returned consistently.
 */
178 int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
180 	struct kvmppc_xics *xics = kvm->arch.xics;
181 	struct kvmppc_ics *ics;
182 	struct ics_irq_state *state;
188 	ics = kvmppc_xics_find_ics(xics, irq, &src);
191 	state = &ics->irq_state[src];
193 	mutex_lock(&ics->lock);
194 	*server = state->server;
195 	*priority = state->priority;
196 	mutex_unlock(&ics->lock);
/*
 * Unmask an interrupt: restore the priority that was saved when it was
 * masked (see kvmppc_xics_int_off) and attempt delivery if a pending
 * condition was being held back.
 */
201 int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
203 	struct kvmppc_xics *xics = kvm->arch.xics;
204 	struct kvmppc_icp *icp;
205 	struct kvmppc_ics *ics;
206 	struct ics_irq_state *state;
212 	ics = kvmppc_xics_find_ics(xics, irq, &src);
215 	state = &ics->irq_state[src];
/* Look up the ICP currently targeted by this source. */
217 	icp = kvmppc_xics_find_server(kvm, state->server);
221 	if (write_xive(xics, ics, state, state->server, state->saved_priority,
222 		state->saved_priority))
223 		icp_deliver_irq(xics, icp, irq);
/*
 * Mask an interrupt: set its priority to MASKED while preserving the
 * current priority in saved_priority so kvmppc_xics_int_on() can
 * restore it later.  No delivery attempt is needed when masking.
 */
228 int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
230 	struct kvmppc_xics *xics = kvm->arch.xics;
231 	struct kvmppc_ics *ics;
232 	struct ics_irq_state *state;
238 	ics = kvmppc_xics_find_ics(xics, irq, &src);
241 	state = &ics->irq_state[src];
243 	write_xive(xics, ics, state, state->server, MASKED, state->priority);
248 /* -- ICP routines, including hcalls -- */
/*
 * Attempt one atomic ICP state transition: recompute the interrupt
 * output (out_ee) from the new state, then cmpxchg the whole
 * kvmppc_icp_state union in one 64-bit transaction.  Returns the
 * cmpxchg success; on failure the caller re-reads state and retries.
 * On success, if the new state asserts the output, an EXTERNAL_LEVEL
 * interrupt is queued to the vcpu and the vcpu is kicked.
 */
250 static inline bool icp_try_update(struct kvmppc_icp *icp,
251 	union kvmppc_icp_state old,
252 	union kvmppc_icp_state new,
257 	/* Calculate new output value */
258 	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
260 	/* Attempt atomic update */
261 	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
265 	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
267 		old.cppr, old.mfrr, old.pending_pri, old.xisr,
268 		old.need_resend, old.out_ee);
269 	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
270 		new.cppr, new.mfrr, new.pending_pri, new.xisr,
271 		new.need_resend, new.out_ee);
273 	 * Check for output state update
275 	 * Note that this is racy since another processor could be updating
276 	 * the state already. This is why we never clear the interrupt output
277 	 * here, we only ever set it. The clear only happens prior to doing
278 	 * an update and only by the processor itself. Currently we do it
279 	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
281 	 * We also do not try to figure out whether the EE state has changed,
282 	 * we unconditionally set it if the new state calls for it. The reason
283 	 * for that is that we opportunistically remove the pending interrupt
284 	 * flag when raising CPPR, so we need to set it back here if an
285 	 * interrupt is still pending.
288 	kvmppc_book3s_queue_irqprio(icp->vcpu,
289 		BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
291 	kvmppc_fast_vcpu_kick(icp->vcpu);
/*
 * Walk the ICP's per-ICS resend bitmap and run ics_check_resend() for
 * every ICS whose bit we successfully test-and-clear (the atomic
 * test_and_clear_bit ensures only one walker processes each ICS).
 */
297 static void icp_check_resend(struct kvmppc_xics *xics,
298 	struct kvmppc_icp *icp)
302 	/* Order this load with the test for need_resend in the caller */
304 	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
305 		struct kvmppc_ics *ics = xics->ics[icsid];
307 		if (!test_and_clear_bit(icsid, icp->resend_map))
/* Skip lines (NULL-ics check) fall between the visible lines here. */
311 		ics_check_resend(xics, ics, icp);
/*
 * Try to present "irq" at "priority" to this ICP via the atomic
 * state-update loop.  Delivery succeeds only if the priority is more
 * favored (numerically lower) than the current CPPR, MFRR and pending
 * priority; in that case any previously-pending XISR is returned in
 * *reject for the caller to re-deliver.  On failure, need_resend is set
 * inside the same transaction so a later CPPR change triggers a resend.
 */
315 static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
318 	union kvmppc_icp_state old_state, new_state;
321 	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
/* Snapshot the state; the loop below retries until the cmpxchg wins. */
325 		old_state = new_state = ACCESS_ONCE(icp->state);
329 		/* See if we can deliver */
330 		success = new_state.cppr > priority &&
331 			new_state.mfrr > priority &&
332 			new_state.pending_pri > priority;
335 		 * If we can, check for a rejection and perform the
339 			*reject = new_state.xisr;
340 			new_state.xisr = irq;
341 			new_state.pending_pri = priority;
344 			 * If we failed to deliver we set need_resend
345 			 * so a subsequent CPPR state change causes us
346 			 * to try a new delivery.
348 			new_state.need_resend = true;
351 	} while (!icp_try_update(icp, old_state, new_state, false));
/*
 * Deliver (or re-deliver after a rejection) interrupt "new_irq" to the
 * ICP currently targeted by its ICS source.  Handles masked sources by
 * latching masked_pending, and on delivery failure records the resend
 * state in both the ICS and the ICP's resend bitmap.  The retry
 * behaviour for rejected interrupts and for a racing need_resend clear
 * relies on goto-loops whose labels fall on lines not shown here.
 */
356 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
359 	struct ics_irq_state *state;
360 	struct kvmppc_ics *ics;
365 	 * This is used both for initial delivery of an interrupt and
366 	 * for subsequent rejection.
368 	 * Rejection can be racy vs. resends. We have evaluated the
369 	 * rejection in an atomic ICP transaction which is now complete,
370 	 * so potentially the ICP can already accept the interrupt again.
372 	 * So we need to retry the delivery. Essentially the reject path
373 	 * boils down to a failed delivery. Always.
375 	 * Now the interrupt could also have moved to a different target,
376 	 * thus we may need to re-do the ICP lookup as well
380 	/* Get the ICS state and lock it */
381 	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
383 	XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
386 	state = &ics->irq_state[src];
388 	/* Get a lock on the ICS */
389 	mutex_lock(&ics->lock);
/* Re-resolve the target ICP in case the routing changed since last try. */
392 	if (!icp || state->server != icp->server_num) {
393 		icp = kvmppc_xics_find_server(xics->kvm, state->server);
395 		pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
396 			new_irq, state->server);
401 	/* Clear the resend bit of that interrupt */
405 	 * If masked, bail out
407 	 * Note: PAPR doesn't mention anything about masked pending
408 	 * when doing a resend, only when doing a delivery.
410 	 * However that would have the effect of losing a masked
411 	 * interrupt that was rejected and isn't consistent with
412 	 * the whole masked_pending business which is about not
413 	 * losing interrupts that occur while masked.
415 	 * I don't differentiate normal deliveries and resends, this
416 	 * implementation will differ from PAPR and not lose such
419 	if (state->priority == MASKED) {
420 		XICS_DBG("irq %#x masked pending\n", new_irq);
421 		state->masked_pending = 1;
426 	 * Try the delivery, this will set the need_resend flag
427 	 * in the ICP as part of the atomic transaction if the
428 	 * delivery is not possible.
430 	 * Note that if successful, the new delivery might have itself
431 	 * rejected an interrupt that was "delivered" before we took the
434 	 * In this case we do the whole sequence all over again for the
435 	 * new guy. We cannot assume that the rejected interrupt is less
436 	 * favored than the new one, and thus doesn't need to be delivered,
437 	 * because by the time we exit icp_try_to_deliver() the target
438 	 * processor may well have already consumed & completed it, and thus
439 	 * the rejected interrupt might actually be already acceptable.
441 	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
443 		 * Delivery was successful, did we reject somebody else ?
445 		if (reject && reject != XICS_IPI) {
446 			mutex_unlock(&ics->lock);
/* The retry (goto) for the rejected irq sits on lines not shown here. */
452 		 * We failed to deliver the interrupt we need to set the
453 		 * resend map bit and mark the ICS state as needing a resend
455 		set_bit(ics->icsid, icp->resend_map);
459 		 * If the need_resend flag got cleared in the ICP some time
460 		 * between icp_try_to_deliver() atomic update and now, then
461 		 * we know it might have missed the resend_map bit. So we
465 		if (!icp->state.need_resend) {
466 			mutex_unlock(&ics->lock);
471 	mutex_unlock(&ics->lock);
/*
 * Lower the ICP's CPPR to new_cppr (making it less favored) inside the
 * atomic update loop, opportunistically latching an IPI if the MFRR
 * becomes deliverable, and latch-then-clear need_resend.  Any latched
 * resend is processed after the transaction via icp_check_resend().
 * Shared by H_CPPR (lowering) and H_EOI.
 */
474 static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
477 	union kvmppc_icp_state old_state, new_state;
481 	 * This handles several related states in one operation:
483 	 * ICP State: Down_CPPR
485 	 * Load CPPR with new value and if the XISR is 0
486 	 * then check for resends:
490 	 * If MFRR is more favored than CPPR, check for IPIs
491 	 * and notify ICS of a potential resend. This is done
492 	 * asynchronously (when used in real mode, we will have
495 	 * We do not handle the complete Check_IPI as documented
496 	 * here. In the PAPR, this state will be used for both
497 	 * Set_MFRR and Down_CPPR. However, we know that we aren't
498 	 * changing the MFRR state here so we don't need to handle
499 	 * the case of an MFRR causing a reject of a pending irq,
500 	 * this will have been handled when the MFRR was set in the
503 	 * Thus we don't have to handle rejects, only resends.
505 	 * When implementing real mode for HV KVM, resend will lead to
506 	 * a H_TOO_HARD return and the whole transaction will be handled
510 		old_state = new_state = ACCESS_ONCE(icp->state);
513 		new_state.cppr = new_cppr;
516 		 * Cut down Resend / Check_IPI / IPI
518 		 * The logic is that we cannot have a pending interrupt
519 		 * trumped by an IPI at this point (see above), so we
520 		 * know that either the pending interrupt is already an
521 		 * IPI (in which case we don't care to override it) or
522 		 * it's either more favored than us or non existent
524 		if (new_state.mfrr < new_cppr &&
525 			new_state.mfrr <= new_state.pending_pri) {
526 			WARN_ON(new_state.xisr != XICS_IPI &&
527 				new_state.xisr != 0);
528 			new_state.pending_pri = new_state.mfrr;
529 			new_state.xisr = XICS_IPI;
532 		/* Latch/clear resend bit */
533 		resend = new_state.need_resend;
534 		new_state.need_resend = 0;
536 	} while (!icp_try_update(icp, old_state, new_state, true));
539 	 * Now handle resend checks. Those are asynchronous to the ICP
540 	 * state update in HW (ie bus transactions) so we can handle them
541 	 * separately here too
544 	icp_check_resend(xics, icp);
/*
 * H_XIRR (Accept_Interrupt): dequeue the external interrupt from the
 * vcpu, then atomically return XISR|CPPR<<24, clear XISR and raise CPPR
 * to the pending priority so no less-favored interrupt can preempt the
 * one just accepted.
 */
547 static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
549 	union kvmppc_icp_state old_state, new_state;
550 	struct kvmppc_icp *icp = vcpu->arch.icp;
553 	/* First, remove EE from the processor */
554 	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
555 		BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
558 	 * ICP State: Accept_Interrupt
560 	 * Return the pending interrupt (if any) along with the
561 	 * current CPPR, then clear the XISR & set CPPR to the
565 		old_state = new_state = ACCESS_ONCE(icp->state);
/* Compose the XIRR return value: pending source in low 24 bits, CPPR on top. */
567 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
570 		new_state.cppr = new_state.pending_pri;
571 		new_state.pending_pri = 0xff;
574 	} while (!icp_try_update(icp, old_state, new_state, true));
576 	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
/*
 * H_IPI (Set_MFRR): set the target ICP's MFRR.  If the new MFRR is more
 * favored than CPPR, the IPI replaces any pending interrupt (rejecting
 * it for re-delivery).  If the MFRR is being raised (less favored), a
 * latched need_resend is processed afterwards.  "local" tracks whether
 * the target is the calling vcpu (affects the kick in icp_try_update).
 */
581 static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
584 	union kvmppc_icp_state old_state, new_state;
585 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
586 	struct kvmppc_icp *icp;
591 	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
592 		vcpu->vcpu_id, server, mfrr);
594 	icp = vcpu->arch.icp;
595 	local = icp->server_num == server;
/* Not self-targeted: look up the destination ICP by server number. */
597 	icp = kvmppc_xics_find_server(vcpu->kvm, server);
603 	 * ICP state: Set_MFRR
605 	 * If the CPPR is more favored than the new MFRR, then
606 	 * nothing needs to be rejected as there can be no XISR to
607 	 * reject. If the MFRR is being made less favored then
608 	 * there might be a previously-rejected interrupt needing
611 	 * If the CPPR is less favored, then we might be replacing
612 	 * an interrupt, and thus need to possibly reject it as in
614 	 * ICP state: Check_IPI
617 		old_state = new_state = ACCESS_ONCE(icp->state);
620 		new_state.mfrr = mfrr;
625 		if (mfrr < new_state.cppr) {
626 			/* Reject a pending interrupt if not an IPI */
627 			if (mfrr <= new_state.pending_pri)
628 				reject = new_state.xisr;
629 			new_state.pending_pri = mfrr;
630 			new_state.xisr = XICS_IPI;
633 		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
634 			resend = new_state.need_resend;
635 			new_state.need_resend = 0;
637 	} while (!icp_try_update(icp, old_state, new_state, local));
/* Post-transaction: re-deliver any non-IPI interrupt we bumped out. */
640 	if (reject && reject != XICS_IPI)
641 		icp_deliver_irq(xics, icp, reject);
645 	icp_check_resend(xics, icp);
/*
 * H_CPPR (Set_CPPR): lowering priority is delegated to icp_down_cppr();
 * raising it may reject the currently-pending interrupt, which is then
 * re-delivered via icp_deliver_irq().  The unsynchronized CPPR compare
 * is safe because only the owning processor ever changes its own CPPR.
 */
650 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
652 	union kvmppc_icp_state old_state, new_state;
653 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
654 	struct kvmppc_icp *icp = vcpu->arch.icp;
657 	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
660 	 * ICP State: Set_CPPR
662 	 * We can safely compare the new value with the current
663 	 * value outside of the transaction as the CPPR is only
664 	 * ever changed by the processor on itself
666 	if (cppr > icp->state.cppr)
667 		icp_down_cppr(xics, icp, cppr);
668 	else if (cppr == icp->state.cppr)
/* No-op when unchanged; the early return is on a line not shown here. */
674 	 * The processor is raising its priority, this can result
675 	 * in a rejection of a pending interrupt:
677 	 * ICP State: Reject_Current
679 	 * We can remove EE from the current processor, the update
680 	 * transaction will set it again if needed
682 	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
683 		BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
686 		old_state = new_state = ACCESS_ONCE(icp->state);
689 		new_state.cppr = cppr;
691 		if (cppr <= new_state.pending_pri) {
692 			reject = new_state.xisr;
694 			new_state.pending_pri = 0xff;
697 	} while (!icp_try_update(icp, old_state, new_state, true));
700 	 * Check for rejects. They are handled by doing a new delivery
701 	 * attempt (see comments in icp_deliver_irq).
703 	if (reject && reject != XICS_IPI)
704 		icp_deliver_irq(xics, icp, reject);
/*
 * H_EOI: restore the CPPR from the top byte of the passed XIRR (the
 * Down_CPPR part, shared with H_CPPR), then, for non-IPI sources, do a
 * lockless peek at the ICS state and re-deliver the interrupt if its
 * level "asserted" latch is still set.
 */
707 static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
709 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
710 	struct kvmppc_icp *icp = vcpu->arch.icp;
711 	struct kvmppc_ics *ics;
712 	struct ics_irq_state *state;
/* Low 24 bits of the XIRR identify the interrupt source being EOI'd. */
713 	u32 irq = xirr & 0x00ffffff;
716 	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
721 	 * Note: If EOI is incorrectly used by SW to lower the CPPR
722 	 * value (ie more favored), we do not check for rejection of
723 	 * a pending interrupt, this is a SW error and PAPR specifies
724 	 * that we don't have to deal with it.
726 	 * The sending of an EOI to the ICS is handled after the
729 	 * ICP State: Down_CPPR which we handle
730 	 * in a separate function as it's shared with H_CPPR.
732 	icp_down_cppr(xics, icp, xirr >> 24);
734 	/* IPIs have no EOI */
738 	 * EOI handling: If the interrupt is still asserted, we need to
739 	 * resend it. We can take a lockless "peek" at the ICS state here.
741 	 * "Message" interrupts will never have "asserted" set
743 	ics = kvmppc_xics_find_ics(xics, irq, &src);
745 	XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
748 	state = &ics->irq_state[src];
750 	/* Still asserted, resend it */
752 	icp_deliver_irq(xics, icp, irq);
/*
 * Complete an hcall that the real-mode (HV) handler punted to virtual
 * mode: perform whatever deferred actions it recorded in rm_action
 * (vcpu kick, resend check, rejected-irq re-delivery).
 */
757 static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
759 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
760 	struct kvmppc_icp *icp = vcpu->arch.icp;
762 	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
763 		hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
765 	if (icp->rm_action & XICS_RM_KICK_VCPU)
766 		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
767 	if (icp->rm_action & XICS_RM_CHECK_RESEND)
768 		icp_check_resend(xics, icp);
769 	if (icp->rm_action & XICS_RM_REJECT)
770 		icp_deliver_irq(xics, icp, icp->rm_reject);
/*
 * Top-level XICS hcall dispatcher: validates that a XICS/ICP exists,
 * finishes any pending real-mode action, then routes H_XIRR / H_CPPR /
 * H_EOI / H_IPI to their handlers.  The switch framing and error
 * returns fall on lines not visible in this extract.
 */
777 int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
779 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
783 	/* Check if we have an ICP */
784 	if (!xics || !vcpu->arch.icp)
787 	/* Check for real mode returning too hard */
789 	return kvmppc_xics_rm_complete(vcpu, req);
/* H_XIRR result is returned to the guest in GPR4. */
793 	res = kvmppc_h_xirr(vcpu);
794 	kvmppc_set_gpr(vcpu, 4, res);
797 	kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
800 	rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
803 	rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
804 		kvmppc_get_gpr(vcpu, 5));
812 /* -- Initialisation code etc. -- */
/*
 * debugfs seq_file show routine: dump every vcpu's ICP state (lockless
 * raw snapshot) followed by every ICS's per-source state (under the
 * ICS mutex).
 */
814 static int xics_debug_show(struct seq_file *m, void *private)
816 	struct kvmppc_xics *xics = m->private;
817 	struct kvm *kvm = xics->kvm;
818 	struct kvm_vcpu *vcpu;
824 	seq_printf(m, "=========\nICP state\n=========\n");
826 	kvm_for_each_vcpu(i, vcpu, kvm) {
827 		struct kvmppc_icp *icp = vcpu->arch.icp;
828 		union kvmppc_icp_state state;
/* One racy but consistent 64-bit snapshot of the ICP state word. */
833 		state.raw = ACCESS_ONCE(icp->state.raw);
834 		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
835 			icp->server_num, state.xisr,
836 			state.pending_pri, state.cppr, state.mfrr,
837 			state.out_ee, state.need_resend);
840 	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
841 		struct kvmppc_ics *ics = xics->ics[icsid];
846 		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
849 		mutex_lock(&ics->lock);
851 		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
852 			struct ics_irq_state *irq = &ics->irq_state[i];
854 			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
855 				irq->number, irq->server, irq->priority,
856 				irq->saved_priority, irq->asserted,
857 				irq->resend, irq->masked_pending);
860 		mutex_unlock(&ics->lock);
/* debugfs open: bind the seq_file single-show to xics_debug_show. */
865 static int xics_debug_open(struct inode *inode, struct file *file)
867 	return single_open(file, xics_debug_show, inode->i_private);
/* File operations for the kvm-xics debugfs entry (read-only dump). */
870 static const struct file_operations xics_debug_fops = {
871 	.open = xics_debug_open,
874 	.release = single_release,
/*
 * Create the per-VM "kvm-xics-<ptr>" debugfs file; on allocation
 * failure of the name, log and skip (debugfs is best-effort only).
 */
877 static void xics_debugfs_init(struct kvmppc_xics *xics)
881 	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
883 	pr_err("%s: no memory for name\n", __func__);
887 	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
888 		xics, &xics_debug_fops);
890 	pr_debug("%s: created %s\n", __func__, name);
/*
 * Lazily allocate the ICS covering "irq" (icsid = irq >> ICS_SHIFT).
 * Serialized by kvm->lock; if another caller raced us and the ICS
 * already exists, that one is returned.  All sources start MASKED.
 */
894 struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
895 	struct kvmppc_xics *xics, int irq)
897 	struct kvmppc_ics *ics;
900 	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
902 	mutex_lock(&kvm->lock);
904 	/* ICS already exists - somebody else got here first */
905 	if (xics->ics[icsid])
909 	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
913 	mutex_init(&ics->lock);
916 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
917 		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
918 		ics->irq_state[i].priority = MASKED;
919 		ics->irq_state[i].saved_priority = MASKED;
/* Publish the new ICS and keep max_icsid in step for bitmap walks. */
922 	xics->ics[icsid] = ics;
924 	if (icsid > xics->max_icsid)
925 		xics->max_icsid = icsid;
928 	mutex_unlock(&kvm->lock);
929 	return xics->ics[icsid];
/*
 * Create the per-vcpu ICP for "server_num".  Fails if no XICS device
 * exists or the server number is already taken (error-return lines are
 * not visible in this extract).  MFRR and pending_pri start MASKED,
 * i.e. no IPI and nothing pending.
 */
932 int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
934 	struct kvmppc_icp *icp;
936 	if (!vcpu->kvm->arch.xics)
939 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
942 	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
947 	icp->server_num = server_num;
948 	icp->state.mfrr = MASKED;
949 	icp->state.pending_pri = MASKED;
950 	vcpu->arch.icp = icp;
952 	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
/*
 * KVM_IRQ_LINE-style ioctl entry: validate the XICS exists and route
 * SET / SET_LEVEL / UNSET level operations to ics_deliver_irq().
 */
959 int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args)
961 	struct kvmppc_xics *xics;
964 	/* locking against multiple callers? */
966 	xics = kvm->arch.xics;
970 	switch (args->level) {
971 	case KVM_INTERRUPT_SET:
972 	case KVM_INTERRUPT_SET_LEVEL:
973 	case KVM_INTERRUPT_UNSET:
974 		r = ics_deliver_irq(xics, args->irq, args->level);
/*
 * Tear down the XICS device: remove the debugfs entry, unhook it from
 * the VM, and free each allocated ICS (the kfree of each ICS and of
 * xics itself fall on lines not visible in this extract).
 */
983 void kvmppc_xics_free(struct kvmppc_xics *xics)
986 	struct kvm *kvm = xics->kvm;
988 	debugfs_remove(xics->dentry);
991 	kvm->arch.xics = NULL;
993 	for (i = 0; i <= xics->max_icsid; i++)
/*
 * Allocate and install the VM-wide XICS emulation state (under
 * kvm->lock to guard against a concurrent creator), set up debugfs,
 * and enable the real-mode fast path on HV-capable (ARCH 2.06) CPUs.
 */
998 int kvm_xics_create(struct kvm *kvm, u32 type)
1000 	struct kvmppc_xics *xics;
1003 	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
1009 	/* Already there ? */
1010 	mutex_lock(&kvm->lock);
1014 	kvm->arch.xics = xics;
1015 	mutex_unlock(&kvm->lock);
1020 	xics_debugfs_init(xics);
1022 #ifdef CONFIG_KVM_BOOK3S_64_HV
1023 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1024 		/* Enable real mode support */
1025 		xics->real_mode = ENABLE_REALMODE;
1026 		xics->real_mode_dbg = DEBUG_REALMODE;
1028 #endif /* CONFIG_KVM_BOOK3S_64_HV */
/*
 * Free a vcpu's ICP and reset its irq type to the default; NULLing the
 * pointer defends against later use-after-free.
 */
1033 void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1035 	if (!vcpu->arch.icp)
1037 	kfree(vcpu->arch.icp);
1038 	vcpu->arch.icp = NULL;
1039 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;