/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
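
/*
 * Illustrative usage sketch (not part of this file): a frontend driver
 * would typically obtain an event-channel port via xenbus and then wire
 * it up through this interface.  The handler and device names below are
 * hypothetical.
 *
 *	static irqreturn_t example_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, example_handler,
 *					0, "example-frontend", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */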
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};
static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}
static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	/* Pending, routed to this cpu, and not globally masked. */
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;

	for (irq = 0; irq < nr_irqs; irq++) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
					     ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
					     ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}
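
/*
 * A worked example of the two-level decode above (numbers purely
 * illustrative): on a 64-bit build, port 130 lives in pending word
 * 130 / BITS_PER_LONG = 2 at bit 130 % BITS_PER_LONG = 2, so the upcall
 * only scans evtchn_pending[2] if bit 2 of evtchn_pending_sel was set.
 */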
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}
void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
		(xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_masked_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
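
/*
 * Hypothetical usage sketch for the polling helpers above (not from
 * this file; the real consumer is the Xen spinlock code).  The loop
 * parks the vcpu in Xen until the irq's event channel pends:
 *
 *	xen_clear_irq_pending(irq);
 *	while (!xen_test_irq_pending(irq))
 *		xen_poll_irq(irq);
 */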
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.eoi		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}