/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	return now >= *next_tb;
}
/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900 if there's
 * either an EE or a DEC to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * code
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if (decrementer_check_overflow())
		return 0x900;

	/* Finally check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Finally check if an EPR external interrupt happened;
	 * this bit is typically set if we need to handle another
	 * "edge" interrupt from within the MPIC "EPR" handler
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events on non-iseries will have caused
	 * interrupts to be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
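
/*
 * Example (illustrative only): the lazy scheme above is what makes an
 * ordinary save/restore pair cheap on 64-bit. local_irq_save() merely
 * clears paca->soft_enabled; an EE or DEC taken inside the critical
 * section is latched into paca->irq_happened (leaving us hard-disabled),
 * and local_irq_restore() lands in arch_local_irq_restore() above, which
 * replays it. Hypothetical sketch, kept compiled out.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* soft-disable only, no mtmsrd */
	/* ... any interrupt arriving here is recorded, not handled ... */
	local_irq_restore(flags);	/* may replay a latched 0x500/0x900 */
}
#endif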
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 */
void restore_interrupts(void)
{
	if (irqs_disabled())
		local_irq_enable();
}

#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now */
	may_hard_irq_enable();

	/* And finally process it */
	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

	trace_irq_exit(regs);
}
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}
/*
 * IRQ controller and virtual interrupts
 */

/* The main irq map itself is an array of NR_IRQ entries containing the
 * associated host and irq number. An entry with a host of NULL is free.
 * An entry can be allocated if it's free, the allocator always then sets
 * hwirq first to the host's invalid irq number and then fills ops.
 */
struct irq_map_entry {
	irq_hw_number_t	hwirq;
	struct irq_host	*host;
};

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return irq_map[d->irq].hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

bool virq_is_host(unsigned int virq, struct irq_host *host)
{
	return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}
struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = kzalloc(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			of_node_put(host->of_node);
			kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);

			/* Clear norequest flags */
			irq_clear_status_flags(i, IRQ_NOREQUEST);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	case IRQ_HOST_MAP_TREE:
		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
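
/*
 * Example (illustrative only): how a board PIC driver typically uses
 * irq_alloc_host(). Everything prefixed "example_" is hypothetical; a
 * real driver would also fill in its irq_chip's mask/unmask/ack
 * callbacks. Kept compiled out.
 */
#if 0
static struct irq_chip example_pic_chip;	/* hw mask/unmask ops omitted */
static struct irq_host *example_pic_host;

static int example_pic_host_map(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	/* Wire each mapped source to our chip and a level flow handler */
	irq_set_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops example_pic_host_ops = {
	.map = example_pic_host_map,
};

static void __init example_pic_init(struct device_node *np)
{
	/* Linear revmap sized for 64 hardware sources; hwirq 0 is invalid */
	example_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
					  &example_pic_host_ops, 0);
	BUG_ON(example_pic_host == NULL);
}
#endif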
struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}
void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	int res;

	res = irq_alloc_desc_at(virq, 0);
	if (res != virq) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_wmb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto errdesc;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;

errdesc:
	irq_free_descs(virq, 1);
error:
	irq_free_virt(virq, 1);
	return -1;
}
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
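
/*
 * Example (illustrative only): driver-side use of the function above.
 * Hardware source number 3 on a host becomes a Linux virq which is then
 * requested like any other interrupt. "example_handler" is a
 * hypothetical irq_handler_t. Kept compiled out.
 */
#if 0
static int example_attach(struct irq_host *host)
{
	unsigned int virq = irq_create_mapping(host, 3);

	if (virq == NO_IRQ)
		return -ENXIO;
	return request_irq(virq, example_handler, 0, "example-dev", NULL);
}
#endif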
unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
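
/*
 * Example (illustrative only): most drivers reach the function above
 * indirectly through irq_of_parse_and_map(), which pulls the interrupt
 * specifier out of the node's "interrupts" property (honouring
 * "interrupt-parent") and then calls irq_create_of_mapping(). The
 * "example_" names are hypothetical. Kept compiled out.
 */
#if 0
static int example_probe(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;
	return request_irq(virq, example_handler, 0, "example-dev", NULL);
}
#endif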
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	if (WARN_ON(host == NULL))
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	irq_free_descs(virq, 1);
	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
		return irq_find_mapping(host, hwirq);

	/*
	 * The ptr returned references the static global irq_map,
	 * but freeing an irq can delete nodes along the path to
	 * do the lookup via call_rcu.
	 */
	rcu_read_lock();
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
	rcu_read_unlock();

	/*
	 * If found in radix tree, then fine.
	 * Else fallback to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
		return irq_find_mapping(host, hwirq);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
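
/*
 * Example (illustrative only): irq_linear_revmap() is meant for hot
 * paths such as a cascaded PIC's flow handler, where the hardware source
 * number just read from the controller must be converted back to a virq
 * without the linear search done by irq_find_mapping(). It reuses the
 * hypothetical example_pic_host from the irq_alloc_host() sketch above;
 * example_pic_pending() stands in for an MMIO read. Kept compiled out.
 */
#if 0
static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	irq_hw_number_t hw = example_pic_pending();
	unsigned int cascade_virq = irq_linear_revmap(example_pic_host, hw);

	if (cascade_virq != NO_IRQ)
		generic_handle_irq(cascade_virq);
	chip->irq_eoi(&desc->irq_data);
}
#endif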
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NUM_ISA_INTERRUPTS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	if (virq < NUM_ISA_INTERRUPTS) {
		if (virq + count < NUM_ISA_INTERRUPTS)
			return;
		count -= NUM_ISA_INTERRUPTS - virq;
		virq = NUM_ISA_INTERRUPTS;
	}

	if (count > irq_virq_count || virq > irq_virq_count - count) {
		if (virq > irq_virq_count)
			return;
		count = irq_virq_count - virq;
	}

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
int arch_early_irq_init(void)
{
	return 0;
}
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %-18s  %s\n", "virq", "hwirq",
		      "chip name", "chip data", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", irq_map[i].hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, "0x%16p  ", data);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */