/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
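
/*
 * Locking convention for the pre-W1S/W1C handlers below: enables take
 * octeon_irq_ciu*_rwlock for reading, because each core only
 * read-modify-writes its own EN register and enables may therefore run
 * concurrently.  Cross-core disables and affinity changes take the
 * lock for writing so that no enable can race with them.
 */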

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled; don't allow that.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}
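
/*
 * The CP0 Status interrupt-mask bits are per core, so a global disable
 * has to run the clear on every core; on_each_cpu() above does that,
 * passing the irq number through the void pointer argument.
 */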

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a problem.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        /* Read the register back to make sure the write has completed. */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
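
/*
 * W1S/W1C ("write one to set"/"write one to clear") registers only
 * change the bits that are written as 1, so the single store above
 * replaces the whole read-modify-write sequence and no cross-core lock
 * is needed.  Writing 1ull << 3 to EN0_W1S, for example, sets enable
 * bit 3 and leaves every other bit untouched.
 */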

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}
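
/*
 * The _v1/_v2 variants above combine the write-one-to-clear of the
 * latched sum0 bit with the normal per-core ack of the corresponding
 * chip generation, so a timer interrupt is both cleared at the source
 * and masked until the eoi.
 */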

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
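
/*
 * The CIU1 handlers below mirror their CIU0 counterparts: the EN1
 * register lives at index coreid * 2 + 1, bit numbers are relative to
 * OCTEON_IRQ_WDOG0 instead of OCTEON_IRQ_WORKQ0, and the per-core
 * summary line is IP3 (0x100 << 3) instead of IP2.
 */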

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a problem.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        /* Read the register back to make sure the write has completed. */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static DEFINE_SPINLOCK(octeon_irq_msi_lock);
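
/*
 * On the PCIe chips all 64 MSI enable bits live in the single
 * NPEI_MSI_ENB0 register, so this one lock serializes every
 * read-modify-write of the enables; see the comments in the enable and
 * disable handlers below.
 */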

static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe.  Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually.  Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved.  MSI
                 * interrupts are always enabled and the ACK is assumed
                 * to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        /* Only the newer chip passes have the EN*_W1{S,C} registers. */
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 16 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif
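
        /*
         * 0x300 << 2 is 0xc00, i.e. Status bits IM2 and IM3, so this
         * unmasks the two CIU summary lines (IP2 and IP3) that
         * plat_irq_dispatch() demultiplexes below.
         */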
        set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}
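
/*
 * A note on the decode above: fls64() returns the 1-based index of the
 * highest set bit, so a sole pending bit n in ciu_sum yields
 * fls64(ciu_sum) == n + 1 and therefore irq == OCTEON_IRQ_WORKQ0 + n
 * (or OCTEON_IRQ_WDOG0 + n for SUM1).  When several bits are pending,
 * the highest numbered source is dispatched first and the surrounding
 * loop picks up the rest.
 */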

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}
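
/*
 * fixup_irqs() below uses this helper to find the CIU sources still
 * routed to the cpu being offlined, so that they can be masked and
 * pointed at the remaining online cpus.
 */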

void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif
        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */