arch/mips/cavium-octeon/octeon-irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

static DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
static DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);

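/*
 * Map a Linux CPU number to the hardware core number it runs on.  On
 * non-SMP kernels only the boot core is ever used.
 */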
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

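/* irq_chip for the interrupt lines handled directly by the MIPS core. */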
static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};


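/*
 * CIU0 covers the 64 interrupt sources reported in CIU_INT_SUM0 and
 * delivered to the core on interrupt line IP2.
 */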
static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here so that enables on different cores
         * can proceed in parallel (each core only updates its own EN0
         * register), while a disable takes the write lock to keep any
         * enable from racing with it.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

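/*
 * Disable the irq in the EN0 register of every online core.  Taking
 * the rwlock for writing excludes any concurrent enables.
 */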
static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

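/*
 * Timer-type interrupts are acknowledged by writing their SUM0 bit and
 * then masked for the duration of handling via the normal v1 or v2 ack.
 */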
static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
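/*
 * Steer the interrupt by setting its EN0 bit only on the cores present
 * in the destination mask.
 */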
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};


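/*
 * CIU1 covers the 64 interrupt sources reported in CIU_INT_SUM1 and
 * delivered to the core on interrupt line IP3.  The EN1 registers are
 * indexed with coreid * 2 + 1.
 */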
static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here so that enables on different cores
         * can proceed in parallel (each core only updates its own EN1
         * register), while a disable takes the write lock to keep any
         * enable from racing with it.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

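/*
 * On PCIe chips all 64 MSI enable bits live in a single register, so
 * one lock serializes updates from every MSI interrupt.
 */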
static DEFINE_SPINLOCK(octeon_irq_msi_lock);

static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe.  Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually.  Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved.  MSI
                 * interrupts are always enabled and the ACK is assumed
                 * to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif

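/*
 * Set up the irq_chip for every interrupt source: the MIPS core lines,
 * both CIU banks (using the lock-free W1S/W1C chips on parts that have
 * those registers) and, when configured, the PCI/PCIe MSI range.
 */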
void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif
        /* Enable the core interrupt lines used by the CIU (IP2 and IP3). */
        set_c0_status(0x300 << 2);
}

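/*
 * Top level interrupt dispatch: read Cause/Status, then service CIU0
 * (IP2), CIU1 (IP3) or a core interrupt line until nothing is pending.
 */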
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU
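/* Report whether the CIU irq is currently enabled in EN0/EN1 for @cpu. */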
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                   irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}

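/*
 * CPU hotplug helper: mask the per-core interrupt lines on this CPU and
 * re-target any CIU interrupt still enabled here at the set of online
 * CPUs.
 */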
void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif
        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */