2 * linux/arch/arm/common/gic.c
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * Interrupt architecture for the GIC:
12 * o There is one Interrupt Distributor, which receives interrupts
13 * from system devices and sends them to the Interrupt Controllers.
15 * o There is one CPU Interface per CPU, which sends interrupts sent
16 * by the Distributor, and interrupts generated locally, to the
17 * associated CPU. The base address of the CPU interface is usually
18 * aliased so that the same address points to different chips depending
19 * on the CPU it is accessed from.
21 * Note that IRQs 0-31 are special - they are local to each CPU.
22 * As such, the enable set/clear, pending set/clear and active bit
23 * registers are banked per-cpu for these sources.
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/list.h>
28 #include <linux/smp.h>
29 #include <linux/cpumask.h>
31 #include <linux/interrupt.h>
32 #include <linux/percpu.h>
33 #include <linux/slab.h>
36 #include <asm/mach/irq.h>
37 #include <asm/hardware/gic.h>
39 static DEFINE_SPINLOCK(irq_controller_lock);
41 /* Address of GIC 0 CPU interface */
42 void __iomem *gic_cpu_base_addr __read_mostly;
45 * Supported arch specific GIC irq extension.
46 * Default make them NULL.
48 struct irq_chip gic_arch_extn = {
52 .irq_retrigger = NULL,
61 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
63 static inline void __iomem *gic_dist_base(struct irq_data *d)
65 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
66 return gic_data->dist_base;
69 static inline void __iomem *gic_cpu_base(struct irq_data *d)
71 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
72 return gic_data->cpu_base;
75 static inline unsigned int gic_irq(struct irq_data *d)
77 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
78 return d->irq - gic_data->irq_offset;
82 * Routines to acknowledge, disable and enable interrupts
84 static void gic_mask_irq(struct irq_data *d)
86 u32 mask = 1 << (d->irq % 32);
88 spin_lock(&irq_controller_lock);
89 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
90 if (gic_arch_extn.irq_mask)
91 gic_arch_extn.irq_mask(d);
92 spin_unlock(&irq_controller_lock);
95 static void gic_unmask_irq(struct irq_data *d)
97 u32 mask = 1 << (d->irq % 32);
99 spin_lock(&irq_controller_lock);
100 if (gic_arch_extn.irq_unmask)
101 gic_arch_extn.irq_unmask(d);
102 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
103 spin_unlock(&irq_controller_lock);
106 static void gic_eoi_irq(struct irq_data *d)
108 if (gic_arch_extn.irq_eoi) {
109 spin_lock(&irq_controller_lock);
110 gic_arch_extn.irq_eoi(d);
111 spin_unlock(&irq_controller_lock);
114 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
117 static int gic_set_type(struct irq_data *d, unsigned int type)
119 void __iomem *base = gic_dist_base(d);
120 unsigned int gicirq = gic_irq(d);
121 u32 enablemask = 1 << (gicirq % 32);
122 u32 enableoff = (gicirq / 32) * 4;
123 u32 confmask = 0x2 << ((gicirq % 16) * 2);
124 u32 confoff = (gicirq / 16) * 4;
125 bool enabled = false;
128 /* Interrupt configuration for SGIs can't be changed */
132 if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
135 spin_lock(&irq_controller_lock);
137 if (gic_arch_extn.irq_set_type)
138 gic_arch_extn.irq_set_type(d, type);
140 val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
141 if (type == IRQ_TYPE_LEVEL_HIGH)
143 else if (type == IRQ_TYPE_EDGE_RISING)
147 * As recommended by the spec, disable the interrupt before changing
150 if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
151 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
155 writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
158 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
160 spin_unlock(&irq_controller_lock);
165 static int gic_retrigger(struct irq_data *d)
167 if (gic_arch_extn.irq_retrigger)
168 return gic_arch_extn.irq_retrigger(d);
174 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
177 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
178 unsigned int shift = (d->irq % 4) * 8;
179 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
182 if (cpu >= 8 || cpu >= nr_cpu_ids)
185 mask = 0xff << shift;
186 bit = 1 << (cpu_logical_map(cpu) + shift);
188 spin_lock(&irq_controller_lock);
189 val = readl_relaxed(reg) & ~mask;
190 writel_relaxed(val | bit, reg);
191 spin_unlock(&irq_controller_lock);
193 return IRQ_SET_MASK_OK;
#ifdef CONFIG_PM
/*
 * Wake-up configuration is entirely delegated to the platform
 * extension; without one, report that wake is not supported.
 */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake	NULL
#endif
212 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
214 struct gic_chip_data *chip_data = irq_get_handler_data(irq);
215 struct irq_chip *chip = irq_get_chip(irq);
216 unsigned int cascade_irq, gic_irq;
217 unsigned long status;
219 chained_irq_enter(chip, desc);
221 spin_lock(&irq_controller_lock);
222 status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
223 spin_unlock(&irq_controller_lock);
225 gic_irq = (status & 0x3ff);
229 cascade_irq = gic_irq + chip_data->irq_offset;
230 if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
231 do_bad_IRQ(cascade_irq, desc);
233 generic_handle_irq(cascade_irq);
236 chained_irq_exit(chip, desc);
239 static struct irq_chip gic_chip = {
241 .irq_mask = gic_mask_irq,
242 .irq_unmask = gic_unmask_irq,
243 .irq_eoi = gic_eoi_irq,
244 .irq_set_type = gic_set_type,
245 .irq_retrigger = gic_retrigger,
247 .irq_set_affinity = gic_set_affinity,
249 .irq_set_wake = gic_set_wake,
252 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
254 if (gic_nr >= MAX_GIC_NR)
256 if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
258 irq_set_chained_handler(irq, gic_handle_cascade_irq);
261 static void __init gic_dist_init(struct gic_chip_data *gic,
262 unsigned int irq_start)
264 unsigned int gic_irqs, irq_limit, i;
266 void __iomem *base = gic->dist_base;
268 u32 nrppis = 0, ppi_base = 0;
271 cpu = cpu_logical_map(smp_processor_id());
275 cpumask |= cpumask << 8;
276 cpumask |= cpumask << 16;
278 writel_relaxed(0, base + GIC_DIST_CTRL);
281 * Find out how many interrupts are supported.
282 * The GIC only supports up to 1020 interrupt sources.
284 gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
285 gic_irqs = (gic_irqs + 1) * 32;
290 * Nobody would be insane enough to use PPIs on a secondary
293 if (gic == &gic_data[0]) {
294 nrppis = (32 - irq_start) & 31;
296 /* The GIC only supports up to 16 PPIs. */
300 ppi_base = gic->irq_offset + 32 - nrppis;
303 pr_info("Configuring GIC with %d sources (%d PPIs)\n",
304 gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
307 * Set all global interrupts to be level triggered, active low.
309 for (i = 32; i < gic_irqs; i += 16)
310 writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
313 * Set all global interrupts to this CPU only.
315 for (i = 32; i < gic_irqs; i += 4)
316 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
319 * Set priority on all global interrupts.
321 for (i = 32; i < gic_irqs; i += 4)
322 writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
325 * Disable all interrupts. Leave the PPI and SGIs alone
326 * as these enables are banked registers.
328 for (i = 32; i < gic_irqs; i += 32)
329 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
332 * Limit number of interrupts registered to the platform maximum
334 irq_limit = gic->irq_offset + gic_irqs;
335 if (WARN_ON(irq_limit > NR_IRQS))
339 * Setup the Linux IRQ subsystem.
341 for (i = 0; i < nrppis; i++) {
342 int ppi = i + ppi_base;
344 irq_set_percpu_devid(ppi);
345 irq_set_chip_and_handler(ppi, &gic_chip,
346 handle_percpu_devid_irq);
347 irq_set_chip_data(ppi, gic);
348 set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
351 for (i = irq_start + nrppis; i < irq_limit; i++) {
352 irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
353 irq_set_chip_data(i, gic);
354 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
357 writel_relaxed(1, base + GIC_DIST_CTRL);
360 static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
362 void __iomem *dist_base = gic->dist_base;
363 void __iomem *base = gic->cpu_base;
367 * Deal with the banked PPI and SGI interrupts - disable all
368 * PPI interrupts, ensure all SGI interrupts are enabled.
370 writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
371 writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
374 * Set priority on PPI and SGI interrupts
376 for (i = 0; i < 32; i += 4)
377 writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
379 writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
380 writel_relaxed(1, base + GIC_CPU_CTRL);
383 void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
384 void __iomem *dist_base, void __iomem *cpu_base)
386 struct gic_chip_data *gic;
388 BUG_ON(gic_nr >= MAX_GIC_NR);
390 gic = &gic_data[gic_nr];
391 gic->dist_base = dist_base;
392 gic->cpu_base = cpu_base;
393 gic->irq_offset = (irq_start - 1) & ~31;
396 gic_cpu_base_addr = cpu_base;
398 gic_dist_init(gic, irq_start);
402 void __cpuinit gic_secondary_init(unsigned int gic_nr)
404 BUG_ON(gic_nr >= MAX_GIC_NR);
406 gic_cpu_init(&gic_data[gic_nr]);
410 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
413 unsigned long map = 0;
415 /* Convert our logical CPU mask into a physical one. */
416 for_each_cpu(cpu, mask)
417 map |= 1 << cpu_logical_map(cpu);
420 * Ensure that stores to Normal memory are visible to the
421 * other CPUs before issuing the IPI.
425 /* this always happens on GIC0 */
426 writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);