/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/mcip.h>
#include <asm/setup.h>

static char smp_cpuinfo_buf[128];
static int idu_detected;

static DEFINE_RAW_SPINLOCK(mcip_lock);

/*
 * Any SMP specific init any CPU does when it comes up.
 * Here we setup the CPU to enable Inter-Processor-Interrupts
 * Called for each CPU
 * -Master       : init_IRQ()
 * -Other(s)     : start_kernel_secondary()
 */
void mcip_init_smp(unsigned int cpu)
{
	smp_ipi_irq_setup(cpu, IPI_IRQ);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/*
	 * NOTE: We must spin here if the other cpu hasn't yet
	 * serviced a previous message. This can burn lots
	 * of time, but we MUST follow this protocol or
	 * IPI messages can be lost!
	 * Also, we must release the lock in this loop because
	 * the other side may get to this same loop and not
	 * be able to ack -- thus causing deadlock.
	 */

	do {
		raw_spin_lock_irqsave(&mcip_lock, flags);
		__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
		ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
		if (ipi_was_pending == 0)
			break; /* break out but keep lock */
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} while (1);

	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (ipi_was_pending)
		pr_info("IPI ACK delayed from cpu %d\n", cpu);
#endif
}
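
/*
 * Illustrative sequence for the protocol above (cpu numbers hypothetical):
 *
 *	cpu0: mcip_ipi_send(1): READ_STATUS(1) == 0 -> GENERATE_IRQ(1)
 *	cpu0: mcip_ipi_send(1) again, before cpu1 acks: READ_STATUS(1) != 0,
 *	      so cpu0 drops the lock and re-polls until cpu1's
 *	      GENERATE_ACK clears the pending status.
 */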

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;
	unsigned int __maybe_unused copy;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare cases, multiple concurrent IPIs sent to the same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (c != __ffs(copy))
		pr_info("IPIs from %x coalesced to %x\n",
			copy, raw_smp_processor_id());
#endif
}
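
/*
 * Worked example (value hypothetical): a CHECK_SOURCE readback of 0x5
 * means cpu 0 and cpu 2 both sent IPIs that MCIP coalesced into this one
 * IRQ; the ack loop above then issues GENERATE_ACK(0), then GENERATE_ACK(2).
 */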

volatile int wake_flag;

static void mcip_wakeup_cpu(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);
	wake_flag = cpu;
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	while (wake_flag != cpu)
		;

	wake_flag = 0;
	__asm__ __volatile__("j @first_lines_of_secondary	\n");
}
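
/*
 * Kick/wait handshake in call-sequence form (cpu number hypothetical):
 *
 *	master:    plat_smp_ops.cpu_kick(1, pc) -> wake_flag = 1
 *	secondary: arc_platform_smp_wait_to_boot(1) spins until
 *	           wake_flag == 1, clears it, then jumps to
 *	           first_lines_of_secondary
 */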

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.cpu_kick	= mcip_wakeup_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

void mcip_init_early_smp(void)
{
	struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad3:8,
			     idu:1, llm:1, num_cores:6,
			     iocoh:1, grtc:1, dbg:1, pad2:1,
			     msg:1, sem:1, ipi:1, pad:1,
			     ver:8;
#else
		unsigned int ver:8,
			     pad:1, ipi:1, sem:1, msg:1,
			     pad2:1, dbg:1, grtc:1, iocoh:1,
			     num_cores:6, llm:1, idu:1,
			     pad3:8;
#endif
	} mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.grtc, "GRTC"));

	idu_detected = mp.idu;

	if (mp.dbg) {
		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
		panic("kernel trying to use non-existent GRTC\n");
}
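
/*
 * Example of the resulting cpuinfo line, per the format string above
 * (version, core count and feature set are hypothetical):
 *
 *	Extn [SMP]	: ARConnect (v2): 4 cores with IPI IDU DEBUG GRTC
 */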

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *
 * It physically resides in the MCIP hw block
 */
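
/*
 * Sketch of a DeviceTree node this driver binds to (node and property
 * values are illustrative only; the compatible string matches the
 * IRQCHIP_DECLARE at the bottom of this file, and the 2-cell interrupt
 * specifier is decoded in idu_irq_xlate() below):
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&core_intc>;
 *		interrupts = <24 25 26 27>;
 *	};
 */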

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
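
/*
 * Worked encoding example, assuming IDU_M_TRIG_EDGE == 1 and
 * IDU_M_DISTRI_DEST == 1 (constants from asm/mcip.h): per the bitfield
 * layout above, distr occupies bits [1:0] and lvl bit [4], so
 * idu_set_mode(8, IDU_M_TRIG_EDGE, IDU_M_DISTRI_DEST) would write mode
 * word 0x11 for common irq 8.
 */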

static void idu_irq_mask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;

	/* errout if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif
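
/*
 * Typical trigger for this callback (irq number hypothetical): userspace
 * rewrites an IRQ's cpu mask via procfs, which lands here and reprograms
 * the IDU DEST for the corresponding common irq:
 *
 *	echo 3 > /proc/irq/24/smp_affinity
 */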

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static int idu_first_irq;

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned int core_irq = irq_desc_get_irq(desc);
	unsigned int idu_irq;

	idu_irq = core_irq - idu_first_irq;
	generic_handle_irq(irq_find_mapping(domain, idu_irq));
}
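
/*
 * E.g. (numbers hypothetical): with idu_first_irq == 24, a cascade on
 * core irq 26 resolves to IDU hwirq 2, whose Linux virq is then looked
 * up in @domain and handled.
 */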

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
			 const u32 *intspec, unsigned int intsize,
			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
	int distri = intspec[1];
	unsigned long flags;

	*out_type = IRQ_TYPE_NONE;

	/* XXX: validate distribution scheme against online cpu mask */

	if (distri == 0) {
		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} else {
		/*
		 * DEST based distribution for Level Triggered intr can only
		 * have 1 CPU, so generalize it to always contain 1 cpu
		 */
		int cpu = ffs(distri);

		if (cpu != fls(distri))
			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
				hwirq, cpu);

		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, cpu);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	}

	return 0;
}
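
/*
 * Example consumer specifiers under this xlate (cell values hypothetical):
 *
 *	interrupts = <8 0>;	intspec[1] == 0 -> common irq 8, Round Robin
 *				over all online cpus
 *	interrupts = <9 1>;	intspec[1] != 0 -> common irq 9, DEST mode,
 *				pinned to the single cpu given by ffs(1)
 */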

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= idu_irq_xlate,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
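
/*
 * E.g. with C == 4 common IRQs (hypothetical): core irqs 24..27 are the
 * IDU uplinks, cascading to IDU hwirqs 0..3, while 28 onwards remain
 * private-per-core.
 */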

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, irq;

	if (!idu_detected)
		panic("IDU not detected, but DeviceTree using it");

	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/*
		 * irq_of_parse_and_map() returns the parent uplink IRQs
		 * (towards core intc) 24,25,.....
		 * This mapping has been done before already, but we need it
		 * here to get the parent virq and install the IDU cascade
		 * as first level isr
		 */
		irq = irq_of_parse_and_map(intc, i);
		if (!i)
			idu_first_irq = irq;

		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);