/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmasks of possible and online CPUs.
 * The possible bitmask indicates which CPUs may ever be brought up.
 * The online bitmask indicates which CPUs are up and running.
 */
cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;
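
/*
 * __cpu_up() fills in secondary_data before waking the new core; the
 * secondary's early boot path picks up its stack pointer and page
 * table base from here, before it can use the normal kernel mappings.
 */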

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
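
/*
 * Each CPU owns one ipi_data mailbox: senders set one bit per message
 * type under ipi->lock and then kick the target with a hardware
 * cross-call; do_IPI() drains and handles all pending bits at once.
 */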

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
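
/*
 * Only one cross-call may be in flight at a time: the caller takes
 * smp_call_function_lock, publishes its smp_call_struct through
 * smp_call_function_data, and the IPI handlers on the target CPUs
 * read the call details back through that single pointer.
 */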

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it.
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd, PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
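
	/*
	 * The secondary core starts executing with the MMU off, at the
	 * kernel's physical address.  The flat 1:1 section mapping set
	 * up above keeps its instruction fetches valid at the moment it
	 * turns the MMU on, until it can branch to the normal virtual
	 * mapping.
	 */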

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	/* Discard the temporary 1:1 mapping and free the page tables. */
	*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
	pgd_free(pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
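
	/*
	 * We have just switched away from the temporary 1:1 boot tables;
	 * the local_flush_tlb_all() above discards any TLB entries that
	 * were fetched through that identity mapping.
	 */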

	cpu_init();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
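
/*
 * BogoMIPS arithmetic: one "BogoMIP" is 500000 delay-loop iterations
 * per second, and loops_per_jiffy counts iterations per tick, so
 * bogosum / (500000/HZ) is the integer part summed over all CPUs and
 * (bogosum / (5000/HZ)) % 100 recovers two decimal places without
 * floating point.
 */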

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;

	cpu_set(cpu, cpu_possible_map);
	cpu_set(cpu, cpu_present_map);
	cpu_set(cpu, cpu_online_map);
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}

/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
			     int wait, cpumask_t callmap)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int ret = 0;

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		goto out;

	data.pending = callmap;
	if (wait)
		data.unfinished = callmap;
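
	/*
	 * Two acknowledgement phases: a target clears its bit in
	 * data.pending as soon as it picks the call up, and clears its
	 * bit in data.unfinished once func() has returned, so wait=1
	 * callers can block until the function has actually completed
	 * everywhere.
	 */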

	/*
	 * Take the lock serialising access to smp_call_function_data.
	 */
	spin_lock(&smp_call_function_lock);
	smp_call_function_data = &data;

	send_ipi_message(callmap, IPI_CALL_FUNC);

	timeout = jiffies + HZ;
	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
		barrier();

	/*
	 * Did we time out?
	 */
	if (!cpus_empty(data.pending)) {
		/*
		 * This may be causing our panic - report it.
		 */
		printk(KERN_CRIT
		       "CPU%u: smp_call_function timeout for %p(%p)\n"
		       "      callmap %lx pending %lx, %swait\n",
		       smp_processor_id(), func, info, *cpus_addr(callmap),
		       *cpus_addr(data.pending), wait ? "" : "no ");

		/*
		 * Trace the timeout: wait a further five seconds to see
		 * whether the stragglers eventually respond.
		 */
		timeout = jiffies + (5 * HZ);
		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
			barrier();

		if (cpus_empty(data.pending))
			printk(KERN_CRIT "     RESOLVED\n");
		else
			printk(KERN_CRIT "     STILL STUCK\n");
	}

	/*
	 * Whatever happened, we're done with the data, so release it.
	 */
	smp_call_function_data = NULL;
	spin_unlock(&smp_call_function_lock);

	if (!cpus_empty(data.pending)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	if (wait)
		while (!cpus_empty(data.unfinished))
			barrier();
 out:
	return ret;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

static void ipi_timer(struct pt_regs *regs)
{
	int user = user_mode(regs);

	irq_enter();
	profile_tick(CPU_PROFILING, regs);
	update_process_times(user);
	irq_exit();
}

/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data.
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);

	if (wait)
		cpu_clear(cpu, data->unfinished);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
void do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		/*
		 * Atomically snapshot and clear this CPU's pending bits.
		 */
		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);
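
			/*
			 * nextmsg now holds a message number: msgs & -msgs
			 * isolates the lowest set bit (two's complement),
			 * and ffz(~x) turns that one-hot value back into
			 * its bit index.
			 */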

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer(regs);
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
		 cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
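
	/*
	 * The cross-call path never targets the calling CPU (it is
	 * removed from the map in smp_call_function_on_cpu), so run
	 * func here for ourselves; preemption is disabled, keeping
	 * smp_processor_id() stable.
	 */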
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
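
/*
 * IPI handlers receive a single void * argument, so the various TLB
 * flush parameters (vma, start, end) are marshalled through a
 * tlb_args structure on the caller's stack.
 */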

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;

	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}
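
/*
 * Scoping the shootdown with cpu_vm_mask keeps the IPI off CPUs that
 * have never run this mm and therefore cannot hold stale entries for
 * it.
 */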

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}