[PATCH] ARM SMP: Add support for startup of secondary processors
author     Russell King <rmk@dyn-67.arm.linux.org.uk>
           Sat, 18 Jun 2005 08:33:31 +0000 (09:33 +0100)
committer  Russell King <rmk@dyn-67.arm.linux.org.uk>
           Sat, 18 Jun 2005 08:33:31 +0000 (09:33 +0100)
Create a temporary page table to start up secondary processors.  This
page table must have a 1:1 virtual/physical mapping for the kernel
in addition to the standard mappings to ensure that the secondary
CPU can enable its MMU safely.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/kernel/head.S
arch/arm/kernel/smp.c
include/asm-arm/smp.h
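
The 1:1 mapping described above is constructed on the boot CPU in __cpu_up()
(see the arch/arm/kernel/smp.c hunk below): a fresh pgd is obtained with
pgd_alloc(), which copies the normal kernel mappings from init_mm, and a single
section entry is then added that maps the kernel's physical address onto
itself, so the secondary CPU keeps fetching valid instructions at the instant
it switches its MMU on.  A minimal sketch of that step, using the same macros
as the patch; the helper name is invented for illustration and is not part of
the patch:

    #include <linux/sched.h>        /* init_mm */
    #include <asm/memory.h>         /* PHYS_OFFSET */
    #include <asm/pgalloc.h>
    #include <asm/pgtable.h>

    /*
     * Sketch: build the temporary page tables for one secondary CPU.
     * The returned pgd carries the standard kernel mappings plus one
     * section that maps the kernel's physical address 1:1.
     */
    static pgd_t *alloc_secondary_pgd(void)
    {
            pgd_t *pgd = pgd_alloc(&init_mm);
            pmd_t *pmd;

            if (!pgd)
                    return NULL;

            pmd = pmd_offset(pgd, PHYS_OFFSET);
            *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                         PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

            return pgd;
    }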

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4733877296d41209f6e0b0dd6c5c66cf6a0f3762..bd4823c74645f0899f91d61199f000a5342be40d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -2,6 +2,8 @@
  *  linux/arch/arm/kernel/head.S
  *
  *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -165,6 +167,48 @@ __mmap_switched:
        stmia   r6, {r0, r4}                    @ Save control register values
        b       start_kernel
 
+#if defined(CONFIG_SMP)
+       .type   secondary_startup, #function
+ENTRY(secondary_startup)
+       /*
+        * Common entry point for secondary CPUs.
+        *
+        * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+        * the processor type - there is no need to check the machine type
+        * as it has already been validated by the primary processor.
+        */
+       msr     cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+       bl      __lookup_processor_type
+       movs    r10, r5                         @ invalid processor?
+       moveq   r0, #'p'                        @ yes, error 'p'
+       beq     __error
+
+       /*
+        * Use the page tables supplied from  __cpu_up.
+        */
+       adr     r4, __secondary_data
+       ldmia   r4, {r5, r6, r13}               @ address to jump to after
+       sub     r4, r4, r5                      @ mmu has been enabled
+       ldr     r4, [r6, r4]                    @ get secondary_data.pgdir
+       adr     lr, __enable_mmu                @ return address
+       add     pc, r10, #12                    @ initialise processor
+                                               @ (return control reg)
+
+       /*
+        * r6  = &secondary_data
+        */
+ENTRY(__secondary_switched)
+       ldr     sp, [r6, #4]                    @ get secondary_data.stack
+       mov     fp, #0
+       b       secondary_start_kernel
+
+       .type   __secondary_data, %object
+__secondary_data:
+       .long   .
+       .long   secondary_data
+       .long   __secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
 
 
 /*
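
At the point secondary_startup runs, the MMU is still off: the CPU is executing
from physical addresses, while every symbol above was linked at a virtual
address.  The adr/ldmia/sub/ldr sequence bridges that gap - it measures the
difference between the running (physical) and the linked (virtual) address of
__secondary_data, then applies that offset to the virtual address of
secondary_data so that secondary_data.pgdir can be read through its physical
alias.  The same arithmetic restated in C, with hypothetical names that exist
nowhere in the kernel:

    /*
     * running_addr : where __secondary_data actually sits right now (adr r4)
     * linked_addr  : where the linker placed it                     (.long .)
     * data_virt    : virtual address of secondary_data              (.long secondary_data)
     */
    static unsigned long fetch_pgdir_pre_mmu(unsigned long running_addr,
                                             unsigned long linked_addr,
                                             unsigned long data_virt)
    {
            unsigned long offset = running_addr - linked_addr;  /* sub r4, r4, r5 */

            return *(unsigned long *)(data_virt + offset);      /* ldr r4, [r6, r4] */
    }

As the annotations in the hunk note, r13 is loaded with the address to jump to
once the MMU has been enabled, so after the processor setup code returns into
__enable_mmu, execution continues at __secondary_switched, which picks up the
real SVC stack and calls secondary_start_kernel().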
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c3332408a1c7ed394b6badcd3b608baf156c..45ed036336e0534eb8b9bb9ecf57fe4861fd6875 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 cpumask_t cpu_present_mask;
 cpumask_t cpu_online_map;
 
+/*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
 /*
  * structures for inter-processor calls
  * - A collection of single bit ipi messages.
@@ -71,6 +81,8 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
 int __init __cpu_up(unsigned int cpu)
 {
        struct task_struct *idle;
+       pgd_t *pgd;
+       pmd_t *pmd;
        int ret;
 
        /*
@@ -83,10 +95,55 @@ int __init __cpu_up(unsigned int cpu)
                return PTR_ERR(idle);
        }
 
+       /*
+        * Allocate initial page tables to allow the new CPU to
+        * enable the MMU safely.  This essentially means a set
+        * of our "standard" page tables, with the addition of
+        * a 1:1 mapping for the physical address of the kernel.
+        */
+       pgd = pgd_alloc(&init_mm);
+       pmd = pmd_offset(pgd, PHYS_OFFSET);
+       *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+                    PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+       /*
+        * We need to tell the secondary core where to find
+        * its stack and the page tables.
+        */
+       secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+       secondary_data.pgdir = virt_to_phys(pgd);
+       wmb();
+
        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
+       if (ret == 0) {
+               unsigned long timeout;
+
+               /*
+                * CPU was successfully started, wait for it
+                * to come online or time out.
+                */
+               timeout = jiffies + HZ;
+               while (time_before(jiffies, timeout)) {
+                       if (cpu_online(cpu))
+                               break;
+
+                       udelay(10);
+                       barrier();
+               }
+
+               if (!cpu_online(cpu))
+                       ret = -EIO;
+       }
+
+       secondary_data.stack = 0;
+       secondary_data.pgdir = 0;
+
+       *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+       pgd_free(pgd);
+
        if (ret) {
                printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
                /*
@@ -97,6 +154,56 @@ int __init __cpu_up(unsigned int cpu)
        return ret;
 }
 
+/*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __init secondary_start_kernel(void)
+{
+       struct mm_struct *mm = &init_mm;
+       unsigned int cpu = smp_processor_id();
+
+       printk("CPU%u: Booted secondary processor\n", cpu);
+
+       /*
+        * All kernel threads share the same mm context; grab a
+        * reference and switch to it.
+        */
+       atomic_inc(&mm->mm_users);
+       atomic_inc(&mm->mm_count);
+       current->active_mm = mm;
+       cpu_set(cpu, mm->cpu_vm_mask);
+       cpu_switch_mm(mm->pgd, mm);
+       enter_lazy_tlb(mm, current);
+
+       cpu_init();
+
+       /*
+        * Give the platform a chance to do its own initialisation.
+        */
+       platform_secondary_init(cpu);
+
+       /*
+        * Enable local interrupts.
+        */
+       local_irq_enable();
+       local_fiq_enable();
+
+       calibrate_delay();
+
+       smp_store_cpu_info(cpu);
+
+       /*
+        * OK, now it's safe to let the boot CPU continue
+        */
+       cpu_set(cpu, cpu_online_map);
+
+       /*
+        * OK, it's off to the idle thread for us
+        */
+       cpu_idle();
+}
+
 /*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
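
Taken together, the smp.c changes form a simple handshake.  The boot CPU
publishes secondary_data.stack and secondary_data.pgdir, issues a wmb() so both
stores are visible before the secondary is started, calls boot_secondary(), and
then polls cpu_online() for roughly one second.  The secondary, once it has
switched onto init_mm's real page tables and run cpu_init(),
platform_secondary_init() and calibrate_delay(), sets its bit in
cpu_online_map, which releases the boot CPU; only then are the temporary page
tables torn down.  The boot-CPU side of that wait, pulled out into a
hypothetical helper purely for illustration (the patch open-codes it inside
__cpu_up):

    #include <linux/compiler.h>     /* barrier() */
    #include <linux/cpumask.h>      /* cpu_online() */
    #include <linux/delay.h>        /* udelay() */
    #include <linux/errno.h>
    #include <linux/jiffies.h>      /* jiffies, HZ, time_before() */

    /* Wait for a freshly kicked secondary to mark itself online. */
    static int wait_for_secondary_online(unsigned int cpu)
    {
            unsigned long timeout = jiffies + HZ;   /* about one second */

            while (time_before(jiffies, timeout)) {
                    if (cpu_online(cpu))
                            return 0;

                    udelay(10);
                    barrier();
            }

            return -EIO;    /* never came online */
    }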
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index f21fd8f6bcdd04fc721bd6f4b0648d5778b77073..bd44f894690f55d74c8679ad2168c4e73dbd030c 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -55,4 +55,18 @@ extern void smp_cross_call(cpumask_t callmap);
  */
 extern int boot_secondary(unsigned int cpu, struct task_struct *);
 
+/*
+ * Perform platform specific initialisation of the specified CPU.
+ */
+extern void platform_secondary_init(unsigned int cpu);
+
+/*
+ * Initial data for bringing up a secondary CPU.
+ */
+struct secondary_data {
+       unsigned long pgdir;
+       void *stack;
+};
+extern struct secondary_data secondary_data;
+
 #endif /* ifndef __ASM_ARM_SMP_H */
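
The two hooks declared above are what each platform port must supply.
boot_secondary() runs on the boot CPU and has to make the named secondary enter
secondary_startup (typically by handing it the physical address of that entry
point and waking it), while platform_secondary_init() runs on the secondary
itself, early in secondary_start_kernel(), for per-CPU setup such as the local
interrupt controller.  A deliberately schematic, hypothetical example of such a
port - the helpers write_boot_vector() and wake_secondary_cpu() are invented
names standing in for whatever board-specific mechanism a real machine uses:

    #include <linux/sched.h>
    #include <asm/cacheflush.h>
    #include <asm/memory.h>         /* virt_to_phys() */
    #include <asm/smp.h>

    extern void secondary_startup(void);

    /* Hypothetical board-support helpers, named for illustration only. */
    extern void write_boot_vector(unsigned int cpu, unsigned long phys_entry);
    extern void wake_secondary_cpu(unsigned int cpu);

    int boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
            /*
             * secondary_data.stack and secondary_data.pgdir were already
             * published (and ordered with wmb()) by __cpu_up before this
             * function is called; all that is left is to point the waiting
             * CPU at secondary_startup and wake it up.
             */
            write_boot_vector(cpu, virt_to_phys(secondary_startup));
            flush_cache_all();
            wake_secondary_cpu(cpu);

            return 0;
    }

    void platform_secondary_init(unsigned int cpu)
    {
            /* Per-CPU interrupt controller setup would go here. */
    }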