Merge master.kernel.org:/home/rmk/linux-2.6-serial
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 6e4807d64d4618180f100befb9854299d742f0c4..4fb34b5cb1f964e8093bc73bfd5219649a952efa 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 /* Package ID of each logical CPU */
-u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
 EXPORT_SYMBOL(phys_proc_id);
 EXPORT_SYMBOL(cpu_core_id);
 
 /* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
+cpumask_t cpu_online_map __read_mostly;
 
 EXPORT_SYMBOL(cpu_online_map);
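
The __read_mostly annotations added above (and on cpu_sibling_map/cpu_core_map in the next hunk) move rarely-written, frequently-read data away from write-hot cache lines. A hedged sketch of what the annotation typically expands to; the section name is taken from arch headers of this era and is an assumption here, not part of this patch:

/*
 * Sketch only: __read_mostly places the variable in a dedicated
 * ".data.read_mostly" section, so data that is written rarely
 * (boot, CPU hotplug) but read on hot paths does not share cache
 * lines with frequently written data, avoiding cross-CPU
 * cache-line bouncing.
 */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Usage mirrors the hunk above: initialized once, then mostly read. */
static int example_flag __read_mostly = 1;
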
 
@@ -88,8 +88,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
 /*
@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
 {
        if (notscsync || !cpu_has_tsc)
                return;
-       sync_tsc(boot_cpu_id);
+       sync_tsc(0);
 }
 
 static __init int notscsync_setup(char *s)
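
Background for the sync_tsc() change above: the incoming CPU's time stamp counter is synchronized against a master CPU, which the patch now hard-codes as CPU 0 instead of boot_cpu_id. For readers unfamiliar with the TSC, here is a minimal, hypothetical helper for reading it on x86-64; the kernel uses its own macros for this, and sketch_rdtsc is not from this file:

/* Hypothetical helper: read the 64-bit time stamp counter. */
static inline unsigned long long sketch_rdtsc(void)
{
        unsigned int lo, hi;

        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
}
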
@@ -492,6 +492,14 @@ void __cpuinit start_secondary(void)
         */
        set_cpu_sibling_map(smp_processor_id());
 
+       /*
+        * Wait for TSC synchronization so that nothing is scheduled
+        * before the TSCs agree.  We still process interrupts, which
+        * could unfortunately observe an inconsistent time in that
+        * window.  Do this here, before the CPU is marked online,
+        * because the TSC sync code has unprotected global state.
+        */
+       tsc_sync_wait();
+
        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines number of
@@ -509,13 +517,6 @@ void __cpuinit start_secondary(void)
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        unlock_ipi_call_lock();
 
-       mb();
-
-       /* Wait for TSC sync to not schedule things before.
-          We still process interrupts, which could see an inconsistent
-          time in that window unfortunately. */
-       tsc_sync_wait();
-
        cpu_idle();
 }
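
Taken together, this hunk and the previous one move tsc_sync_wait() from after the CPU is marked online to before it. A simplified sketch of the resulting bring-up order in start_secondary(), not the literal function body:

void start_secondary_order_sketch(void)
{
        set_cpu_sibling_map(smp_processor_id());

        /*
         * 1. Sync the TSC while this CPU is not yet visible to the
         *    rest of the system; per the patch's comment, the TSC
         *    sync code has unprotected global state, so it runs
         *    before the CPU goes online.
         */
        tsc_sync_wait();

        /* 2. Only then take call_lock and announce the CPU as online. */
        lock_ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        unlock_ipi_call_lock();

        /* 3. Enter the idle loop. */
        cpu_idle();
}
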
 
@@ -893,23 +894,6 @@ static __init void disable_smp(void)
        cpu_set(0, cpu_core_map[0]);
 }
 
-/*
- * Handle user cpus=... parameter.
- */
-static __init void enforce_max_cpus(unsigned max_cpus)
-{
-       int i, k;
-       k = 0;
-       for (i = 0; i < NR_CPUS; i++) {
-               if (!cpu_possible(i))
-                       continue;
-               if (++k > max_cpus) {
-                       cpu_clear(i, cpu_possible_map);
-                       cpu_clear(i, cpu_present_map);
-               }
-       }
-}
-
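
The removed enforce_max_cpus() trimmed the possible/present maps by hand; the cpumask helpers it used are thin wrappers around bitmap operations on NR_CPUS-wide masks. A hedged sketch of roughly what they do, with names prefixed sketch_ to mark them as illustrations rather than the in-tree definitions:

/* Sketch: cpumask_t is essentially a bitmap with one bit per CPU. */
#define sketch_cpu_set(cpu, mask)       set_bit((cpu), (mask).bits)
#define sketch_cpu_clear(cpu, mask)     clear_bit((cpu), (mask).bits)
#define sketch_cpu_possible(cpu)        test_bit((cpu), cpu_possible_map.bits)
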
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * cpu_possible_map should be static, it cannot change as cpu's
@@ -998,8 +982,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        current_cpu_data = boot_cpu_data;
        current_thread_info()->cpu = 0;  /* needed? */
 
-       enforce_max_cpus(max_cpus);
-
 #ifdef CONFIG_HOTPLUG_CPU
        prefill_possible_map();
 #endif
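
For context, prefill_possible_map() (called above when CONFIG_HOTPLUG_CPU is set) populates cpu_possible_map up front, since, as the comment in the previous hunk notes, cpu_possible_map must not change once CPUs are onlined or offlined. A simplified sketch of the idea, not necessarily the exact function in this tree:

#ifdef CONFIG_HOTPLUG_CPU
/* Sketch: mark every CPU slot possible so it can be hotplugged later. */
static void sketch_prefill_possible_map(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                cpu_set(i, cpu_possible_map);
}
#endif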