Merge remote-tracking branch 'arm/for-next'

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 939ac2f2f2a3e0d05fbcc8fa407ba2b22d06bca2..8e20754dd31d5946f5297aae2023ae2c8d5cd572 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
+#include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -43,6 +44,7 @@
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
+#include <asm/mach/arch.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  */
 struct secondary_data secondary_data;
 
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
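For context: pen_release implements the classic ARM "holding pen" handshake. Secondary cores spin in their boot stub until the booting CPU writes their number here; the released core then resets it to -1 to acknowledge. A minimal C sketch of the secondary-side loop (not part of this patch; the function name is hypothetical, and real platforms such as versatile do this in their assembly boot stub):

/*
 * Illustrative secondary-side holding pen (hypothetical name;
 * real platforms typically implement this in assembly).
 */
static void my_soc_hold_in_pen(unsigned int cpu)
{
	/* Spin until the boot CPU releases exactly this core. */
	while (pen_release != cpu)
		cpu_relax();

	/* Acknowledge the release so the boot CPU can proceed. */
	pen_release = -1;
	smp_wmb();
}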
 enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
@@ -62,6 +70,14 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
+static struct smp_operations smp_ops;
+
+void __init smp_set_ops(struct smp_operations *ops)
+{
+       if (ops)
+               smp_ops = *ops;
+}
+
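To illustrate how this registration hook is consumed, here is a sketch assuming a hypothetical my_soc_smp_ops table defined by board code (mainline typically wires this up via the machine descriptor rather than an explicit call, so treat the names below as illustrative):

/* Illustrative early-init registration; names are hypothetical. */
extern struct smp_operations my_soc_smp_ops;

static void __init my_soc_init_early(void)
{
	/* Install the SoC's SMP callbacks before secondaries boot. */
	smp_set_ops(&my_soc_smp_ops);
}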
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
@@ -102,13 +118,64 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
+/* platform-specific SMP operations */
+void __init smp_init_cpus(void)
+{
+       if (smp_ops.smp_init_cpus)
+               smp_ops.smp_init_cpus();
+}
+
+static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+       if (smp_ops.smp_prepare_cpus)
+               smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+       if (smp_ops.smp_secondary_init)
+               smp_ops.smp_secondary_init(cpu);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+       if (smp_ops.smp_boot_secondary)
+               return smp_ops.smp_boot_secondary(cpu, idle);
+       return -ENOSYS;
+}
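The wrappers above all dispatch through a single per-SoC table. A hedged sketch of what a platform might register (every callback name here is hypothetical; implementations are not shown):

/* Illustrative per-SoC operations table; names are hypothetical. */
struct smp_operations my_soc_smp_ops __initdata = {
	.smp_init_cpus		= my_soc_smp_init_cpus,
	.smp_prepare_cpus	= my_soc_smp_prepare_cpus,
	.smp_secondary_init	= my_soc_secondary_init,
	.smp_boot_secondary	= my_soc_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= my_soc_cpu_kill,
	.cpu_die		= my_soc_cpu_die,
#endif
};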
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
+static int platform_cpu_kill(unsigned int cpu)
+{
+       if (smp_ops.cpu_kill)
+               return smp_ops.cpu_kill(cpu);
+       return 1;
+}
+
+static void platform_cpu_die(unsigned int cpu)
+{
+       if (smp_ops.cpu_die)
+               smp_ops.cpu_die(cpu);
+}
+
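A common cpu_die() implementation on platforms that cannot actually cut power simply parks the dying core. A sketch assuming ARM's wfi() wait-for-interrupt helper (the function name is illustrative):

/* Illustrative cpu_die callback: park the dying core. */
static void my_soc_cpu_die(unsigned int cpu)
{
	/*
	 * Nothing here can power the core off, so sit in wfi();
	 * the killing CPU's cpu_kill() may cut clocks or power later.
	 */
	for (;;)
		wfi();
}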
+static int platform_cpu_disable(unsigned int cpu)
+{
+       if (smp_ops.cpu_disable)
+               return smp_ops.cpu_disable(cpu);
+
+       /*
+        * By default, allow disabling all CPUs except the first one,
+        * since this is special on a lot of platforms, e.g. because
+        * of clock tick interrupts.
+        */
+       return cpu == 0 ? -EPERM : 0;
+}
+
 /*
  * __cpu_disable runs on the processor to be shut down.
  */
-int __cpu_disable(void)
+int __cpuinit __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
        int ret;
@@ -154,7 +221,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shut down -
  * waits until shutdown has completed, or it times out.
  */
-void __cpu_die(unsigned int cpu)
+void __cpuinit __cpu_die(unsigned int cpu)
 {
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -595,3 +662,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       int cpu = freq->cpu;
+
+       if (freq->flags & CPUFREQ_CONST_LOOPS)
+               return NOTIFY_OK;
+
+       if (!per_cpu(l_p_j_ref, cpu)) {
+               per_cpu(l_p_j_ref, cpu) =
+                       per_cpu(cpu_data, cpu).loops_per_jiffy;
+               per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+               if (!global_l_p_j_ref) {
+                       global_l_p_j_ref = loops_per_jiffy;
+                       global_l_p_j_ref_freq = freq->old;
+               }
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+               loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                               global_l_p_j_ref_freq,
+                                               freq->new);
+               per_cpu(cpu_data, cpu).loops_per_jiffy =
+                       cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                       per_cpu(l_p_j_ref_freq, cpu),
+                                       freq->new);
+       }
+       return NOTIFY_OK;
+}
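The rescaling keeps udelay() honest across frequency transitions: loops_per_jiffy is scaled linearly from the reference captured at the first transition, lpj_new = lpj_ref * f_new / f_ref. Note the ordering in the condition above: lpj is raised at PRECHANGE when speeding up and lowered at POSTCHANGE when slowing down, so it is never too small for the current clock and udelay() only ever errs long. A worked example with made-up numbers (cpufreq frequencies are in kHz):

/*
 * Illustrative arithmetic only: a core calibrated to
 * loops_per_jiffy = 4997120 at 1000000 kHz, dropping to 500000 kHz:
 *
 *   lpj_new = cpufreq_scale(4997120, 1000000, 500000)
 *           = 4997120 * 500000 / 1000000
 *           = 2498560
 *
 * so a udelay() loop keeps roughly the same wall-clock length.
 */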
+
+static struct notifier_block cpufreq_notifier = {
+       .notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+       return cpufreq_register_notifier(&cpufreq_notifier,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif