Merge remote-tracking branch 'ipsec/master'
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 12484e5d5c88769058610aca529924ea9e882aff..867bc20e1ef142a63349c345932af24b26a1adfc 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -73,21 +73,24 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
        }
 }
 
+static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
+                                       struct cpu_stop_work *work)
+{
+       list_add_tail(&work->list, &stopper->works);
+       wake_up_process(stopper->thread);
+}
+
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
        unsigned long flags;
 
        spin_lock_irqsave(&stopper->lock, flags);
-
-       if (stopper->enabled) {
-               list_add_tail(&work->list, &stopper->works);
-               wake_up_process(stopper->thread);
-       } else
+       if (stopper->enabled)
+               __cpu_stop_queue_work(stopper, work);
+       else
                cpu_stop_signal_done(work->done, false);
-
        spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
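The first hunk splits the enqueue-and-wake step out into __cpu_stop_queue_work() so that a later caller can queue a work while already holding stopper->lock. A minimal userspace sketch of the same split, with a pthread mutex standing in for the spinlock and a condvar for wake_up_process() (illustrative names only, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work { struct work *next; bool done, executed; };

struct stopper {
	pthread_mutex_t lock;
	pthread_cond_t  wake;          /* stands in for wake_up_process() */
	struct work    *head, **tail;  /* FIFO list, tail starts as &head */
	bool            enabled;
};

/* caller holds s->lock, mirroring __cpu_stop_queue_work() */
static void queue_work_locked(struct stopper *s, struct work *w)
{
	w->next = NULL;
	*s->tail = w;
	s->tail = &w->next;
	pthread_cond_signal(&s->wake);
}

/* mirrors cpu_stop_queue_work(): an offline stopper completes the
 * work immediately, flagged as not executed */
static void queue_work(struct stopper *s, struct work *w)
{
	pthread_mutex_lock(&s->lock);
	if (s->enabled) {
		queue_work_locked(s, w);
	} else {
		w->executed = false;
		w->done = true;
	}
	pthread_mutex_unlock(&s->lock);
}
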
@@ -213,6 +216,31 @@ static int multi_cpu_stop(void *data)
        return err;
 }
 
+static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+                                   int cpu2, struct cpu_stop_work *work2)
+{
+       struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+       struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+       int err;
+
+       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+       spin_lock_irq(&stopper1->lock);
+       spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+       err = -ENOENT;
+       if (!stopper1->enabled || !stopper2->enabled)
+               goto unlock;
+
+       err = 0;
+       __cpu_stop_queue_work(stopper1, work1);
+       __cpu_stop_queue_work(stopper2, work2);
+unlock:
+       spin_unlock(&stopper2->lock);
+       spin_unlock_irq(&stopper1->lock);
+       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
+       return err;
+}
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
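cpu_stop_queue_two_works() takes both stoppers' locks and queues both works atomically: either both CPUs get their work or the caller sees -ENOENT. lg_double_lock() additionally excludes a concurrent stop_cpus(), and spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep the second lock of the same class is taken deliberately. Together with the swap() added in stop_two_cpus() below, the two locks are always taken in ascending CPU order, so racing stop_two_cpus(A, B) and stop_two_cpus(B, A) calls cannot ABBA-deadlock. A userspace sketch of that ordering discipline (hypothetical names):

#include <pthread.h>

/* always lock the lower index first: one global order, no ABBA */
static void double_lock(pthread_mutex_t *locks, int i, int j)
{
	if (i > j) { int t = i; i = j; j = t; }  /* the caller's swap() */
	pthread_mutex_lock(&locks[i]);
	pthread_mutex_lock(&locks[j]);           /* "nested" second lock */
}

static void double_unlock(pthread_mutex_t *locks, int i, int j)
{
	if (i > j) { int t = i; i = j; j = t; }
	pthread_mutex_unlock(&locks[j]);
	pthread_mutex_unlock(&locks[i]);
}
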
@@ -247,24 +275,13 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);
 
-       /*
-        * If we observe both CPUs active we know _cpu_down() cannot yet have
-        * queued its stop_machine works and therefore ours will get executed
-        * first. Or its not either one of our CPUs that's getting unplugged,
-        * in which case we don't care.
-        *
-        * This relies on the stopper workqueues to be FIFO.
-        */
-       if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+       if (cpu1 > cpu2)
+               swap(cpu1, cpu2);
+       if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
                preempt_enable();
                return -ENOENT;
        }
 
-       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
-       cpu_stop_queue_work(cpu1, &work1);
-       cpu_stop_queue_work(cpu2, &work2);
-       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
-
        preempt_enable();
 
        wait_for_completion(&done.completion);
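With both works queued under both locks, the removed comment's argument about FIFO stopper queues and cpu_active() is no longer needed: if hotplug has disabled either stopper, cpu_stop_queue_two_works() itself fails and stop_two_cpus() returns -ENOENT. The caller side keeps the same shape; simplified from the in-tree caller, the scheduler's migrate_swap() (a sketch, details elided):

	struct migration_swap_arg arg = { /* src/dst tasks and CPUs */ };
	int ret;

	/* -ENOENT now comes straight from the queueing step when either
	 * CPU's stopper is already disabled, so no cpu_active() pre-check */
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
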
@@ -452,6 +469,18 @@ repeat:
        }
 }
 
+void stop_machine_park(int cpu)
+{
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       /*
+        * Lockless. cpu_stopper_thread() will take stopper->lock and flush
+        * the pending works before it parks, until then it is fine to queue
+        * the new works.
+        */
+       stopper->enabled = false;
+       kthread_park(stopper->thread);
+}
+
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
 static void cpu_stop_create(unsigned int cpu)
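stop_machine_park() clears ->enabled without taking stopper->lock: new works are refused from that point on, while cpu_stopper_thread() still drains anything already queued before it honours kthread_park(). A userspace analogue of the disable-then-park ordering, using C11 atomics (an illustrative model, not kernel code; a real worker would sleep on the condvar instead of polling):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool enabled;      /* gate checked by queuers, starts off */
static atomic_bool should_park;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* like cpu_stopper_thread(): drain under the lock, then park */
static void *worker(void *unused)
{
	(void)unused;
	do {
		pthread_mutex_lock(&lock);
		/* ... pop and run all pending works here ... */
		pthread_mutex_unlock(&lock);
	} while (!atomic_load(&should_park));
	return NULL;                     /* "parked" */
}

/* analogue of stop_machine_park(): lockless disable, then wait for
 * the worker to flush its queue and stop */
static void park(pthread_t t)
{
	atomic_store(&enabled, false);   /* no lock held, as in the hunk */
	atomic_store(&should_park, true);
	pthread_join(t, NULL);           /* stands in for kthread_park() */
}
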
@@ -462,26 +491,16 @@ static void cpu_stop_create(unsigned int cpu)
 static void cpu_stop_park(unsigned int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct cpu_stop_work *work, *tmp;
-       unsigned long flags;
 
-       /* drain remaining works */
-       spin_lock_irqsave(&stopper->lock, flags);
-       list_for_each_entry_safe(work, tmp, &stopper->works, list) {
-               list_del_init(&work->list);
-               cpu_stop_signal_done(work->done, false);
-       }
-       stopper->enabled = false;
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       WARN_ON(!list_empty(&stopper->works));
 }
 
-static void cpu_stop_unpark(unsigned int cpu)
+void stop_machine_unpark(int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-       spin_lock_irq(&stopper->lock);
        stopper->enabled = true;
-       spin_unlock_irq(&stopper->lock);
+       kthread_unpark(stopper->thread);
 }
 
 static struct smp_hotplug_thread cpu_stop_threads = {
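On the other side, stop_machine_unpark() sets ->enabled before kthread_unpark(), so the stopper accepts work as soon as its thread runs again, and cpu_stop_park() shrinks to a WARN_ON(): by the time the thread parks, the lockless-disable scheme above guarantees the queue was flushed and nothing new was admitted. Continuing the sketch from the previous hunk (same hypothetical model):

/* analogue of stop_machine_unpark(): enable first, then let the
 * worker run, so early queuers are accepted rather than bounced */
static pthread_t unpark(void)
{
	pthread_t t;
	atomic_store(&should_park, false);
	atomic_store(&enabled, true);             /* order matters */
	pthread_create(&t, NULL, worker, NULL);   /* kthread_unpark() stand-in */
	return t;
}
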
@@ -490,9 +509,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
-       .setup                  = cpu_stop_unpark,
        .park                   = cpu_stop_park,
-       .pre_unpark             = cpu_stop_unpark,
        .selfparking            = true,
 };
 
@@ -508,6 +525,7 @@ static int __init cpu_stop_init(void)
        }
 
        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+       stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
 }
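
With the smpboot .setup and .pre_unpark hooks gone, registering the per-CPU threads no longer enables them, so cpu_stop_init() has to unpark the boot CPU's stopper by hand via stop_machine_unpark(raw_smp_processor_id()); the stoppers of CPUs brought up later are unparked from the hotplug code outside this file. Tying the userspace sketch together (same hypothetical model as above):

int main(void)
{
	pthread_t t = unpark();   /* boot "CPU" stopper: enable + start,
	                           * like stop_machine_unpark() at init */
	/* ... queue works while enabled ... */
	park(t);                  /* "hotplug down": disable, flush, park */
	return 0;
}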