rcu: Switch synchronize_sched_expedited() to IPI
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Thu, 6 Aug 2015 23:50:39 +0000 (16:50 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Wed, 7 Oct 2015 23:01:12 +0000 (16:01 -0700)
This commit switches synchronize_sched_expedited() from stop_one_cpu_nowait()
to smp_call_function_single(), thus moving from an IPI and a pair of
context switches to an IPI and a single pass through the scheduler.
Of course, if the scheduler actually does decide to switch to a different
task, there will still be a pair of context switches, but there would
likely have been a pair of context switches anyway, just a bit later.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree.h
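
For context, a rough sketch of the two invocation patterns this commit trades
between (simplified from the hunks below; the surrounding loop and locking are
omitted). stop_one_cpu_nowait() queues work for the target CPU's stopper
kthread, which must be context-switched in and back out, while
smp_call_function_single() with wait=0 runs the callback directly from the IPI
handler and returns nonzero if the CPU cannot be reached (e.g., it went
offline):

	/* Old scheme: wake the cpu-stopper kthread on the target CPU. */
	stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
			    rdp, &rdp->exp_stop_work);

	/* New scheme: run the callback from IPI context, without waiting. */
	ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop,
				       NULL, 0);
	if (!ret)
		mask_ofl_ipi &= ~mask;	/* IPI sent; QS will be reported. */
	/* Otherwise the CPU is offline; its QS is reported on its behalf. */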

index 3e2875b38eaeaca6f60a545fd81307524847d674..a42168911941eb501fd67664155373d9ed41acf4 100644 (file)
@@ -161,6 +161,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_state *rsp,
+                              struct rcu_data *rdp, bool wake);
 
 /* rcuc/rcub kthread realtime priority */
 #ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -250,6 +252,12 @@ void rcu_sched_qs(void)
                                       __this_cpu_read(rcu_sched_data.gpnum),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+               if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
+                       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+                       rcu_report_exp_rdp(&rcu_sched_state,
+                                          this_cpu_ptr(&rcu_sched_data),
+                                          true);
+               }
        }
 }
 
@@ -3555,8 +3563,8 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
  * Report expedited quiescent state for specified rcu_data (CPU).
  * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
-static void __maybe_unused rcu_report_exp_rdp(struct rcu_state *rsp,
-                                             struct rcu_data *rdp, bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+                              bool wake)
 {
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
 }
@@ -3637,14 +3645,10 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 }
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static int synchronize_sched_expedited_cpu_stop(void *data)
+static void synchronize_sched_expedited_cpu_stop(void *data)
 {
-       struct rcu_data *rdp = data;
-       struct rcu_state *rsp = rdp->rsp;
-
-       /* Report the quiescent state. */
-       rcu_report_exp_rdp(rsp, rdp, true);
-       return 0;
+       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+       resched_cpu(smp_processor_id());
 }
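
Taken together with the rcu_sched_qs() hunk above, the new handler splits QS
detection in two (this is my reading of the diff, not wording from the commit
message): the IPI itself cannot report an RCU-sched quiescent state, since the
interrupted code may sit inside a preempt-disabled read-side section, so the
handler only sets the per-CPU cpu_no_qs.b.exp flag and calls resched_cpu() to
force a pass through the scheduler. On that pass, rcu_sched_qs() observes the
flag and reports the expedited QS. In outline:

	/* IPI handler: record the need for an expedited QS... */
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	resched_cpu(smp_processor_id());	/* ...and force a scheduler pass. */

	/* Later, from rcu_sched_qs() on that scheduler pass: */
	if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
		__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
	}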
 
 /*
@@ -3659,6 +3663,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        struct rcu_data *rdp;
+       int ret;
        struct rcu_node *rnp;
 
        sync_exp_reset_tree(rsp);
@@ -3694,9 +3699,9 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
                        if (!(mask_ofl_ipi & mask))
                                continue;
                        rdp = per_cpu_ptr(rsp->rda, cpu);
-                       stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
-                                           rdp, &rdp->exp_stop_work);
-                       mask_ofl_ipi &= ~mask;
+                       ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
+                       if (!ret)
+                               mask_ofl_ipi &= ~mask;
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
@@ -4201,6 +4206,9 @@ int rcu_cpu_notify(struct notifier_block *self,
                        rcu_cleanup_dying_cpu(rsp);
                break;
        case CPU_DYING_IDLE:
+               /* QS for any half-done expedited RCU-sched GP. */
+               rcu_sched_qs();
+
                for_each_rcu_flavor(rsp) {
                        rcu_cleanup_dying_idle_cpu(cpu, rsp);
                }
index 3eee48bcf52b891590aa75a12ca038c5d8f478fe..1b969cef8fe4f5d3c04251a9ffe63ed17058baad 100644 (file)
@@ -324,9 +324,6 @@ struct rcu_data {
                                        /*  ticks this CPU has handled */
                                        /*  during and after the last grace */
                                        /* period it is aware of. */
-       struct cpu_stop_work exp_stop_work;
-                                       /* Expedited grace-period control */
-                                       /*  for CPU stopping. */
 
        /* 2) batch handling */
        /*