rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
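
ACCESS_ONCE() forces a single volatile access, but it works by casting the
lvalue to a volatile scalar and so misbehaves for non-scalar types on some
compilers, which is what motivates the tree-wide switch to the
READ_ONCE()/WRITE_ONCE() pair below.  A rough userspace sketch of what these
accessors provide; the MY_* macros and main() are illustrative stand-ins,
not the kernel's actual definitions:

/*
 * Minimal userspace approximation of the accessors used throughout this
 * patch.  Illustrative only; the kernel's READ_ONCE() and WRITE_ONCE()
 * also cope with non-scalar sizes and live in <linux/compiler.h>.
 */
#include <stdio.h>

#define MY_READ_ONCE(x)         (*(const volatile __typeof__(x) *)&(x))
#define MY_WRITE_ONCE(x, v)     (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long gpnum;     /* stand-ins for rsp->gpnum and rsp->completed */
static unsigned long completed;

/* Lockless check in the style of rcu_gp_in_progress(). */
static int gp_in_progress(void)
{
        return MY_READ_ONCE(completed) != MY_READ_ONCE(gpnum);
}

int main(void)
{
        MY_WRITE_ONCE(gpnum, gpnum + 1);        /* start a "grace period" */
        printf("in progress: %d\n", gp_in_progress());
        MY_WRITE_ONCE(completed, gpnum);        /* end it */
        printf("in progress: %d\n", gp_in_progress());
        return 0;
}
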
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 48d640ca1a05b8c0f83fe2b217b925a6dec69fa4..0628df1559700e2efea2f92a68da8281b0c5131d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -91,8 +91,10 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var
 
 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
 DEFINE_RCU_TPS(sname) \
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
 struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
+       .rda = &sname##_data, \
        .call = cr, \
        .fqs_state = RCU_GP_IDLE, \
        .gpnum = 0UL - 300UL, \
@@ -101,11 +103,9 @@ struct rcu_state sname##_state = { \
        .orphan_nxttail = &sname##_state.orphan_nxtlist, \
        .orphan_donetail = &sname##_state.orphan_donelist, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
-       .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
-}; \
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
+}
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -152,6 +152,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -160,6 +162,15 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
 module_param(kthread_prio, int, 0644);
 
+/* Delay in jiffies for grace-period initialization delays, debug only. */
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
+static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
+module_param(gp_init_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+static const int gp_init_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented
@@ -173,13 +184,24 @@ unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
 /*
- * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * Compute the mask of online CPUs for the specified rcu_node structure.
+ * This will not be stable unless the rcu_node structure's ->lock is
+ * held, but the bit corresponding to the current CPU will be stable
+ * in most contexts.
+ */
+unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
+{
+       return READ_ONCE(rnp->qsmaskinitnext);
+}
+
+/*
+ * Return true if an RCU grace period is in progress.  The READ_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-       return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+       return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
 }
 
 /*
@@ -256,8 +278,8 @@ static void rcu_momentary_dyntick_idle(void)
                if (!(resched_mask & rsp->flavor_mask))
                        continue;
                smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
-               if (ACCESS_ONCE(rdp->mynode->completed) !=
-                   ACCESS_ONCE(rdp->cond_resched_completed))
+               if (READ_ONCE(rdp->mynode->completed) !=
+                   READ_ONCE(rdp->cond_resched_completed))
                        continue;
 
                /*
@@ -292,10 +314,10 @@ void rcu_note_context_switch(void)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 /*
- * Register a quiesecent state for all RCU flavors.  If there is an
+ * Register a quiescent state for all RCU flavors.  If there is an
  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
  * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desparate need of a quiescent state, which will normally
+ * RCU flavors in desperate need of a quiescent state, which will normally
  * be none of them).  Either way, do a lightweight quiescent state for
  * all RCU flavors.
  */
@@ -409,6 +431,15 @@ void rcu_bh_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
+/*
+ * Force a quiescent state for RCU-sched.
+ */
+void rcu_sched_force_quiescent_state(void)
+{
+       force_quiescent_state(&rcu_sched_state);
+}
+EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
+
 /*
  * Show the state of the grace-period kthreads.
  */
@@ -460,9 +491,9 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                break;
        }
        if (rsp != NULL) {
-               *flags = ACCESS_ONCE(rsp->gp_flags);
-               *gpnum = ACCESS_ONCE(rsp->gpnum);
-               *completed = ACCESS_ONCE(rsp->completed);
+               *flags = READ_ONCE(rsp->gp_flags);
+               *gpnum = READ_ONCE(rsp->gpnum);
+               *completed = READ_ONCE(rsp->completed);
                return;
        }
        *flags = 0;
@@ -482,15 +513,6 @@ void rcutorture_record_progress(unsigned long vernum)
 }
 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 
-/*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
-       force_quiescent_state(&rcu_sched_state);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
 /*
  * Does the CPU have callbacks ready to be invoked?
  */
@@ -517,10 +539,10 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
        struct rcu_node *rnp = rcu_get_root(rsp);
-       int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
+       int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
        int *fp = &rnp->need_future_gp[idx];
 
-       return ACCESS_ONCE(*fp);
+       return READ_ONCE(*fp);
 }
 
 /*
@@ -543,7 +565,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
                return 1;  /* Yes, this CPU has newly registered callbacks. */
        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
                if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
-                   ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+                   ULONG_CMP_LT(READ_ONCE(rsp->completed),
                                 rdp->nxtcompleted[i]))
                        return 1;  /* Yes, CBs for future grace period. */
        return 0; /* No grace period needed. */
@@ -954,7 +976,7 @@ bool rcu_lockdep_current_cpu_online(void)
        preempt_disable();
        rdp = this_cpu_ptr(&rcu_sched_data);
        rnp = rdp->mynode;
-       ret = (rdp->grpmask & rnp->qsmaskinit) ||
+       ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
              !rcu_scheduler_fully_active;
        preempt_enable();
        return ret;
@@ -989,9 +1011,9 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                return 1;
        } else {
-               if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+               if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
-                       ACCESS_ONCE(rdp->gpwrap) = true;
+                       WRITE_ONCE(rdp->gpwrap, true);
                return 0;
        }
 }
@@ -1071,12 +1093,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        if (ULONG_CMP_GE(jiffies,
                         rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-               if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
-                       ACCESS_ONCE(rdp->cond_resched_completed) =
-                               ACCESS_ONCE(rdp->mynode->completed);
+               if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+                       WRITE_ONCE(rdp->cond_resched_completed,
+                                  READ_ONCE(rdp->mynode->completed));
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
-                       ACCESS_ONCE(*rcrmp) =
-                               ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+                       WRITE_ONCE(*rcrmp,
+                                  READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
                        resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
                        rdp->rsp->jiffies_resched += 5; /* Enable beating. */
                } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
@@ -1097,9 +1119,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        rsp->gp_start = j;
        smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
+       WRITE_ONCE(rsp->jiffies_stall, j + j1);
        rsp->jiffies_resched = j + j1 / 2;
-       rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+       rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
 
 /*
@@ -1111,7 +1133,7 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
        unsigned long j;
 
        j = jiffies;
-       gpa = ACCESS_ONCE(rsp->gp_activity);
+       gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ)
                pr_err("%s kthread starved for %ld jiffies!\n",
                       rsp->name, j - gpa);
@@ -1151,12 +1173,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        /* Only let one CPU complain about others per time interval. */
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+       delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
-       ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       WRITE_ONCE(rsp->jiffies_stall,
+                  jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -1190,15 +1213,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
        } else {
-               if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
-                   ACCESS_ONCE(rsp->completed) == gpnum) {
+               if (READ_ONCE(rsp->gpnum) != gpnum ||
+                   READ_ONCE(rsp->completed) == gpnum) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
-                       gpa = ACCESS_ONCE(rsp->gp_activity);
-                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+                       gpa = READ_ONCE(rsp->gp_activity);
+                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rsp->name, j - gpa, j, gpa,
-                              jiffies_till_next_fqs);
+                              jiffies_till_next_fqs,
+                              rcu_get_root(rsp)->qsmask);
                        /* In this case, the current CPU might be at fault. */
                        sched_show_task(current);
                }
@@ -1239,9 +1263,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
-                                    3 * rcu_jiffies_till_stall_check() + 3;
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+               WRITE_ONCE(rsp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -1284,20 +1308,20 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
-       gpnum = ACCESS_ONCE(rsp->gpnum);
+       gpnum = READ_ONCE(rsp->gpnum);
        smp_rmb(); /* Pick up ->gpnum first... */
-       js = ACCESS_ONCE(rsp->jiffies_stall);
+       js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-       gps = ACCESS_ONCE(rsp->gp_start);
+       gps = READ_ONCE(rsp->gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-       completed = ACCESS_ONCE(rsp->completed);
+       completed = READ_ONCE(rsp->completed);
        if (ULONG_CMP_GE(completed, gpnum) ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        if (rcu_gp_in_progress(rsp) &&
-           (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
@@ -1324,23 +1348,33 @@ void rcu_cpu_stall_reset(void)
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp)
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
+               WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
 /*
- * Initialize the specified rcu_data structure's callback list to empty.
+ * Initialize the specified rcu_data structure's default callback list
+ * to empty.  The default callback list is the one that is not used by
+ * no-callbacks CPUs.
  */
-static void init_callback_list(struct rcu_data *rdp)
+static void init_default_callback_list(struct rcu_data *rdp)
 {
        int i;
 
-       if (init_nocb_callback_list(rdp))
-               return;
        rdp->nxtlist = NULL;
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
 }
 
+/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+       if (init_nocb_callback_list(rdp))
+               return;
+       init_default_callback_list(rdp);
+}
+
 /*
  * Determine the value that ->completed will have at the end of the
  * next subsequent grace period.  This is used to tag callbacks so that
@@ -1424,7 +1458,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         * doing some extra useless work.
         */
        if (rnp->gpnum != rnp->completed ||
-           ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
+           READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
                rnp->need_future_gp[c & 0x1]++;
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
                goto out;
@@ -1509,7 +1543,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
        if (current == rsp->gp_kthread ||
-           !ACCESS_ONCE(rsp->gp_flags) ||
+           !READ_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
        wake_up(&rsp->gp_wq);
@@ -1644,7 +1678,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
        /* Handle the ends of any preceding grace periods first. */
        if (rdp->completed == rnp->completed &&
-           !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+           !unlikely(READ_ONCE(rdp->gpwrap))) {
 
                /* No grace period end, so just accelerate recent callbacks. */
                ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1659,7 +1693,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
        }
 
-       if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+       if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
@@ -1671,7 +1705,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
-               ACCESS_ONCE(rdp->gpwrap) = false;
+               WRITE_ONCE(rdp->gpwrap, false);
        }
        return ret;
 }
@@ -1684,9 +1718,9 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
        local_irq_save(flags);
        rnp = rdp->mynode;
-       if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-            rdp->completed == ACCESS_ONCE(rnp->completed) &&
-            !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
+       if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
+            rdp->completed == READ_ONCE(rnp->completed) &&
+            !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
@@ -1703,19 +1737,19 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static int rcu_gp_init(struct rcu_state *rsp)
 {
+       unsigned long oldmask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
-       rcu_bind_gp_kthread();
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
-       if (!ACCESS_ONCE(rsp->gp_flags)) {
+       if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
                raw_spin_unlock_irq(&rnp->lock);
                return 0;
        }
-       ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
+       WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
        if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
                /*
@@ -1733,9 +1767,54 @@ static int rcu_gp_init(struct rcu_state *rsp)
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
        raw_spin_unlock_irq(&rnp->lock);
 
-       /* Exclude any concurrent CPU-hotplug operations. */
-       mutex_lock(&rsp->onoff_mutex);
-       smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
+       /*
+        * Apply per-leaf buffered online and offline operations to the
+        * rcu_node tree.  Note that this new grace period need not wait
+        * for subsequent online CPUs, and that quiescent-state forcing
+        * will handle subsequent offline CPUs.
+        */
+       rcu_for_each_leaf_node(rsp, rnp) {
+               raw_spin_lock_irq(&rnp->lock);
+               smp_mb__after_unlock_lock();
+               if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
+                   !rnp->wait_blkd_tasks) {
+                       /* Nothing to do on this leaf rcu_node structure. */
+                       raw_spin_unlock_irq(&rnp->lock);
+                       continue;
+               }
+
+               /* Record old state, apply changes to ->qsmaskinit field. */
+               oldmask = rnp->qsmaskinit;
+               rnp->qsmaskinit = rnp->qsmaskinitnext;
+
+               /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
+               if (!oldmask != !rnp->qsmaskinit) {
+                       if (!oldmask) /* First online CPU for this rcu_node. */
+                               rcu_init_new_rnp(rnp);
+                       else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
+                               rnp->wait_blkd_tasks = true;
+                       else /* Last offline CPU and can propagate. */
+                               rcu_cleanup_dead_rnp(rnp);
+               }
+
+               /*
+                * If all waited-on tasks from prior grace period are
+                * done, and if all this rcu_node structure's CPUs are
+                * still offline, propagate up the rcu_node tree and
+                * clear ->wait_blkd_tasks.  Otherwise, if one of this
+                * rcu_node structure's CPUs has since come back online,
+                * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
+                * checks for this, so just call it unconditionally).
+                */
+               if (rnp->wait_blkd_tasks &&
+                   (!rcu_preempt_has_tasks(rnp) ||
+                    rnp->qsmaskinit)) {
+                       rnp->wait_blkd_tasks = false;
+                       rcu_cleanup_dead_rnp(rnp);
+               }
+
+               raw_spin_unlock_irq(&rnp->lock);
+       }
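
The !oldmask != !rnp->qsmaskinit test above is true only when the mask changes
between zero and nonzero, that is, when this leaf gains its first online CPU
or loses its last one; a bit moving within an already-nonzero mask never
propagates up the tree.  A standalone check of that idiom, using made-up mask
values:

#include <assert.h>
#include <stdio.h>

/* Returns nonzero only when the zero-ness of the mask changed. */
static int zeroness_changed(unsigned long oldmask, unsigned long newmask)
{
        return !oldmask != !newmask;
}

int main(void)
{
        assert( zeroness_changed(0x0, 0x4));    /* first CPU came online */
        assert( zeroness_changed(0x2, 0x0));    /* last CPU went offline */
        assert(!zeroness_changed(0x3, 0x1));    /* still nonzero: no propagation */
        assert(!zeroness_changed(0x0, 0x0));    /* still empty: nothing to do */
        printf("ok\n");
        return 0;
}
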
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
@@ -1756,9 +1835,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
-               WARN_ON_ONCE(rnp->completed != rsp->completed);
-               ACCESS_ONCE(rnp->completed) = rsp->completed;
+               WRITE_ONCE(rnp->gpnum, rsp->gpnum);
+               if (WARN_ON_ONCE(rnp->completed != rsp->completed))
+                       WRITE_ONCE(rnp->completed, rsp->completed);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
@@ -1767,10 +1846,12 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
-               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+               WRITE_ONCE(rsp->gp_activity, jiffies);
+               if (gp_init_delay > 0 &&
+                   !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
+                       schedule_timeout_uninterruptible(gp_init_delay);
        }
 
-       mutex_unlock(&rsp->onoff_mutex);
        return 1;
 }
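
The schedule_timeout_uninterruptible() call above slows grace-period
initialization for torture testing, but only for one grace period out of
every rcu_num_nodes * PER_RCU_NODE_PERIOD; during that grace period the
kthread sleeps gp_init_delay jiffies per rcu_node structure it initializes.
A small sketch of which grace periods the modulus test selects, assuming a
hypothetical rcu_num_nodes of 4:

#include <stdio.h>

#define PER_RCU_NODE_PERIOD 10  /* as in the patch */

int main(void)
{
        unsigned long gpnum;
        int rcu_num_nodes = 4;  /* hypothetical tree size */

        /* Same modulus test as rcu_gp_init(); prints the slowed grace periods. */
        for (gpnum = 1; gpnum <= 100; gpnum++)
                if (!(gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
                        printf("grace period %lu would be slowed\n", gpnum);
        return 0;
}
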
 
@@ -1784,7 +1865,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
        unsigned long maxj;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        rsp->n_force_qs++;
        if (fqs_state == RCU_SAVE_DYNTICK) {
                /* Collect dyntick-idle snapshots. */
@@ -1798,15 +1879,15 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
                fqs_state = RCU_FORCE_QS;
        } else {
                /* Handle dyntick-idle and offline CPUs. */
-               isidle = false;
+               isidle = true;
                force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
        }
        /* Clear flag to prevent immediate re-entry. */
-       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+       if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
-               ACCESS_ONCE(rsp->gp_flags) =
-                       ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
+               WRITE_ONCE(rsp->gp_flags,
+                          READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
                raw_spin_unlock_irq(&rnp->lock);
        }
        return fqs_state;
@@ -1823,7 +1904,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
        gp_duration = jiffies - rsp->gp_start;
@@ -1852,7 +1933,9 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
-               ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+               WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+               WARN_ON_ONCE(rnp->qsmask);
+               WRITE_ONCE(rnp->completed, rsp->gpnum);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -1860,7 +1943,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
-               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+               WRITE_ONCE(rsp->gp_activity, jiffies);
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq(&rnp->lock);
@@ -1868,16 +1951,16 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_nocb_gp_set(rnp, nocb);
 
        /* Declare grace period done. */
-       ACCESS_ONCE(rsp->completed) = rsp->gpnum;
+       WRITE_ONCE(rsp->completed, rsp->gpnum);
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
        rsp->fqs_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
        /* Advance CBs to reduce false positives below. */
        needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
        if (needgp || cpu_needs_another_gp(rsp, rdp)) {
-               ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+               WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
                trace_rcu_grace_period(rsp->name,
-                                      ACCESS_ONCE(rsp->gpnum),
+                                      READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
        raw_spin_unlock_irq(&rnp->lock);
@@ -1895,25 +1978,26 @@ static int __noreturn rcu_gp_kthread(void *arg)
        struct rcu_state *rsp = arg;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       rcu_bind_gp_kthread();
        for (;;) {
 
                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
                        wait_event_interruptible(rsp->gp_wq,
-                                                ACCESS_ONCE(rsp->gp_flags) &
+                                                READ_ONCE(rsp->gp_flags) &
                                                 RCU_GP_FLAG_INIT);
                        /* Locking provides needed memory barrier. */
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched_rcu_qs();
-                       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                       WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("reqwaitsig"));
                }
 
@@ -1929,39 +2013,39 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (!ret)
                                rsp->jiffies_force_qs = jiffies + j;
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = wait_event_interruptible_timeout(rsp->gp_wq,
-                                       ((gf = ACCESS_ONCE(rsp->gp_flags)) &
+                                       ((gf = READ_ONCE(rsp->gp_flags)) &
                                         RCU_GP_FLAG_FQS) ||
-                                       (!ACCESS_ONCE(rnp->qsmask) &&
+                                       (!READ_ONCE(rnp->qsmask) &&
                                         !rcu_preempt_blocked_readers_cgp(rnp)),
                                        j);
                        /* Locking provides needed memory barriers. */
                        /* If grace period done, leave loop. */
-                       if (!ACCESS_ONCE(rnp->qsmask) &&
+                       if (!READ_ONCE(rnp->qsmask) &&
                            !rcu_preempt_blocked_readers_cgp(rnp))
                                break;
                        /* If time for quiescent-state forcing, do it. */
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqsstart"));
                                fqs_state = rcu_gp_fqs(rsp, fqs_state);
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
-                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                               WRITE_ONCE(rsp->gp_activity, jiffies);
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
-                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                               WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqswaitsig"));
                        }
                        j = jiffies_till_next_fqs;
@@ -2003,8 +2087,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                 */
                return false;
        }
-       ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
-       trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+       WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
+       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
                               TPS("newreq"));
 
        /*
@@ -2062,25 +2146,32 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
  * Allows quiescent states for a group of CPUs to be reported at one go
  * to the specified rcu_node structure, though all the CPUs in the group
- * must be represented by the same rcu_node structure (which need not be
- * a leaf rcu_node structure, though it often will be).  That structure's
- * lock must be held upon entry, and it is released before return.
+ * must be represented by the same rcu_node structure (which need not be a
+ * leaf rcu_node structure, though it often will be).  The gps parameter
+ * is the grace-period snapshot, which means that the quiescent states
+ * are valid only if rnp->gpnum is equal to gps.  That structure's lock
+ * must be held upon entry, and it is released before return.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-                 struct rcu_node *rnp, unsigned long flags)
+                 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
        __releases(rnp->lock)
 {
+       unsigned long oldmask = 0;
        struct rcu_node *rnp_c;
 
        /* Walk up the rcu_node hierarchy. */
        for (;;) {
-               if (!(rnp->qsmask & mask)) {
+               if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
 
-                       /* Our bit has already been cleared, so done. */
+                       /*
+                        * Our bit has already been cleared, or the
+                        * relevant grace period is already over, so done.
+                        */
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
+               WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
                rnp->qsmask &= ~mask;
                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
                                                 mask, rnp->qsmask, rnp->level,
@@ -2104,7 +2195,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                rnp = rnp->parent;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                smp_mb__after_unlock_lock();
-               WARN_ON_ONCE(rnp_c->qsmask);
+               oldmask = rnp_c->qsmask;
        }
 
        /*
@@ -2115,6 +2206,46 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
        rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
 }
 
+/*
+ * Record a quiescent state for all tasks that were previously queued
+ * on the specified rcu_node structure and that were blocking the current
+ * RCU grace period.  The caller must hold the specified rnp->lock with
+ * irqs disabled, and this lock is released upon return, but irqs remain
+ * disabled.
+ */
+static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                                     struct rcu_node *rnp, unsigned long flags)
+       __releases(rnp->lock)
+{
+       unsigned long gps;
+       unsigned long mask;
+       struct rcu_node *rnp_p;
+
+       if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
+           rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               return;  /* Still need more quiescent states! */
+       }
+
+       rnp_p = rnp->parent;
+       if (rnp_p == NULL) {
+               /*
+                * Only one rcu_node structure in the tree, so don't
+                * try to report up to its nonexistent parent!
+                */
+               rcu_report_qs_rsp(rsp, flags);
+               return;
+       }
+
+       /* Report up the rest of the hierarchy, tracking current ->gpnum. */
+       gps = rnp->gpnum;
+       mask = rnp->grpmask;
+       raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+       raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
+       smp_mb__after_unlock_lock();
+       rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+}
+
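
The gps argument now passed to rcu_report_qs_rnp() is the grace-period number
captured when the quiescent states were collected; the walk up the tree bails
out if rnp->gpnum has since moved on, so a stale report can never clear bits
that belong to a newer grace period.  A tiny standalone illustration of that
snapshot check (made-up state, no locking, names are illustrative):

#include <stdio.h>

struct node {
        unsigned long gpnum;    /* current grace-period number */
        unsigned long qsmask;   /* CPUs still needing a quiescent state */
};

/* Clear @mask only if the report still applies to grace period @gps. */
static void report_qs(struct node *n, unsigned long mask, unsigned long gps)
{
        if (!(n->qsmask & mask) || n->gpnum != gps) {
                printf("stale or redundant report ignored\n");
                return;
        }
        n->qsmask &= ~mask;
        printf("qsmask now %#lx\n", n->qsmask);
}

int main(void)
{
        struct node n = { .gpnum = 42, .qsmask = 0x3 };

        report_qs(&n, 0x1, 42); /* valid: clears bit 0 */
        n.gpnum = 43;           /* a new grace period begins */
        report_qs(&n, 0x2, 42); /* stale snapshot: ignored */
        return 0;
}
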
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
  * structure.  This must be either called from the specified CPU, or
@@ -2163,7 +2294,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-               rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+               /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        }
@@ -2228,7 +2360,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                rsp->qlen += rdp->qlen;
                rdp->n_cbs_orphaned += rdp->qlen;
                rdp->qlen_lazy = 0;
-               ACCESS_ONCE(rdp->qlen) = 0;
+               WRITE_ONCE(rdp->qlen, 0);
        }
 
        /*
@@ -2256,8 +2388,12 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
        }
 
-       /* Finally, initialize the rcu_data structure's list to empty.  */
+       /*
+        * Finally, initialize the rcu_data structure's list to empty and
+        * disallow further callbacks on this CPU.
+        */
        init_callback_list(rdp);
+       rdp->nxttail[RCU_NEXT_TAIL] = NULL;
 }
 
 /*
@@ -2355,6 +2491,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                smp_mb__after_unlock_lock(); /* GP memory ordering. */
                rnp->qsmaskinit &= ~mask;
+               rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                        return;
@@ -2363,6 +2500,26 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        }
 }
 
+/*
+ * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
+ * bit masks.
+ */
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
+
+       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+       mask = rdp->grpmask;
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
+       rnp->qsmaskinitnext &= ~mask;
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
@@ -2379,29 +2536,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
 
-       /* Exclude any attempts to start a new grace period. */
-       mutex_lock(&rsp->onoff_mutex);
-       raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
-
        /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
+       raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
        rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
        rcu_adopt_orphan_cbs(rsp, flags);
        raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 
-       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
-       rnp->qsmaskinit &= ~rdp->grpmask;
-       if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
-               rcu_cleanup_dead_rnp(rnp);
-       rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
        WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
                  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
                  cpu, rdp->qlen, rdp->nxtlist);
-       init_callback_list(rdp);
-       /* Disallow further callbacks on this CPU. */
-       rdp->nxttail[RCU_NEXT_TAIL] = NULL;
-       mutex_unlock(&rsp->onoff_mutex);
 }
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -2414,6 +2557,10 @@ static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 {
 }
 
+static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+{
+}
+
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 }
@@ -2434,7 +2581,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        /* If no callbacks are ready, just return. */
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
                trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
-               trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+               trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
                                    need_resched(), is_idle_task(current),
                                    rcu_is_callbacks_kthread());
                return;
@@ -2490,7 +2637,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        }
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rdp->qlen_lazy -= count_lazy;
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
+       WRITE_ONCE(rdp->qlen, rdp->qlen - count);
        rdp->n_cbs_invoked += count;
 
        /* Reinstate batch limit if we have worked down the excess. */
@@ -2589,26 +2736,47 @@ static void force_qs_rnp(struct rcu_state *rsp,
                        return;
                }
                if (rnp->qsmask == 0) {
-                       rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
-                       continue;
+                       if (rcu_state_p == &rcu_sched_state ||
+                           rsp != rcu_state_p ||
+                           rcu_preempt_blocked_readers_cgp(rnp)) {
+                               /*
+                                * No point in scanning bits because they
+                                * are all zero.  But we might need to
+                                * priority-boost blocked readers.
+                                */
+                               rcu_initiate_boost(rnp, flags);
+                               /* rcu_initiate_boost() releases rnp->lock */
+                               continue;
+                       }
+                       if (rnp->parent &&
+                           (rnp->parent->qsmask & rnp->grpmask)) {
+                               /*
+                                * Race between grace-period
+                                * initialization and task exiting RCU
+                                * read-side critical section: Report.
+                                */
+                               rcu_report_unblock_qs_rnp(rsp, rnp, flags);
+                               /* rcu_report_unblock_qs_rnp() rlses ->lock */
+                               continue;
+                       }
                }
                cpu = rnp->grplo;
                bit = 1;
                for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
                        if ((rnp->qsmask & bit) != 0) {
-                               if ((rnp->qsmaskinit & bit) != 0)
-                                       *isidle = false;
+                               if ((rnp->qsmaskinit & bit) == 0)
+                                       *isidle = false; /* Pending hotplug. */
                                if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
                                        mask |= bit;
                        }
                }
                if (mask != 0) {
-
-                       /* rcu_report_qs_rnp() releases rnp->lock. */
-                       rcu_report_qs_rnp(mask, rsp, rnp, flags);
-                       continue;
+                       /* Idle/offline CPUs, report (releases rnp->lock). */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+               } else {
+                       /* Nothing to do here, so just drop the lock. */
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
 }
 
@@ -2626,7 +2794,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
        /* Funnel through hierarchy to reduce memory contention. */
        rnp = __this_cpu_read(rsp->rda->mynode);
        for (; rnp != NULL; rnp = rnp->parent) {
-               ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+               ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
                      !raw_spin_trylock(&rnp->fqslock);
                if (rnp_old != NULL)
                        raw_spin_unlock(&rnp_old->fqslock);
@@ -2642,13 +2810,12 @@ static void force_quiescent_state(struct rcu_state *rsp)
        raw_spin_lock_irqsave(&rnp_old->lock, flags);
        smp_mb__after_unlock_lock();
        raw_spin_unlock(&rnp_old->fqslock);
-       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+       if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                return;  /* Someone beat us to it. */
        }
-       ACCESS_ONCE(rsp->gp_flags) =
-               ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
+       WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
        rcu_gp_kthread_wake(rsp);
 }
@@ -2714,7 +2881,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+       if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
                return;
        if (likely(!rsp->boost)) {
                rcu_do_batch(rsp, rdp);
@@ -2741,7 +2908,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
         * If called from an extended quiescent state, invoke the RCU
         * core in order to force a re-evaluation of RCU's idleness.
         */
-       if (!rcu_is_watching() && cpu_online(smp_processor_id()))
+       if (!rcu_is_watching())
                invoke_rcu_core();
 
        /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
@@ -2805,7 +2972,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
        if (debug_rcu_head_queue(head)) {
                /* Probable double call_rcu(), so leak the callback. */
-               ACCESS_ONCE(head->func) = rcu_leak_callback;
+               WRITE_ONCE(head->func, rcu_leak_callback);
                WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
                return;
        }
@@ -2827,13 +2994,24 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
                if (cpu != -1)
                        rdp = per_cpu_ptr(rsp->rda, cpu);
-               offline = !__call_rcu_nocb(rdp, head, lazy, flags);
-               WARN_ON_ONCE(offline);
-               /* _call_rcu() is illegal on offline CPU; leak the callback. */
-               local_irq_restore(flags);
-               return;
+               if (likely(rdp->mynode)) {
+                       /* Post-boot, so this should be for a no-CBs CPU. */
+                       offline = !__call_rcu_nocb(rdp, head, lazy, flags);
+                       WARN_ON_ONCE(offline);
+                       /* Offline CPU, _call_rcu() illegal, leak callback.  */
+                       local_irq_restore(flags);
+                       return;
+               }
+               /*
+                * Very early boot, before rcu_init().  Initialize if needed
+                * and then drop through to queue the callback.
+                */
+               BUG_ON(cpu != -1);
+               WARN_ON_ONCE(!rcu_is_watching());
+               if (!likely(rdp->nxtlist))
+                       init_default_callback_list(rdp);
        }
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
+       WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
        if (lazy)
                rdp->qlen_lazy++;
        else
@@ -2954,7 +3132,7 @@ void synchronize_sched(void)
                           "Illegal synchronize_sched() in RCU-sched read-side critical section");
        if (rcu_blocking_is_gp())
                return;
-       if (rcu_expedited)
+       if (rcu_gp_is_expedited())
                synchronize_sched_expedited();
        else
                wait_rcu_gp(call_rcu_sched);
@@ -2981,7 +3159,7 @@ void synchronize_rcu_bh(void)
                           "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
        if (rcu_blocking_is_gp())
                return;
-       if (rcu_expedited)
+       if (rcu_gp_is_expedited())
                synchronize_rcu_bh_expedited();
        else
                wait_rcu_gp(call_rcu_bh);
@@ -3272,14 +3450,14 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* Has another RCU grace period completed?  */
-       if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
+       if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
                rdp->n_rp_gp_completed++;
                return 1;
        }
 
        /* Has a new RCU grace period started? */
-       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
-           unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
+       if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+           unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
                rdp->n_rp_gp_started++;
                return 1;
        }
@@ -3386,7 +3564,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 {
        int cpu;
        struct rcu_data *rdp;
-       unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+       unsigned long snap = READ_ONCE(rsp->n_barrier_done);
        unsigned long snap_done;
 
        _rcu_barrier_trace(rsp, "Begin", -1, snap);
@@ -3428,10 +3606,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /*
         * Increment ->n_barrier_done to avoid duplicate work.  Use
-        * ACCESS_ONCE() to prevent the compiler from speculating
+        * WRITE_ONCE() to prevent the compiler from speculating
         * the increment to precede the early-exit check.
         */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3467,7 +3645,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
                                __call_rcu(&rdp->barrier_head,
                                           rcu_barrier_callback, rsp, cpu, 0);
                        }
-               } else if (ACCESS_ONCE(rdp->qlen)) {
+               } else if (READ_ONCE(rdp->qlen)) {
                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                           rsp->n_barrier_done);
                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -3487,7 +3665,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /* Increment ->n_barrier_done to prevent duplicate work. */
        smp_mb(); /* Keep increment after above mechanism. */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
        smp_mb(); /* Keep increment before caller's subsequent code. */
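
The two increments bracketing the barrier keep ->n_barrier_done odd while a
barrier is in flight and even otherwise, and later callers compare an earlier
snapshot of the counter against its current value to skip work that a
concurrent barrier already covers.  A minimal single-threaded sketch of just
that odd/even invariant (illustrative only; the real code also needs the
WRITE_ONCE() and memory barriers shown above):

#include <assert.h>
#include <stdio.h>

static unsigned long n_barrier_done;    /* stand-in for rsp->n_barrier_done */

static void barrier_body(void)
{
        /* ... the per-CPU callback-enqueue work would go here ... */
}

int main(void)
{
        /* First increment: counter becomes odd, "barrier in progress". */
        n_barrier_done++;
        assert((n_barrier_done & 0x1) == 1);    /* mirrors the first WARN_ON_ONCE() */

        barrier_body();

        /* Second increment: counter becomes even, "no barrier in progress". */
        n_barrier_done++;
        assert((n_barrier_done & 0x1) == 0);    /* mirrors the second WARN_ON_ONCE() */

        printf("n_barrier_done = %lu\n", n_barrier_done);
        return 0;
}
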
@@ -3517,6 +3695,28 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
+/*
+ * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
+ * first CPU in a given leaf rcu_node structure coming online.  The caller
+ * must hold the corresponding leaf rcu_node ->lock with interrupts
+ * disabled.
+ */
+static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
+{
+       long mask;
+       struct rcu_node *rnp = rnp_leaf;
+
+       for (;;) {
+               mask = rnp->grpmask;
+               rnp = rnp->parent;
+               if (rnp == NULL)
+                       return;
+               raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
+               rnp->qsmaskinit |= mask;
+               raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+       }
+}
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
@@ -3553,49 +3753,37 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       /* Exclude new grace periods. */
-       mutex_lock(&rsp->onoff_mutex);
-
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rdp->beenonline = 1;     /* We have now been online. */
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
-       init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
+       if (!rdp->nxtlist)
+               init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
        rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_sysidle_init_percpu_data(rdp->dynticks);
        atomic_set(&rdp->dynticks->dynticks,
                   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
        raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
 
-       /* Add CPU to rcu_node bitmasks. */
+       /*
+        * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
+        * propagation up the rcu_node tree will happen at the beginning
+        * of the next grace period.
+        */
        rnp = rdp->mynode;
        mask = rdp->grpmask;
-       do {
-               /* Exclude any attempts to start a new GP on small systems. */
-               raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
-               rnp->qsmaskinit |= mask;
-               mask = rnp->grpmask;
-               if (rnp == rdp->mynode) {
-                       /*
-                        * If there is a grace period in progress, we will
-                        * set up to wait for it next time we run the
-                        * RCU core code.
-                        */
-                       rdp->gpnum = rnp->completed;
-                       rdp->completed = rnp->completed;
-                       rdp->passed_quiesce = 0;
-                       rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-                       rdp->qs_pending = 0;
-                       trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-               }
-               raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
-               rnp = rnp->parent;
-       } while (rnp != NULL && !(rnp->qsmaskinit & mask));
-       local_irq_restore(flags);
-
-       mutex_unlock(&rsp->onoff_mutex);
+       raw_spin_lock(&rnp->lock);              /* irqs already disabled. */
+       smp_mb__after_unlock_lock();
+       rnp->qsmaskinitnext |= mask;
+       rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
+       rdp->completed = rnp->completed;
+       rdp->passed_quiesce = false;
+       rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+       rdp->qs_pending = false;
+       trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -3609,15 +3797,14 @@ static void rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int rcu_cpu_notify(struct notifier_block *self,
-                                   unsigned long action, void *hcpu)
+int rcu_cpu_notify(struct notifier_block *self,
+                  unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
        struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_state *rsp;
 
-       trace_rcu_utilization(TPS("Start CPU hotplug"));
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
@@ -3637,6 +3824,11 @@ static int rcu_cpu_notify(struct notifier_block *self,
                for_each_rcu_flavor(rsp)
                        rcu_cleanup_dying_cpu(rsp);
                break;
+       case CPU_DYING_IDLE:
+               for_each_rcu_flavor(rsp) {
+                       rcu_cleanup_dying_idle_cpu(cpu, rsp);
+               }
+               break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
@@ -3649,7 +3841,6 @@ static int rcu_cpu_notify(struct notifier_block *self,
        default:
                break;
        }
-       trace_rcu_utilization(TPS("End CPU hotplug"));
        return NOTIFY_OK;
 }
 
@@ -3660,11 +3851,12 @@ static int rcu_pm_notify(struct notifier_block *self,
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                       rcu_expedited = 1;
+                       rcu_expedite_gp();
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
-               rcu_expedited = 0;
+               if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+                       rcu_unexpedite_gp();
                break;
        default:
                break;
@@ -3734,30 +3926,26 @@ void rcu_scheduler_starting(void)
  * Compute the per-level fanout, either using the exact fanout specified
  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
  */
-#ifdef CONFIG_RCU_FANOUT_EXACT
-static void __init rcu_init_levelspread(struct rcu_state *rsp)
-{
-       int i;
-
-       rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
-       for (i = rcu_num_lvls - 2; i >= 0; i--)
-               rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-}
-#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
-       int ccur;
-       int cprv;
        int i;
 
-       cprv = nr_cpu_ids;
-       for (i = rcu_num_lvls - 1; i >= 0; i--) {
-               ccur = rsp->levelcnt[i];
-               rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
-               cprv = ccur;
+       if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
+               rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+               for (i = rcu_num_lvls - 2; i >= 0; i--)
+                       rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+       } else {
+               int ccur;
+               int cprv;
+
+               cprv = nr_cpu_ids;
+               for (i = rcu_num_lvls - 1; i >= 0; i--) {
+                       ccur = rsp->levelcnt[i];
+                       rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+                       cprv = ccur;
+               }
        }
 }
-#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
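
The balanced branch of rcu_init_levelspread() above distributes CPUs as
evenly as possible: working bottom-up, levelspread[i] is the ceiling of
cprv / ccur.  A standalone version of just that arithmetic, using a
hypothetical geometry of 96 CPUs under one root node with six leaves
(expected result: { 6, 16 }):

#include <stdio.h>

/* Bottom-up ceiling division, as in the balanced branch above. */
static void init_levelspread(int nr_cpu_ids, const int *levelcnt,
                             int *levelspread, int num_lvls)
{
        int cprv = nr_cpu_ids;
        int i;

        for (i = num_lvls - 1; i >= 0; i--) {
                int ccur = levelcnt[i];

                levelspread[i] = (cprv + ccur - 1) / ccur;      /* ceil(cprv / ccur) */
                cprv = ccur;
        }
}

int main(void)
{
        /* Hypothetical geometry: 96 CPUs, one root node fanning out to 6 leaves. */
        int levelcnt[2] = { 1, 6 };
        int levelspread[2];

        init_levelspread(96, levelcnt, levelspread, 2);
        printf("levelspread = { %d, %d }\n", levelspread[0], levelspread[1]);
        return 0;
}
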
 
 /*
  * Helper function for rcu_init() that initializes one rcu_state structure.
@@ -3833,7 +4021,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                }
        }
 
-       rsp->rda = rda;
        init_waitqueue_head(&rsp->gp_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
@@ -3926,6 +4113,8 @@ void __init rcu_init(void)
 {
        int cpu;
 
+       rcu_early_boot_tests();
+
        rcu_bootup_announce();
        rcu_init_geometry();
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
@@ -3942,8 +4131,6 @@ void __init rcu_init(void)
        pm_notifier(rcu_pm_notify, 0);
        for_each_online_cpu(cpu)
                rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-
-       rcu_early_boot_tests();
 }
 
 #include "tree_plugin.h"