git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - kernel/rcu/tree.c
rcu: Avoid clobbering early boot callbacks
[karo-tx-linux.git] / kernel / rcu / tree.c
index 48d640ca1a05b8c0f83fe2b217b925a6dec69fa4..92fd3eab5823df8dfd160401ec710471a89c5fec 100644 (file)
@@ -91,8 +91,10 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var
 
 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
 DEFINE_RCU_TPS(sname) \
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
 struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
+       .rda = &sname##_data, \
        .call = cr, \
        .fqs_state = RCU_GP_IDLE, \
        .gpnum = 0UL - 300UL, \
@@ -104,8 +106,7 @@ struct rcu_state sname##_state = { \
        .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
-}; \
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
+}
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -1328,19 +1329,29 @@ void rcu_cpu_stall_reset(void)
 }
 
 /*
- * Initialize the specified rcu_data structure's callback list to empty.
+ * Initialize the specified rcu_data structure's default callback list
+ * to empty.  The default callback list is the one that is not used by
+ * no-callbacks CPUs.
  */
-static void init_callback_list(struct rcu_data *rdp)
+static void init_default_callback_list(struct rcu_data *rdp)
 {
        int i;
 
-       if (init_nocb_callback_list(rdp))
-               return;
        rdp->nxtlist = NULL;
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
 }
 
+/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+       if (init_nocb_callback_list(rdp))
+               return;
+       init_default_callback_list(rdp);
+}
+
 /*
  * Determine the value that ->completed will have at the end of the
  * next subsequent grace period.  This is used to tag callbacks so that
@@ -2827,11 +2838,21 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
                if (cpu != -1)
                        rdp = per_cpu_ptr(rsp->rda, cpu);
-               offline = !__call_rcu_nocb(rdp, head, lazy, flags);
-               WARN_ON_ONCE(offline);
-               /* _call_rcu() is illegal on offline CPU; leak the callback. */
-               local_irq_restore(flags);
-               return;
+               if (likely(rdp->mynode)) {
+                       /* Post-boot, so this should be for a no-CBs CPU. */
+                       offline = !__call_rcu_nocb(rdp, head, lazy, flags);
+                       WARN_ON_ONCE(offline);
+                       /* Offline CPU, _call_rcu() illegal, leak callback.  */
+                       local_irq_restore(flags);
+                       return;
+               }
+               /*
+                * Very early boot, before rcu_init().  Initialize if needed
+                * and then drop through to queue the callback.
+                */
+               BUG_ON(cpu != -1);
+               if (!likely(rdp->nxtlist))
+                       init_default_callback_list(rdp);
        }
        ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
        if (lazy)
@@ -3562,7 +3583,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
-       init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
+       if (!rdp->nxtlist)
+               init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
        rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_sysidle_init_percpu_data(rdp->dynticks);
        atomic_set(&rdp->dynticks->dynticks,
@@ -3833,7 +3855,6 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                }
        }
 
-       rsp->rda = rda;
        init_waitqueue_head(&rsp->gp_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {