/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The step in the state machine
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance states, the instance to invoke the
 *		callback for; NULL means "all instances"
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
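
/*
 * Illustrative sketch (not part of the original file): a caller that must
 * keep the set of online CPUs stable wraps its work in the read-side lock
 * above. The function name is hypothetical.
 */
#if 0
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();		/* Hotplug writers are held off here */
	for_each_online_cpu(cpu)	/* No CPU can come or go in between */
		cnt++;
	cpus_read_unlock();
	return cnt;
}
#endif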
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
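
/*
 * Illustrative sketch (not part of the original file): the disable/enable
 * pair is refcounted, so nested users must stay balanced. The function
 * name is hypothetical.
 */
#if 0
static void example_no_hotplug_section(void)
{
	cpu_hotplug_disable();	/* cpu_up()/cpu_down() now return -EBUSY */
	/* ... work that must not race with sysfs triggered hotplug ... */
	cpu_hotplug_enable();	/* Must balance the disable above */
}
#endif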
#endif	/* CONFIG_HOTPLUG_CPU */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	BUG_ON(!cpu_online(cpu));

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
		return st->result;
	}
	return 0;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define takedown_cpu		NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif	/* CONFIG_HOTPLUG_CPU */
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
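
/*
 * Illustrative sketch (hypothetical arch code, not part of this file):
 * a secondary CPU entry path honors the contract documented above by
 * invoking notify_cpu_starting() before it enables interrupts and before
 * the boot CPU returns from __cpu_up().
 */
#if 0
static void example_secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... low level, arch specific bringup ... */
	notify_cpu_starting(cpu);	/* Interrupts are still disabled */
	set_cpu_online(cpu, true);	/* Lets the boot CPU proceed */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#endif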
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
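
/*
 * Illustrative sketch (not part of the original file): kernel code that
 * offlines a CPU and brings it back, checking the error codes the
 * functions above can produce. The function name is hypothetical.
 */
#if 0
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);	/* -EBUSY if it is the last online CPU */
	if (ret)
		return ret;
	return cpu_up(cpu);	/* -EINVAL if the CPU is not possible */
}
#endif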
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment. The slots are
 * protected by the cpuhp_state_mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
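
/*
 * Illustrative sketch (not part of the original file): a driver embeds a
 * hlist_node in its per device structure and registers it against a
 * multi-instance state set up with cpuhp_setup_state_multi(). All
 * example_* names are hypothetical.
 */
#if 0
struct example_dev {
	struct hlist_node node;
	/* ... driver private data ... */
};

static enum cpuhp_state example_state;	/* From cpuhp_setup_state_multi() */

static int example_dev_register(struct example_dev *edev)
{
	/* Invokes the state's startup.multi callback on each online CPU */
	return cpuhp_state_add_instance(example_state, &edev->node);
}

static void example_dev_unregister(struct example_dev *edev)
{
	cpuhp_state_remove_instance(example_state, &edev->node);
}
#endif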
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
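
/*
 * Illustrative sketch (not part of the original file): typical module use
 * of the dynamic online state range via the cpuhp_setup_state() wrapper
 * from <linux/cpuhotplug.h>. All example_* names are hypothetical.
 */
#if 0
static enum cpuhp_state example_state;

static int example_online(unsigned int cpu)
{
	/* Runs on @cpu when it goes online; may fail and trigger rollback */
	return 0;
}

static int example_offline(unsigned int cpu)
{
	/* Runs when @cpu goes offline; must not fail */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online, example_offline);
	if (ret < 0)
		return ret;
	example_state = ret;	/* Dynamically allocated state number */
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_state);
}
#endif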
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
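
/*
 * The attribute groups above surface the state machine to user space
 * (paths assuming the "hotplug" group name used above):
 *
 *   /sys/devices/system/cpu/hotplug/states       - list of named states
 *   /sys/devices/system/cpu/cpuN/hotplug/state   - current state of cpuN
 *   /sys/devices/system/cpu/cpuN/hotplug/target  - write a state number to
 *           bring cpuN up or down to that state; writing the number of
 *           CPUHP_OFFLINE offlines the CPU, CPUHP_ONLINE onlines it.
 */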
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
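
/*
 * Illustrative sketch (not part of the original file): how the table above
 * is typically consumed. cpumask_of(cpu) yields a constant single-bit mask
 * backed by cpu_bit_bitmap, as used by takedown_cpu() earlier in this file.
 */
#if 0
static void example_use_cpumask_of(unsigned int cpu)
{
	const struct cpumask *mask = cpumask_of(cpu);

	/* Exactly one bit, the one for @cpu, is set in @mask */
	WARN_ON(cpumask_weight(mask) != 1);
	WARN_ON(!cpumask_test_cpu(cpu, mask));
}
#endif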
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}