/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

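/*
 * Illustrative sketch, not part of the original file: the typical reader-
 * side pairing of get_online_cpus()/put_online_cpus() around code that
 * must see a stable cpu_online_mask. The function name is hypothetical.
 */
#if 0   /* example only */
static unsigned int example_count_online_cpus(void)
{
        unsigned int cpu, n = 0;

        get_online_cpus();              /* hold off hotplug writers */
        for_each_online_cpu(cpu)
                n++;
        put_online_cpus();              /* wake a waiting writer if we are last */
        return n;
}
#endif
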
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif  /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);
        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

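/*
 * Illustrative sketch, not part of the original file: the usual shape of a
 * CPU hotplug notifier as registered through register_cpu_notifier(). The
 * callback and notifier_block names are hypothetical.
 */
#if 0   /* example only */
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        /* CPU_TASKS_FROZEN is OR'd into @action during suspend/resume */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                /* allocate per-cpu state for @cpu; a failure may veto the
                 * bring-up via return notifier_from_errno(err); */
                break;
        case CPU_ONLINE:
                /* @cpu is running; start scheduling work on it */
                break;
        case CPU_DEAD:
                /* @cpu is gone; free its per-cpu state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
        .notifier_call = example_cpu_callback,
};
/* registered early via register_cpu_notifier(&example_cpu_notifier) */
#endif
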
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);

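/*
 * Illustrative sketch, not part of the original file: cpu_down() as an
 * in-kernel caller might use it. In practice most offlining is driven from
 * userspace via /sys/devices/system/cpu/cpuN/online, which ends up here.
 * The function name is hypothetical.
 */
#if 0   /* example only */
static int example_offline_cpu(unsigned int cpu)
{
        int err = cpu_down(cpu);        /* -EBUSY if hotplug is disabled */

        if (err)
                pr_warn("offlining CPU%u failed: %d\n", cpu, err);
        return err;
}
#endif
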
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();
        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);
        arch_disable_nonboot_cpus_begin();

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        arch_disable_nonboot_cpus_end();

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable_before_freeze();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable_after_thaw();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

int cpu_hotplug_pm_sync_init(void)
{
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}

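/*
 * Illustrative sketch, not part of the original file: the rough shape of a
 * hypothetical architecture's secondary-CPU bring-up path, showing where
 * notify_cpu_starting() is expected to be called. All names below are
 * assumptions, not a real arch implementation.
 */
#if 0   /* example only */
void __cpuinit example_secondary_start(unsigned int cpu)
{
        /* per-CPU hardware setup done; interrupts still disabled here */
        notify_cpu_starting(cpu);       /* run the CPU_STARTING notifiers */
        set_cpu_online(cpu, true);      /* lets the boot CPU's __cpu_up() finish */
        local_irq_enable();
        cpu_idle();                     /* enter this CPU's idle loop */
}
#endif
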
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr below NR_CPUS, the bitmap value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

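/*
 * Illustrative sketch: roughly how <linux/cpumask.h> consumes the table
 * above. cpu_bit_bitmap[1 + cpu % BITS_PER_LONG] is a row whose word 0 has
 * bit (cpu % BITS_PER_LONG) set; backing the pointer up by
 * cpu / BITS_PER_LONG words (into the preceding rows, whose trailing words
 * are all zero) moves that bit to word cpu / BITS_PER_LONG, yielding a
 * mask with only bit @cpu set.
 */
#if 0   /* example only */
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}
#endif
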
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}

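/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * architecture's early SMP setup might seed these masks before any
 * secondary CPU is brought up. All names below are assumptions.
 */
#if 0   /* example only */
static void __init example_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        /* the boot CPU is always possible and present */
        init_cpu_possible(cpumask_of(0));
        for (cpu = 1; cpu < max_cpus; cpu++)
                set_cpu_possible(cpu, true);
        /* assume every possible CPU is physically present */
        init_cpu_present(cpu_possible_mask);
}
#endif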