arch/powerpc/kernel/idle.c
/*
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Originally written by Cort Dougan (cort@cs.nmt.edu).
 * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
 * Paul Mackerras and others.
 *
 * iSeries support added by Mike Corrigan <mikejc@us.ibm.com>
 *
 * Additional shared processor, SMT, and firmware support
 *    Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
21
22 #include <linux/sched.h>
23 #include <linux/kernel.h>
24 #include <linux/smp.h>
25 #include <linux/cpu.h>
26 #include <linux/sysctl.h>
27 #include <linux/tick.h>
28
29 #include <asm/system.h>
30 #include <asm/processor.h>
31 #include <asm/cputable.h>
32 #include <asm/time.h>
33 #include <asm/machdep.h>
34 #include <asm/smp.h>
35
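/*
 * With CPU hotplug enabled, cpu_should_die() reports whether this CPU has
 * been marked offline, in which case the idle loop hands it to cpu_die().
 */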
#ifdef CONFIG_HOTPLUG_CPU
#define cpu_should_die()        cpu_is_offline(smp_processor_id())
#else
#define cpu_should_die()        0
#endif

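/*
 * Idle-override state: IDLE_NO_OVERRIDE by default, switched to
 * IDLE_POWERSAVE_OFF below when "powersave=off" is given on the command line.
 */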
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);

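/*
 * "powersave=off" on the kernel command line clears the platform
 * power_save hook so the idle loop never enters a power-saving state.
 */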
static int __init powersave_off(char *arg)
{
        ppc_md.power_save = NULL;
        cpuidle_disable = IDLE_POWERSAVE_OFF;
        return 0;
}
__setup("powersave=off", powersave_off);

/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
        if (ppc_md.idle_loop)
                ppc_md.idle_loop();     /* doesn't return */

        set_thread_flag(TIF_POLLING_NRFLAG);
        while (1) {
                /* Stop the periodic tick and tell RCU this CPU is idle */
                tick_nohz_idle_enter();
                rcu_idle_enter();

                while (!need_resched() && !cpu_should_die()) {
                        ppc64_runlatch_off();

                        if (ppc_md.power_save) {
                                clear_thread_flag(TIF_POLLING_NRFLAG);
                                /*
                                 * smp_mb is so clearing of TIF_POLLING_NRFLAG
                                 * is ordered w.r.t. need_resched() test.
                                 */
                                smp_mb();
                                local_irq_disable();

                                /* Don't trace irqs off for idle */
                                stop_critical_timings();

                                /* check again after disabling irqs */
                                if (!need_resched() && !cpu_should_die())
                                        ppc_md.power_save();

                                start_critical_timings();

                                local_irq_enable();
                                set_thread_flag(TIF_POLLING_NRFLAG);

                        } else {
                                /*
                                 * Go into low thread priority and possibly
                                 * low power mode.
                                 */
                                HMT_low();
                                HMT_very_low();
                        }
                }

                /*
                 * Work is pending (or this CPU is going offline): restore
                 * normal thread priority, restart the tick and reschedule.
                 */
                HMT_medium();
                ppc64_runlatch_on();
                rcu_idle_exit();
                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                if (cpu_should_die())
                        cpu_die();
                schedule();
                preempt_disable();
        }
}


/*
 * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
 * idle loop and start using the new idle loop.
 * Required while changing idle handler on SMP systems.
 * Caller must have changed idle handler to the new value before the call.
 * The window in which CPUs may still be running the old idle routine can be
 * larger on shared-processor (virtualized) systems.
 */
void cpu_idle_wait(void)
{
        int cpu;

        /* make the caller's handler update visible before waking the CPUs */
        smp_mb();

        /* kick all the CPUs so that they exit out of old idle routine */
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id())
                        smp_send_reschedule(cpu);
        }
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

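/*
 * Non-zero when the platform idle code is allowed to use the nap
 * power-saving state; exposed via the "powersave-nap" sysctl below.
 */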
int powersave_nap;

#ifdef CONFIG_SYSCTL
/*
 * Register the sysctl to set/clear powersave_nap.
 */
static ctl_table powersave_nap_ctl_table[] = {
        {
                .procname       = "powersave-nap",
                .data           = &powersave_nap,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {}
};
static ctl_table powersave_nap_sysctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = powersave_nap_ctl_table,
        },
        {}
};
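/*
 * The tables above publish the flag as /proc/sys/kernel/powersave-nap,
 * e.g. "echo 1 > /proc/sys/kernel/powersave-nap" enables napping at run time.
 */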

static int __init
register_powersave_nap_sysctl(void)
{
        register_sysctl_table(powersave_nap_sysctl_root);

        return 0;
}
__initcall(register_powersave_nap_sysctl);
#endif