/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright    (C) 2001 Russell King
 *              (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *              (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *              (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *              (C) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling rate was 1 second and the user requests 10 ms because
 * they need an immediate reaction from the ondemand governor, but are not
 * sure whether a higher frequency will be required, the governor may change
 * the sampling rate too late; up to 1 second later.  Thus, when the
 * sampling rate is being reduced, the new value has to take effect
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int rate;
        int ret;

        ret = sscanf(buf, "%u", &rate);
        if (ret != 1)
                return -EINVAL;

        dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
        list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                mutex_lock(&policy_dbs->update_mutex);
                /*
                 * On 32-bit architectures this may race with the
                 * sample_delay_ns read in dbs_update_util_handler(), but that
                 * really doesn't matter.  If the read returns a value that's
                 * too big, the sample will be skipped, but the next invocation
                 * of dbs_update_util_handler() (when the update has been
                 * completed) will take a sample.
                 *
                 * If this runs in parallel with dbs_work_handler(), we may end
                 * up overwriting the sample_delay_ns value that it has just
                 * written, but it will be corrected next time a sample is
                 * taken, so it shouldn't be significant.
                 */
                gov_update_sample_delay(policy_dbs, 0);
                mutex_unlock(&policy_dbs->update_mutex);
        }

        return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

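/*
 * For example, with system-wide ondemand tunables (where this attribute
 * typically shows up as
 * /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate):
 *
 *   # echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * switches to a 10 ms sampling rate that, courtesy of the loop above, takes
 * effect on the very next sample instead of after the old, longer interval.
 */
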
/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
        struct policy_dbs_info *policy_dbs;

        list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
                unsigned int j;

                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
        }
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

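/**
 * dbs_update - Compute the load since the previous sample for a policy.
 * @policy: Policy whose CPUs are to be sampled.
 *
 * Return the maximum load (in percent) seen among the CPUs covered by
 * @policy since the last sample.  Also update policy_dbs->idle_periods
 * with the smallest number of whole sampling periods that went by between
 * samples on any of those CPUs, when that exceeds two periods (UINT_MAX
 * otherwise), which governors can use to detect long idle stretches.
 */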
unsigned int dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int ignore_nice = dbs_data->ignore_nice_load;
        unsigned int max_load = 0, idle_periods = UINT_MAX;
        unsigned int sampling_rate, io_busy, j;

        /*
         * Sometimes governors may use an additional multiplier to increase
         * sample delays temporarily.  Apply that multiplier to sampling_rate
         * so as to keep the wake-up-from-idle detection logic a bit
         * conservative.
         */
        sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
        /*
         * For the purpose of ondemand, waiting for disk IO is an indication
         * that the workload is performance-critical, not that the system is
         * actually idle, so do not add the iowait time to the CPU idle time
         * in that case.
         */
        io_busy = dbs_data->io_is_busy;

        /* Get the absolute load. */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                u64 update_time, cur_idle_time;
                unsigned int idle_time, time_elapsed;
                unsigned int load;

                cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

                time_elapsed = update_time - j_cdbs->prev_update_time;
                j_cdbs->prev_update_time = update_time;

                idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }

                if (unlikely(!time_elapsed)) {
                        /*
                         * That can only happen when this function is called
                         * twice in a row with a very short interval between the
                         * calls, so the previous load value can be used then.
                         */
                        load = j_cdbs->prev_load;
                } else if (unlikely(time_elapsed > 2 * sampling_rate &&
                                    j_cdbs->prev_load)) {
                        /*
                         * If the CPU had gone completely idle and a task has
                         * just woken up on this CPU now, it would be unfair to
                         * calculate 'load' the usual way for this elapsed
                         * time-window, because it would show near-zero load,
                         * irrespective of how CPU intensive that task actually
                         * was. This is undesirable for latency-sensitive bursty
                         * workloads.
                         *
                         * To avoid this, reuse the 'load' from the previous
                         * time-window and give this task a chance to start with
                         * a reasonably high CPU frequency. However, that
                         * shouldn't be over-done, lest we get stuck at a high
                         * load (high frequency) for too long, even when the
                         * current system load has actually dropped down, so
                         * clear prev_load to guarantee that the load will be
                         * computed again next time.
                         *
                         * Detecting this situation is easy: the governor's
                         * utilization update handler would not have run during
                         * CPU-idle periods.  Hence, an unusually large
                         * 'time_elapsed' (as compared to the sampling rate)
                         * indicates this scenario.
                         */
                        load = j_cdbs->prev_load;
                        j_cdbs->prev_load = 0;
                } else {
                        if (time_elapsed >= idle_time) {
                                load = 100 * (time_elapsed - idle_time) / time_elapsed;
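                                /*
                                 * E.g. time_elapsed = 10000 us with
                                 * idle_time = 2500 us yields load = 75,
                                 * i.e. the CPU was busy 75% of the time.
                                 */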
                        } else {
                                /*
                                 * That can happen if idle_time is returned by
                                 * get_cpu_idle_time_jiffy().  In that case
                                 * idle_time is roughly equal to the difference
                                 * between time_elapsed and "busy time" obtained
                                 * from CPU statistics.  Then, the "busy time"
                                 * can end up being greater than time_elapsed
                                 * (for example, if jiffies_64 and the CPU
                                 * statistics are updated by different CPUs),
                                 * so idle_time may in fact be negative.  That
                                 * means, though, that the CPU was busy all
                                 * the time (on the rough average) during the
                                 * last sampling interval and 100 can be
                                 * returned as the load.
                                 */
                                load = (int)idle_time < 0 ? 100 : 0;
                        }
                        j_cdbs->prev_load = load;
                }

                if (time_elapsed > 2 * sampling_rate) {
                        unsigned int periods = time_elapsed / sampling_rate;

                        if (periods < idle_periods)
                                idle_periods = periods;
                }

                if (load > max_load)
                        max_load = load;
        }

        policy_dbs->idle_periods = idle_periods;

        return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

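/*
 * dbs_work_handler - process-context part of taking a sample.
 *
 * Invokes the governor-specific ->gov_dbs_update() callback, which
 * evaluates the load and returns the delay until the next sample, and
 * then allows the utilization update handler to queue up further work.
 */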
static void dbs_work_handler(struct work_struct *work)
{
        struct policy_dbs_info *policy_dbs;
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;

        policy_dbs = container_of(work, struct policy_dbs_info, work);
        policy = policy_dbs->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load or the
         * ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&policy_dbs->update_mutex);
        gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
        mutex_unlock(&policy_dbs->update_mutex);

        /* Allow the utilization update handler to queue up more work. */
        atomic_set(&policy_dbs->work_count, 0);
        /*
         * If the update below is reordered with respect to the sample delay
         * modification, the utilization update handler may end up using a stale
         * sample delay value.
         */
        smp_wmb();
        policy_dbs->work_in_progress = false;
}

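/*
 * dbs_irq_work - bounce sample processing into process context.
 *
 * The utilization update handler runs in contexts that cannot schedule
 * work directly, so it queues this irq_work, which in turn schedules
 * dbs_work_handler() on the local CPU.
 */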
static void dbs_irq_work(struct irq_work *irq_work)
{
        struct policy_dbs_info *policy_dbs;

        policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
        schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

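/*
 * dbs_update_util_handler - decide whether it is time for a new sample.
 *
 * Called by the scheduler on every utilization update for the CPUs with
 * the hook installed, so it has to be cheap and must not sleep.  If no
 * sample is in progress and enough time has passed since the last one,
 * queue the irq_work that kicks off dbs_work_handler().
 */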
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned int flags)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
        u64 delta_ns, lst;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - It is too early (too little time from the previous sample).
         */
        if (policy_dbs->work_in_progress)
                return;

        /*
         * If the reads below are reordered before the check above, the value
         * of sample_delay_ns used in the computation may be stale.
         */
        smp_rmb();
        lst = READ_ONCE(policy_dbs->last_sample_time);
        delta_ns = time - lst;
        if ((s64)delta_ns < policy_dbs->sample_delay_ns)
                return;

        /*
         * If the policy is not shared, the irq_work may be queued up right away
         * at this point.  Otherwise, we need to ensure that only one of the
         * CPUs sharing the policy will do that.
         */
        if (policy_dbs->is_shared) {
                if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
                        return;

                /*
                 * If another CPU updated last_sample_time in the meantime, we
                 * shouldn't be here, so clear the work counter and bail out.
                 */
                if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
                        atomic_set(&policy_dbs->work_count, 0);
                        return;
                }
        }

        policy_dbs->last_sample_time = time;
        policy_dbs->work_in_progress = true;
        irq_work_queue(&policy_dbs->irq_work);
}

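/*
 * gov_set_update_util - install the utilization update hook for each CPU
 * in the policy, with @delay_us as the initial sample delay.
 */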
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
                                unsigned int delay_us)
{
        struct cpufreq_policy *policy = policy_dbs->policy;
        int cpu;

        gov_update_sample_delay(policy_dbs, delay_us);
        policy_dbs->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

                cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
                                             dbs_update_util_handler);
        }
}

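/*
 * gov_clear_update_util - remove the utilization update hooks and wait
 * for any callbacks still in flight to complete before returning.
 */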
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_remove_update_util_hook(i);

        synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
{
        struct policy_dbs_info *policy_dbs;
        int j;

        /* Allocate memory for per-policy governor data. */
        policy_dbs = gov->alloc();
        if (!policy_dbs)
                return NULL;

        policy_dbs->policy = policy;
        mutex_init(&policy_dbs->update_mutex);
        atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);

        /* Set policy_dbs for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = policy_dbs;
        }
        return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
                                 struct dbs_governor *gov)
{
        int j;

        mutex_destroy(&policy_dbs->update_mutex);

        for_each_cpu(j, policy_dbs->policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = NULL;
                j_cdbs->update_util.func = NULL;
        }
        gov->free(policy_dbs);
}

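/**
 * cpufreq_dbs_governor_init - Common governor initialization.
 * @policy: Policy the governor is being attached to.
 *
 * Allocate the per-policy data and either attach the policy to an already
 * existing set of tunables (when the tunables are system-wide) or allocate
 * and initialize a new one, including its sysfs kobject.
 */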
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data;
        struct policy_dbs_info *policy_dbs;
        unsigned int latency;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        policy_dbs = alloc_policy_dbs_info(policy, gov);
        if (!policy_dbs)
                return -ENOMEM;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        dbs_data = gov->gdbs_data;
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto free_policy_dbs_info;
                }
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;

                gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
                goto out;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
                ret = -ENOMEM;
                goto free_policy_dbs_info;
        }

        gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

        ret = gov->init(dbs_data);
        if (ret)
                goto free_policy_dbs_info;

        /* Policy transition latency is in ns; convert it to us first. */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
                                      LATENCY_MULTIPLIER * latency);

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy_dbs->dbs_data = dbs_data;
        policy->governor_data = policy_dbs;

        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
        ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
                goto out;

        /* Failure, so roll back. */
        pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data);
        kfree(dbs_data);

free_policy_dbs_info:
        free_policy_dbs_info(policy_dbs, gov);

out:
        mutex_unlock(&gov_dbs_data_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

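/**
 * cpufreq_dbs_governor_exit - Common governor cleanup.
 * @policy: Policy the governor is being detached from.
 *
 * Drop the policy's reference on the set of tunables, freeing the set when
 * the last reference goes away, and release the per-policy data.
 */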
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int count;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

        policy->governor_data = NULL;

        if (!count) {
                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data);
                kfree(dbs_data);
        }

        free_policy_dbs_info(policy_dbs, gov);

        mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

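/**
 * cpufreq_dbs_governor_start - Start load sampling for a policy.
 * @policy: Policy to start the governor for.
 *
 * Take the initial idle-time snapshot for each CPU in the policy, so that
 * the first invocation of dbs_update() computes a fresh load, and install
 * the utilization update hooks with the configured sampling rate.
 */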
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int sampling_rate, ignore_nice, j;
        unsigned int io_busy;

        if (!policy->cur)
                return -EINVAL;

        policy_dbs->is_shared = policy_is_shared(policy);
        policy_dbs->rate_mult = 1;

        sampling_rate = dbs_data->sampling_rate;
        ignore_nice = dbs_data->ignore_nice_load;
        io_busy = dbs_data->io_is_busy;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
                /* Make the first invocation of dbs_update() compute the load. */
                j_cdbs->prev_load = 0;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }

        gov->start(policy);

        gov_set_update_util(policy_dbs, sampling_rate);
        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

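/**
 * cpufreq_dbs_governor_stop - Stop load sampling for a policy.
 * @policy: Policy to stop the governor for.
 *
 * Remove the utilization update hooks and wait for the irq_work and the
 * work item to finish before resetting the sampling state.
 */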
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
        atomic_set(&policy_dbs->work_count, 0);
        policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

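/**
 * cpufreq_dbs_governor_limits - Apply new policy limits.
 * @policy: Policy whose limits have changed.
 *
 * Re-apply the frequency limits and arrange for a new sample to be taken
 * as soon as possible.
 */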
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        mutex_lock(&policy_dbs->update_mutex);
        cpufreq_policy_apply_limits(policy);
        gov_update_sample_delay(policy_dbs, 0);

        mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);