/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
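/*
 * Worked example of the fixed-point helpers above (illustrative only,
 * not part of the original source): with FRAC_BITS == 8, int_tofp(3)
 * is 768 (3 * 256).  mul_fp(int_tofp(3), int_tofp(2)) computes
 * (768 * 512) >> 8 == 1536 == int_tofp(6), and fp_toint() truncates
 * toward zero, which is why ceiling_fp() exists to round any nonzero
 * fraction up instead.
 */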
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	struct sample sample;
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral) {
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}
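/*
 * Illustrative walk-through of pid_calc() (example values, not from the
 * original source): with setpoint 97, p_gain_pct 20 and i/d gains of 0,
 * a scaled busy value of int_tofp(92) gives fp_error = int_tofp(5).
 * pterm is then roughly int_tofp(1), adding (1 << (FRAC_BITS-1)) rounds
 * to nearest rather than truncating, and the function returns 1, so the
 * caller steps the P state down by one (negative results step it up).
 */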
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
#define PCT_TO_HWP(x) ((x) * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
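/*
 * Illustrative mapping for PCT_TO_HWP() above (example arithmetic, not
 * from the original source): HWP performance levels occupy 8 bits, so
 * 0% -> 0, 50% -> 127 and 100% -> 255.  The level the hardware actually
 * runs at is still bounded by the range MSR_HWP_CAPABILITIES reports.
 */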
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();

	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}
static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
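/*
 * Worked example for show_turbo_pct() (hypothetical P state range, not
 * from the original source): with min_pstate 8, max_pstate 24 and
 * turbo_pstate 32, total is 25 states and no_turbo is 17, so turbo_pct
 * reports 100 - 68 = 32, i.e. 32% of the available states are turbo.
 */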
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	pr_info("intel_pstate: HWP enabled\n");

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
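/*
 * Worked example for the VID interpolation above (hypothetical values,
 * not from the original source): if vid.min is int_tofp(32), vid.max is
 * int_tofp(64), min_pstate is 8 and max_pstate is 24, then vid.ratio is
 * int_tofp(2) per P state step.  Requesting pstate 12 yields
 * vid_fp = int_tofp(32 + 4 * 2) and ceiling_fp() selects VID 40; any
 * pstate above max_pstate uses the dedicated turbo VID instead.
 */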
#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x5;

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}
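/*
 * Units note with a worked example (illustrative, not from the original
 * source): byt_freq_table holds bus-clock frequencies in units of
 * 100 kHz, so index 1 gives a scaling of 1000 * 100 = 100000 kHz
 * (a 100 MHz BCLK), and a P state ratio of 16 at that scaling
 * corresponds to 1.6 GHz.
 */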
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static inline int core_get_scaling(void)
{
	return 100000;
}
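/*
 * Worked example (illustrative, not from the original source): core
 * P states are bus ratios scaled by 100 MHz, expressed here as 100000
 * in cpufreq's kHz units.  A max_pstate of 24 therefore corresponds to
 * 24 * 100000 = 2400000 kHz, i.e. a 2.4 GHz non-turbo ceiling.
 */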
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};
static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experiments.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
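/*
 * Worked example for intel_pstate_get_min_max() (hypothetical limits,
 * not from the original source): with turbo_pstate 32, min_pstate 8 and
 * limits.max_perf at one half in fixed point, max_perf_adj is 16,
 * clamped into the [8, 32] hardware range, so the driver never requests
 * more than half of the turbo ceiling.
 */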
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
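/*
 * Worked example for intel_pstate_calc_busy() (illustrative deltas, not
 * from the original source): an APERF delta of 18M against an MPERF
 * delta of 24M gives core_pct = int_tofp(75).  With max_pstate 24 and
 * scaling 100000, the base term is 24 * 100000 / 100 = 24000, so
 * sample->freq becomes 24000 * 75 = 1800000 kHz, i.e. 1.8 GHz.
 */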
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = native_read_tsc();
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}
static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
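/*
 * Worked example for intel_pstate_get_scaled_busy() (example numbers,
 * not from the original source): core_pct_busy of 75 with max_pstate 24
 * and current_pstate 18 normalizes to 75 * 24/18 = 100, i.e. the core
 * was fully busy for the P state it was asked to run at.  If the sample
 * instead arrived 4x later than scheduled, the 3x idle rule above would
 * scale that 100 down by sample_time/duration to roughly 25.
 */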
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;
	int from;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
		fp_toint(busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		sample->freq);
}
static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		limits.min_policy_pct = 100;
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);

	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}
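/*
 * Worked example for the policy-to-percent mapping above (hypothetical
 * frequencies, not from the original source): with cpuinfo.max_freq at
 * 3200000 kHz, a policy->min of 1600000 kHz becomes min_policy_pct =
 * 50, and the sysfs limits are then clamped into the resulting
 * [min_policy_pct, max_policy_pct] window before being converted to
 * fixed-point fractions.
 */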
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}
enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	u32  oem_pwr_table;
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
		hwp_active++;

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");