/*
 * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
 *
 * Licensed under the terms of the GNU GPL License version 2.
 *
 * Based on Len Brown's <lenb@kernel.org> turbostat tool.
 */
#if defined(__i386__) || defined(__x86_64__)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"
/* IA32_TIME_STAMP_COUNTER — used as the reference for percent calculation */
#define MSR_TSC			0x10

#define MSR_PKG_C3_RESIDENCY	0x3F8
#define MSR_PKG_C6_RESIDENCY	0x3F9
#define MSR_CORE_C3_RESIDENCY	0x3FC
#define MSR_CORE_C6_RESIDENCY	0x3FD

/* Number of hardware C-state residency counters handled by this monitor */
#define NHM_CSTATE_COUNT 4

/*
 * Indices into previous_count[]/current_count[].  TSC is read via the same
 * nhm_get_count() helper but is not a per-cstate counter, hence the
 * out-of-band id value.
 */
enum intel_nhm_id { C3 = 0, C6, PC3, PC6, TSC = 0xFFFF };
30 static int nhm_get_count_percent(unsigned int self_id, double *percent,
33 static cstate_t nhm_cstates[NHM_CSTATE_COUNT] = {
36 .desc = N_("Processor Core C3"),
39 .get_count_percent = nhm_get_count_percent,
43 .desc = N_("Processor Core C6"),
46 .get_count_percent = nhm_get_count_percent,
51 .desc = N_("Processor Package C3"),
53 .range = RANGE_PACKAGE,
54 .get_count_percent = nhm_get_count_percent,
58 .desc = N_("Processor Package C6"),
60 .range = RANGE_PACKAGE,
61 .get_count_percent = nhm_get_count_percent,
65 static unsigned long long tsc_at_measure_start;
66 static unsigned long long tsc_at_measure_end;
67 static unsigned long long *previous_count[NHM_CSTATE_COUNT];
68 static unsigned long long *current_count[NHM_CSTATE_COUNT];
69 /* valid flag for all CPUs. If a MSR read failed it will be zero */
72 static int nhm_get_count(enum intel_nhm_id id, unsigned long long *val, unsigned int cpu)
78 msr = MSR_CORE_C3_RESIDENCY;
81 msr = MSR_CORE_C6_RESIDENCY;
84 msr = MSR_PKG_C3_RESIDENCY;
87 msr = MSR_PKG_C6_RESIDENCY;
95 if (read_msr(cpu, msr, val))
101 static int nhm_get_count_percent(unsigned int id, double *percent,
109 *percent = (100.0 * (current_count[id][cpu] - previous_count[id][cpu])) /
110 (tsc_at_measure_end - tsc_at_measure_start);
112 dprint("%s: previous: %llu - current: %llu - (%u)\n", nhm_cstates[id].name,
113 previous_count[id][cpu], current_count[id][cpu],
116 dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
117 nhm_cstates[id].name,
118 (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
119 current_count[id][cpu] - previous_count[id][cpu],
125 static int nhm_start(void)
128 unsigned long long dbg, val;
130 nhm_get_count(TSC, &tsc_at_measure_start, 0);
132 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
133 for (cpu = 0; cpu < cpu_count; cpu++) {
134 is_valid[cpu] = !nhm_get_count(num, &val, cpu);
135 previous_count[num][cpu] = val;
138 nhm_get_count(TSC, &dbg, 0);
139 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
143 static int nhm_stop(void)
145 unsigned long long val;
146 unsigned long long dbg;
149 nhm_get_count(TSC, &tsc_at_measure_end, 0);
151 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
152 for (cpu = 0; cpu < cpu_count; cpu++) {
153 is_valid[cpu] = !nhm_get_count(num, &val, cpu);
154 current_count[num][cpu] = val;
157 nhm_get_count(TSC, &dbg, 0);
158 dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
163 struct cpuidle_monitor intel_nhm_monitor;
165 struct cpuidle_monitor* intel_nhm_register(void) {
168 if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
171 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
174 if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
177 /* Free this at program termination */
178 is_valid = calloc(cpu_count, sizeof (int));
179 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
180 previous_count[num] = calloc (cpu_count,
181 sizeof(unsigned long long));
182 current_count[num] = calloc (cpu_count,
183 sizeof(unsigned long long));
186 intel_nhm_monitor.name_len = strlen(intel_nhm_monitor.name);
187 return &intel_nhm_monitor;
190 void intel_nhm_unregister(void) {
193 for (num = 0; num < NHM_CSTATE_COUNT; num++) {
194 free(previous_count[num]);
195 free(current_count[num]);
200 struct cpuidle_monitor intel_nhm_monitor = {
202 .hw_states_num = NHM_CSTATE_COUNT,
203 .hw_states = nhm_cstates,
206 .do_register = intel_nhm_register,
207 .unregister = intel_nhm_unregister,
209 .overflow_s = 922000000 /* 922337203 seconds TSC overflow