4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
16 #include <asm/hardirq.h>
19 #include "perf_event.h"
22 * Intel PerfMon, used on Core and later.
24 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
26 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
27 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
28 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
29 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
30 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
31 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
32 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
33 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
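/*
 * Rough decoding of the raw values above, for illustration only (assuming the
 * architectural encoding: event select in bits 0-7, unit mask in bits 8-15):
 * 0x4f2e is event 0x2e/umask 0x4f (LLC references), 0x412e is event 0x2e/umask
 * 0x41 (LLC misses), and 0x003c is event 0x3c/umask 0x00 (unhalted core
 * cycles). 0x0300 has no hardware encoding of its own; it is a pseudo-encoding
 * resolved onto fixed counter 2 by the constraint tables below.
 */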
36 static struct event_constraint intel_core_event_constraints[] __read_mostly =
38 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
39 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
40 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
41 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
42 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
43 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
47 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
49 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
50 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
51 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
52 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
53 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
54 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
55 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
56 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
57 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
58 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
59 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
60 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
61 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
65 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
67 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
68 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
69 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
70 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
71 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
72 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
73 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
74 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
75 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
76 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
77 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
81 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
87 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
89 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
90 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
91 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
92 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
93 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
94 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
95 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
99 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
104 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
105 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
106 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
107 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
108 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
110 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
114 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
116 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
117 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
118 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
119 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
120 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
121 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
122 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
123 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
124 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
125 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
126 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
127 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
128 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
129 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
130 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
131 INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
135 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
137 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
138 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
142 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
147 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
149 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
150 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
151 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
155 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
156 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
157 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
161 static u64 intel_pmu_event_map(int hw_event)
163 return intel_perfmon_event_map[hw_event];
166 #define SNB_DMND_DATA_RD (1ULL << 0)
167 #define SNB_DMND_RFO (1ULL << 1)
168 #define SNB_DMND_IFETCH (1ULL << 2)
169 #define SNB_DMND_WB (1ULL << 3)
170 #define SNB_PF_DATA_RD (1ULL << 4)
171 #define SNB_PF_RFO (1ULL << 5)
172 #define SNB_PF_IFETCH (1ULL << 6)
173 #define SNB_LLC_DATA_RD (1ULL << 7)
174 #define SNB_LLC_RFO (1ULL << 8)
175 #define SNB_LLC_IFETCH (1ULL << 9)
176 #define SNB_BUS_LOCKS (1ULL << 10)
177 #define SNB_STRM_ST (1ULL << 11)
178 #define SNB_OTHER (1ULL << 15)
179 #define SNB_RESP_ANY (1ULL << 16)
180 #define SNB_NO_SUPP (1ULL << 17)
181 #define SNB_LLC_HITM (1ULL << 18)
182 #define SNB_LLC_HITE (1ULL << 19)
183 #define SNB_LLC_HITS (1ULL << 20)
184 #define SNB_LLC_HITF (1ULL << 21)
185 #define SNB_LOCAL (1ULL << 22)
186 #define SNB_REMOTE (0xffULL << 23)
187 #define SNB_SNP_NONE (1ULL << 31)
188 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
189 #define SNB_SNP_MISS (1ULL << 33)
190 #define SNB_NO_FWD (1ULL << 34)
191 #define SNB_SNP_FWD (1ULL << 35)
192 #define SNB_HITM (1ULL << 36)
193 #define SNB_NON_DRAM (1ULL << 37)
195 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
196 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
197 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
199 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
200 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
203 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
204 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
206 #define SNB_L3_ACCESS SNB_RESP_ANY
207 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
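/*
 * Worked example of how the composite masks above combine (pure arithmetic on
 * the bit positions defined in this block): SNB_DMND_READ|SNB_L3_MISS is
 * (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD) | (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY|SNB_NON_DRAM)
 * = 0x0000000081 | 0x3fffc00000 = 0x3fffc00081, the MSR_OFFCORE_RSP_x value
 * used for the LL read-miss cache event below; note that it fits within the
 * 0x3fffffffff valid mask in intel_snb_extra_regs.
 */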
209 static __initconst const u64 snb_hw_cache_extra_regs
210 [PERF_COUNT_HW_CACHE_MAX]
211 [PERF_COUNT_HW_CACHE_OP_MAX]
212 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
216 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
217 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
220 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
221 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
223 [ C(OP_PREFETCH) ] = {
224 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
225 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
230 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
231 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
234 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
235 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
237 [ C(OP_PREFETCH) ] = {
238 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
239 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
244 static __initconst const u64 snb_hw_cache_event_ids
245 [PERF_COUNT_HW_CACHE_MAX]
246 [PERF_COUNT_HW_CACHE_OP_MAX]
247 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
251 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
252 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
255 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
256 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
258 [ C(OP_PREFETCH) ] = {
259 [ C(RESULT_ACCESS) ] = 0x0,
260 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
265 [ C(RESULT_ACCESS) ] = 0x0,
266 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
269 [ C(RESULT_ACCESS) ] = -1,
270 [ C(RESULT_MISS) ] = -1,
272 [ C(OP_PREFETCH) ] = {
273 [ C(RESULT_ACCESS) ] = 0x0,
274 [ C(RESULT_MISS) ] = 0x0,
279 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
280 [ C(RESULT_ACCESS) ] = 0x01b7,
281 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
282 [ C(RESULT_MISS) ] = 0x01b7,
285 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
286 [ C(RESULT_ACCESS) ] = 0x01b7,
287 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
288 [ C(RESULT_MISS) ] = 0x01b7,
290 [ C(OP_PREFETCH) ] = {
291 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
292 [ C(RESULT_ACCESS) ] = 0x01b7,
293 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
294 [ C(RESULT_MISS) ] = 0x01b7,
299 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
300 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
303 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
304 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
306 [ C(OP_PREFETCH) ] = {
307 [ C(RESULT_ACCESS) ] = 0x0,
308 [ C(RESULT_MISS) ] = 0x0,
313 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
314 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
317 [ C(RESULT_ACCESS) ] = -1,
318 [ C(RESULT_MISS) ] = -1,
320 [ C(OP_PREFETCH) ] = {
321 [ C(RESULT_ACCESS) ] = -1,
322 [ C(RESULT_MISS) ] = -1,
327 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
328 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
331 [ C(RESULT_ACCESS) ] = -1,
332 [ C(RESULT_MISS) ] = -1,
334 [ C(OP_PREFETCH) ] = {
335 [ C(RESULT_ACCESS) ] = -1,
336 [ C(RESULT_MISS) ] = -1,
341 [ C(RESULT_ACCESS) ] = 0x01b7,
342 [ C(RESULT_MISS) ] = 0x01b7,
345 [ C(RESULT_ACCESS) ] = 0x01b7,
346 [ C(RESULT_MISS) ] = 0x01b7,
348 [ C(OP_PREFETCH) ] = {
349 [ C(RESULT_ACCESS) ] = 0x01b7,
350 [ C(RESULT_MISS) ] = 0x01b7,
356 static __initconst const u64 westmere_hw_cache_event_ids
357 [PERF_COUNT_HW_CACHE_MAX]
358 [PERF_COUNT_HW_CACHE_OP_MAX]
359 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
363 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
364 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
367 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
368 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
370 [ C(OP_PREFETCH) ] = {
371 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
372 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
377 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
378 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
381 [ C(RESULT_ACCESS) ] = -1,
382 [ C(RESULT_MISS) ] = -1,
384 [ C(OP_PREFETCH) ] = {
385 [ C(RESULT_ACCESS) ] = 0x0,
386 [ C(RESULT_MISS) ] = 0x0,
391 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
392 [ C(RESULT_ACCESS) ] = 0x01b7,
393 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
394 [ C(RESULT_MISS) ] = 0x01b7,
397 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
401 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
402 [ C(RESULT_ACCESS) ] = 0x01b7,
403 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
404 [ C(RESULT_MISS) ] = 0x01b7,
406 [ C(OP_PREFETCH) ] = {
407 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
408 [ C(RESULT_ACCESS) ] = 0x01b7,
409 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
410 [ C(RESULT_MISS) ] = 0x01b7,
415 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
416 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
419 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
420 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
422 [ C(OP_PREFETCH) ] = {
423 [ C(RESULT_ACCESS) ] = 0x0,
424 [ C(RESULT_MISS) ] = 0x0,
429 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
430 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
433 [ C(RESULT_ACCESS) ] = -1,
434 [ C(RESULT_MISS) ] = -1,
436 [ C(OP_PREFETCH) ] = {
437 [ C(RESULT_ACCESS) ] = -1,
438 [ C(RESULT_MISS) ] = -1,
443 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
444 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
447 [ C(RESULT_ACCESS) ] = -1,
448 [ C(RESULT_MISS) ] = -1,
450 [ C(OP_PREFETCH) ] = {
451 [ C(RESULT_ACCESS) ] = -1,
452 [ C(RESULT_MISS) ] = -1,
457 [ C(RESULT_ACCESS) ] = 0x01b7,
458 [ C(RESULT_MISS) ] = 0x01b7,
461 [ C(RESULT_ACCESS) ] = 0x01b7,
462 [ C(RESULT_MISS) ] = 0x01b7,
464 [ C(OP_PREFETCH) ] = {
465 [ C(RESULT_ACCESS) ] = 0x01b7,
466 [ C(RESULT_MISS) ] = 0x01b7,
472 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
473 * See IA32 SDM Vol 3B 30.6.1.3
476 #define NHM_DMND_DATA_RD (1 << 0)
477 #define NHM_DMND_RFO (1 << 1)
478 #define NHM_DMND_IFETCH (1 << 2)
479 #define NHM_DMND_WB (1 << 3)
480 #define NHM_PF_DATA_RD (1 << 4)
481 #define NHM_PF_DATA_RFO (1 << 5)
482 #define NHM_PF_IFETCH (1 << 6)
483 #define NHM_OFFCORE_OTHER (1 << 7)
484 #define NHM_UNCORE_HIT (1 << 8)
485 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
486 #define NHM_OTHER_CORE_HITM (1 << 10)
488 #define NHM_REMOTE_CACHE_FWD (1 << 12)
489 #define NHM_REMOTE_DRAM (1 << 13)
490 #define NHM_LOCAL_DRAM (1 << 14)
491 #define NHM_NON_DRAM (1 << 15)
493 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
494 #define NHM_REMOTE (NHM_REMOTE_DRAM)
496 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
497 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
498 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
500 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
501 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
502 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
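/*
 * Worked example from the bit positions above: NHM_DMND_READ|NHM_L3_ACCESS is
 * bit 0 plus bits 8-10 and 12-15 (bit 11 is reserved), i.e.
 * 0x1 | 0x0700 | 0xf000 = 0xf701, the MSR_OFFCORE_RSP_0 value used for the
 * LL read-access cache event below.
 */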
504 static __initconst const u64 nehalem_hw_cache_extra_regs
505 [PERF_COUNT_HW_CACHE_MAX]
506 [PERF_COUNT_HW_CACHE_OP_MAX]
507 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
511 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
512 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
515 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
516 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
518 [ C(OP_PREFETCH) ] = {
519 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
520 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
525 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
526 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
529 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
530 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
532 [ C(OP_PREFETCH) ] = {
533 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
534 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
539 static __initconst const u64 nehalem_hw_cache_event_ids
540 [PERF_COUNT_HW_CACHE_MAX]
541 [PERF_COUNT_HW_CACHE_OP_MAX]
542 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
546 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
547 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
550 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
551 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
553 [ C(OP_PREFETCH) ] = {
554 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
555 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
560 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
561 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
564 [ C(RESULT_ACCESS) ] = -1,
565 [ C(RESULT_MISS) ] = -1,
567 [ C(OP_PREFETCH) ] = {
568 [ C(RESULT_ACCESS) ] = 0x0,
569 [ C(RESULT_MISS) ] = 0x0,
574 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
575 [ C(RESULT_ACCESS) ] = 0x01b7,
576 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
577 [ C(RESULT_MISS) ] = 0x01b7,
580 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
584 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
585 [ C(RESULT_ACCESS) ] = 0x01b7,
586 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
587 [ C(RESULT_MISS) ] = 0x01b7,
589 [ C(OP_PREFETCH) ] = {
590 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
591 [ C(RESULT_ACCESS) ] = 0x01b7,
592 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
593 [ C(RESULT_MISS) ] = 0x01b7,
598 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
599 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
602 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
603 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
605 [ C(OP_PREFETCH) ] = {
606 [ C(RESULT_ACCESS) ] = 0x0,
607 [ C(RESULT_MISS) ] = 0x0,
612 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
613 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
616 [ C(RESULT_ACCESS) ] = -1,
617 [ C(RESULT_MISS) ] = -1,
619 [ C(OP_PREFETCH) ] = {
620 [ C(RESULT_ACCESS) ] = -1,
621 [ C(RESULT_MISS) ] = -1,
626 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
627 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
630 [ C(RESULT_ACCESS) ] = -1,
631 [ C(RESULT_MISS) ] = -1,
633 [ C(OP_PREFETCH) ] = {
634 [ C(RESULT_ACCESS) ] = -1,
635 [ C(RESULT_MISS) ] = -1,
640 [ C(RESULT_ACCESS) ] = 0x01b7,
641 [ C(RESULT_MISS) ] = 0x01b7,
644 [ C(RESULT_ACCESS) ] = 0x01b7,
645 [ C(RESULT_MISS) ] = 0x01b7,
647 [ C(OP_PREFETCH) ] = {
648 [ C(RESULT_ACCESS) ] = 0x01b7,
649 [ C(RESULT_MISS) ] = 0x01b7,
654 static __initconst const u64 core2_hw_cache_event_ids
655 [PERF_COUNT_HW_CACHE_MAX]
656 [PERF_COUNT_HW_CACHE_OP_MAX]
657 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
661 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
662 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
665 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
666 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
668 [ C(OP_PREFETCH) ] = {
669 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
670 [ C(RESULT_MISS) ] = 0,
675 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
676 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
679 [ C(RESULT_ACCESS) ] = -1,
680 [ C(RESULT_MISS) ] = -1,
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = 0,
684 [ C(RESULT_MISS) ] = 0,
689 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
690 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
693 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
694 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = 0,
698 [ C(RESULT_MISS) ] = 0,
703 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
704 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
707 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
708 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = 0,
712 [ C(RESULT_MISS) ] = 0,
717 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
718 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
721 [ C(RESULT_ACCESS) ] = -1,
722 [ C(RESULT_MISS) ] = -1,
724 [ C(OP_PREFETCH) ] = {
725 [ C(RESULT_ACCESS) ] = -1,
726 [ C(RESULT_MISS) ] = -1,
731 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
732 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
735 [ C(RESULT_ACCESS) ] = -1,
736 [ C(RESULT_MISS) ] = -1,
738 [ C(OP_PREFETCH) ] = {
739 [ C(RESULT_ACCESS) ] = -1,
740 [ C(RESULT_MISS) ] = -1,
745 static __initconst const u64 atom_hw_cache_event_ids
746 [PERF_COUNT_HW_CACHE_MAX]
747 [PERF_COUNT_HW_CACHE_OP_MAX]
748 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
752 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
753 [ C(RESULT_MISS) ] = 0,
756 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
757 [ C(RESULT_MISS) ] = 0,
759 [ C(OP_PREFETCH) ] = {
760 [ C(RESULT_ACCESS) ] = 0x0,
761 [ C(RESULT_MISS) ] = 0,
766 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
767 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
770 [ C(RESULT_ACCESS) ] = -1,
771 [ C(RESULT_MISS) ] = -1,
773 [ C(OP_PREFETCH) ] = {
774 [ C(RESULT_ACCESS) ] = 0,
775 [ C(RESULT_MISS) ] = 0,
780 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
781 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
784 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
785 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
787 [ C(OP_PREFETCH) ] = {
788 [ C(RESULT_ACCESS) ] = 0,
789 [ C(RESULT_MISS) ] = 0,
794 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
795 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
798 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
799 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
801 [ C(OP_PREFETCH) ] = {
802 [ C(RESULT_ACCESS) ] = 0,
803 [ C(RESULT_MISS) ] = 0,
808 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
809 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
812 [ C(RESULT_ACCESS) ] = -1,
813 [ C(RESULT_MISS) ] = -1,
815 [ C(OP_PREFETCH) ] = {
816 [ C(RESULT_ACCESS) ] = -1,
817 [ C(RESULT_MISS) ] = -1,
822 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
823 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
826 [ C(RESULT_ACCESS) ] = -1,
827 [ C(RESULT_MISS) ] = -1,
829 [ C(OP_PREFETCH) ] = {
830 [ C(RESULT_ACCESS) ] = -1,
831 [ C(RESULT_MISS) ] = -1,
836 static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
838 /* user explicitly requested branch sampling */
839 if (has_branch_stack(event))
842 /* implicit branch sampling to correct PEBS skid */
843 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
849 static void intel_pmu_disable_all(void)
851 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
853 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
855 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
856 intel_pmu_disable_bts();
858 intel_pmu_pebs_disable_all();
859 intel_pmu_lbr_disable_all();
862 static void intel_pmu_enable_all(int added)
864 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
866 intel_pmu_pebs_enable_all();
867 intel_pmu_lbr_enable_all();
868 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
869 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
871 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
872 struct perf_event *event =
873 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
875 if (WARN_ON_ONCE(!event))
878 intel_pmu_enable_bts(event->hw.config);
884 * Intel Errata AAK100 (model 26)
885 * Intel Errata AAP53 (model 30)
886 * Intel Errata BD53 (model 44)
888 * The official story:
889 * These chips need to be 'reset' when adding counters by programming the
890 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
891 * in sequence on the same PMC or on different PMCs.
893 * In practice it appears some of these events do in fact count, and
894 * we need to program all 4 events.
896 static void intel_pmu_nhm_workaround(void)
898 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
899 static const unsigned long nhm_magic[4] = {
905 struct perf_event *event;
909 * The erratum requires the following steps:
910 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
911 * 2) Configure 4 PERFEVTSELx with the magic events and clear
912 * the corresponding PMCx;
913 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
914 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
915 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
919 * The real steps we choose are a little different from above.
920 * A) To reduce MSR operations, we don't run step 1) as they
921 * are already cleared before this function is called;
922 * B) Call x86_perf_event_update to save PMCx before configuring
923 * PERFEVTSELx with magic number;
924 * C) In step 5), we clear a PERFEVTSELx only when it is
925 * not currently in use.
926 * D) Call x86_perf_event_set_period to restore PMCx;
929 /* We always operate on 4 pairs of perf counters */
930 for (i = 0; i < 4; i++) {
931 event = cpuc->events[i];
933 x86_perf_event_update(event);
936 for (i = 0; i < 4; i++) {
937 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
938 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
941 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
942 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
944 for (i = 0; i < 4; i++) {
945 event = cpuc->events[i];
948 x86_perf_event_set_period(event);
949 __x86_pmu_enable_event(&event->hw,
950 ARCH_PERFMON_EVENTSEL_ENABLE);
952 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
956 static void intel_pmu_nhm_enable_all(int added)
959 intel_pmu_nhm_workaround();
960 intel_pmu_enable_all(added);
963 static inline u64 intel_pmu_get_status(void)
967 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
972 static inline void intel_pmu_ack_status(u64 ack)
974 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
977 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
979 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
982 mask = 0xfULL << (idx * 4);
984 rdmsrl(hwc->config_base, ctrl_val);
986 wrmsrl(hwc->config_base, ctrl_val);
989 static void intel_pmu_disable_event(struct perf_event *event)
991 struct hw_perf_event *hwc = &event->hw;
992 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
994 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
995 intel_pmu_disable_bts();
996 intel_pmu_drain_bts_buffer();
1000 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1001 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1004 * the LBR must be disabled before any actual event,
1005 * because any event may be combined with LBR
1007 if (intel_pmu_needs_lbr_smpl(event))
1008 intel_pmu_lbr_disable(event);
1010 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1011 intel_pmu_disable_fixed(hwc);
1015 x86_pmu_disable_event(event);
1017 if (unlikely(event->attr.precise_ip))
1018 intel_pmu_pebs_disable(event);
1021 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1023 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1024 u64 ctrl_val, bits, mask;
1027 * Enable IRQ generation (0x8),
1028 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1032 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1034 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1038 * ANY bit is supported in v3 and up
1040 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1044 mask = 0xfULL << (idx * 4);
1046 rdmsrl(hwc->config_base, ctrl_val);
1049 wrmsrl(hwc->config_base, ctrl_val);
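/*
 * Worked example of the update above, assuming the standard fixed-counter
 * control layout (4 bits per counter: OS=0x1, USR=0x2, ANY=0x4, PMI=0x8):
 * for idx 1 (CPU_CLK_UNHALTED.CORE) counting user+kernel with PMI enabled,
 * bits = 0x8|0x2|0x1 = 0xb and mask = 0xf << 4, so only bits 4-7 of
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL are rewritten.
 */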
1052 static void intel_pmu_enable_event(struct perf_event *event)
1054 struct hw_perf_event *hwc = &event->hw;
1055 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1057 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1058 if (!__this_cpu_read(cpu_hw_events.enabled))
1061 intel_pmu_enable_bts(hwc->config);
1065 * the LBR must be enabled before any actual event,
1066 * because any event may be combined with LBR
1068 if (intel_pmu_needs_lbr_smpl(event))
1069 intel_pmu_lbr_enable(event);
1071 if (event->attr.exclude_host)
1072 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1073 if (event->attr.exclude_guest)
1074 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1076 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1077 intel_pmu_enable_fixed(hwc);
1081 if (unlikely(event->attr.precise_ip))
1082 intel_pmu_pebs_enable(event);
1084 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1088 * Save and restart an expired event. Called by NMI contexts,
1089 * so it has to be careful about preempting normal event ops:
1091 int intel_pmu_save_and_restart(struct perf_event *event)
1093 x86_perf_event_update(event);
1094 return x86_perf_event_set_period(event);
1097 static void intel_pmu_reset(void)
1099 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1100 unsigned long flags;
1103 if (!x86_pmu.num_counters)
1106 local_irq_save(flags);
1108 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1110 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1111 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1112 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1114 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1115 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1118 ds->bts_index = ds->bts_buffer_base;
1120 local_irq_restore(flags);
1124 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
1127 static int intel_pmu_handle_irq(struct pt_regs *regs)
1129 struct perf_sample_data data;
1130 struct cpu_hw_events *cpuc;
1135 cpuc = &__get_cpu_var(cpu_hw_events);
1138 * Some chipsets need to unmask the LVTPC in a particular spot
1139 * inside the nmi handler. As a result, the unmasking was pushed
1140 * into all the nmi handlers.
1142 * This handler doesn't seem to have any issues with the unmasking
1143 * so it was left at the top.
1145 apic_write(APIC_LVTPC, APIC_DM_NMI);
1147 intel_pmu_disable_all();
1148 handled = intel_pmu_drain_bts_buffer();
1149 status = intel_pmu_get_status();
1151 intel_pmu_enable_all(0);
1157 intel_pmu_ack_status(status);
1158 if (++loops > 100) {
1159 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1160 perf_event_print_debug();
1165 inc_irq_stat(apic_perf_irqs);
1167 intel_pmu_lbr_read();
1170 * PEBS overflow sets bit 62 in the global status register
1172 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1174 x86_pmu.drain_pebs(regs);
1177 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1178 struct perf_event *event = cpuc->events[bit];
1182 if (!test_bit(bit, cpuc->active_mask))
1185 if (!intel_pmu_save_and_restart(event))
1188 perf_sample_data_init(&data, 0, event->hw.last_period);
1190 if (has_branch_stack(event))
1191 data.br_stack = &cpuc->lbr_stack;
1193 if (perf_event_overflow(event, &data, regs))
1194 x86_pmu_stop(event, 0);
1198 * Repeat if there is more work to be done:
1200 status = intel_pmu_get_status();
1205 intel_pmu_enable_all(0);
1209 static struct event_constraint *
1210 intel_bts_constraints(struct perf_event *event)
1212 struct hw_perf_event *hwc = &event->hw;
1213 unsigned int hw_event, bts_event;
1215 if (event->attr.freq)
1218 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1219 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1221 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1222 return &bts_constraint;
1227 static int intel_alt_er(int idx)
1229 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1232 if (idx == EXTRA_REG_RSP_0)
1233 return EXTRA_REG_RSP_1;
1235 if (idx == EXTRA_REG_RSP_1)
1236 return EXTRA_REG_RSP_0;
1241 static void intel_fixup_er(struct perf_event *event, int idx)
1243 event->hw.extra_reg.idx = idx;
1245 if (idx == EXTRA_REG_RSP_0) {
1246 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1247 event->hw.config |= 0x01b7;
1248 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1249 } else if (idx == EXTRA_REG_RSP_1) {
1250 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1251 event->hw.config |= 0x01bb;
1252 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1257 * manage allocation of shared extra msr for certain events; sharing can be:
1260 * per-cpu: to be shared between the various events on a single PMU
1261 * per-core: per-cpu + shared by HT threads
1263 static struct event_constraint *
1264 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1265 struct perf_event *event,
1266 struct hw_perf_event_extra *reg)
1268 struct event_constraint *c = &emptyconstraint;
1269 struct er_account *era;
1270 unsigned long flags;
1274 * reg->alloc can be set due to existing state, so for fake cpuc we
1275 * need to ignore this, otherwise we might fail to allocate proper fake
1276 * state for this extra reg constraint. Also see the comment below.
1278 if (reg->alloc && !cpuc->is_fake)
1279 return NULL; /* call x86_get_event_constraint() */
1282 era = &cpuc->shared_regs->regs[idx];
1284 * we use spin_lock_irqsave() to avoid lockdep issues when
1285 * passing a fake cpuc
1287 raw_spin_lock_irqsave(&era->lock, flags);
1289 if (!atomic_read(&era->ref) || era->config == reg->config) {
1292 * If its a fake cpuc -- as per validate_{group,event}() we
1293 * shouldn't touch event state and we can avoid doing so
1294 * since both will only call get_event_constraints() once
1295 * on each event, this avoids the need for reg->alloc.
1297 * Not doing the ER fixup will only result in era->reg being
1298 * wrong, but since we won't actually try and program hardware
1299 * this isn't a problem either.
1301 if (!cpuc->is_fake) {
1302 if (idx != reg->idx)
1303 intel_fixup_er(event, idx);
1306 * x86_schedule_events() can call get_event_constraints()
1307 * multiple times on events in the case of incremental
1308 * scheduling. reg->alloc ensures we only do the ER allocation once.
1314 /* lock in msr value */
1315 era->config = reg->config;
1316 era->reg = reg->reg;
1319 atomic_inc(&era->ref);
1322 * need to call x86_get_event_constraint()
1323 * to check if associated event has constraints
1327 idx = intel_alt_er(idx);
1328 if (idx != reg->idx) {
1329 raw_spin_unlock_irqrestore(&era->lock, flags);
1333 raw_spin_unlock_irqrestore(&era->lock, flags);
1339 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1340 struct hw_perf_event_extra *reg)
1342 struct er_account *era;
1345 * Only put the constraint if the extra reg was actually allocated. Also takes
1346 * care of events which do not use an extra shared reg.
1348 * Also, if this is a fake cpuc we shouldn't touch any event state
1349 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1350 * either since it'll be thrown out.
1352 if (!reg->alloc || cpuc->is_fake)
1355 era = &cpuc->shared_regs->regs[reg->idx];
1357 /* one fewer user */
1358 atomic_dec(&era->ref);
1360 /* allocate again next time */
1364 static struct event_constraint *
1365 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1366 struct perf_event *event)
1368 struct event_constraint *c = NULL, *d;
1369 struct hw_perf_event_extra *xreg, *breg;
1371 xreg = &event->hw.extra_reg;
1372 if (xreg->idx != EXTRA_REG_NONE) {
1373 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1374 if (c == &emptyconstraint)
1377 breg = &event->hw.branch_reg;
1378 if (breg->idx != EXTRA_REG_NONE) {
1379 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1380 if (d == &emptyconstraint) {
1381 __intel_shared_reg_put_constraints(cpuc, xreg);
1388 struct event_constraint *
1389 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1391 struct event_constraint *c;
1393 if (x86_pmu.event_constraints) {
1394 for_each_event_constraint(c, x86_pmu.event_constraints) {
1395 if ((event->hw.config & c->cmask) == c->code)
1400 return &unconstrained;
1403 static struct event_constraint *
1404 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1406 struct event_constraint *c;
1408 c = intel_bts_constraints(event);
1412 c = intel_pebs_constraints(event);
1416 c = intel_shared_regs_constraints(cpuc, event);
1420 return x86_get_event_constraints(cpuc, event);
1424 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1425 struct perf_event *event)
1427 struct hw_perf_event_extra *reg;
1429 reg = &event->hw.extra_reg;
1430 if (reg->idx != EXTRA_REG_NONE)
1431 __intel_shared_reg_put_constraints(cpuc, reg);
1433 reg = &event->hw.branch_reg;
1434 if (reg->idx != EXTRA_REG_NONE)
1435 __intel_shared_reg_put_constraints(cpuc, reg);
1438 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1439 struct perf_event *event)
1441 intel_put_shared_regs_event_constraints(cpuc, event);
1444 static void intel_pebs_aliases_core2(struct perf_event *event)
1446 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1448 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1449 * (0x003c) so that we can use it with PEBS.
1451 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1452 * PEBS capable. However we can use INST_RETIRED.ANY_P
1453 * (0x00c0), which is a PEBS capable event, to get the same
1456 * INST_RETIRED.ANY_P counts the number of cycles that retire
1457 * CNTMASK instructions. By setting CNTMASK to a value (16) larger
1458 * than the maximum number of instructions that can be retired per
1459 * cycle (4) and then inverting the condition, we count all cycles
1460 * that retire 16 or fewer instructions, which is every cycle.
1463 * Thereby we gain a PEBS capable cycle counter.
1465 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1467 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1468 event->hw.config = alt_config;
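/*
 * For illustration only (not part of the driver logic): the alternative
 * encoding above corresponds to the raw value
 *   X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) == 0x108000c0
 * (event select 0xc0 in bits 0-7, INV at bit 23, CMASK=0x10 in bits 24-31),
 * roughly what a user could request by hand as
 *   perf stat -e cpu/event=0xc0,inv,cmask=16/ ...
 * using the format attributes defined later in this file.
 */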
1472 static void intel_pebs_aliases_snb(struct perf_event *event)
1474 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1476 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1477 * (0x003c) so that we can use it with PEBS.
1479 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1480 * PEBS capable. However we can use UOPS_RETIRED.ALL
1481 * (0x01c2), which is a PEBS capable event, to get the same
1484 * UOPS_RETIRED.ALL counts the number of cycles that retire
1485 * CNTMASK micro-ops. By setting CNTMASK to a value (16) larger
1486 * than the maximum number of micro-ops that can be retired per
1487 * cycle (4) and then inverting the condition, we count all cycles
1488 * that retire 16 or fewer micro-ops, which is every cycle.
1491 * Thereby we gain a PEBS capable cycle counter.
1493 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1495 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1496 event->hw.config = alt_config;
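/*
 * Similarly, for illustration: X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1,
 * .cmask=16) above encodes to the raw value 0x108001c2, i.e.
 * UOPS_RETIRED.ALL with INV set and CMASK=16, which is what the PEBS-capable
 * cycle proxy ends up programmed as.
 */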
1500 static int intel_pmu_hw_config(struct perf_event *event)
1502 int ret = x86_pmu_hw_config(event);
1507 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1508 x86_pmu.pebs_aliases(event);
1510 if (intel_pmu_needs_lbr_smpl(event)) {
1511 ret = intel_pmu_setup_lbr_filter(event);
1516 if (event->attr.type != PERF_TYPE_RAW)
1519 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1522 if (x86_pmu.version < 3)
1525 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1528 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
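/*
 * Hypothetical example of a request that reaches the check above (the event
 * string is illustrative, not taken from this file):
 *   perf stat -e cpu/event=0x3c,any/ ...
 * sets ARCH_PERFMON_EVENTSEL_ANY in attr.config; on a v3+ PMU it is only
 * accepted for sufficiently privileged users (depending on
 * perf_event_paranoid), since the ANY bit counts across both hyper-threads
 * of the core.
 */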
1533 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1535 if (x86_pmu.guest_get_msrs)
1536 return x86_pmu.guest_get_msrs(nr);
1540 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1542 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1544 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1545 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1547 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1548 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1549 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1551 * If a PMU counter has PEBS enabled, it is not enough to disable the
1552 * counter on guest entry, since a PEBS memory write can overshoot the
1553 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
1555 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1556 arr[1].host = cpuc->pebs_enabled;
1563 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1565 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1566 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1569 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1570 struct perf_event *event = cpuc->events[idx];
1572 arr[idx].msr = x86_pmu_config_addr(idx);
1573 arr[idx].host = arr[idx].guest = 0;
1575 if (!test_bit(idx, cpuc->active_mask))
1578 arr[idx].host = arr[idx].guest =
1579 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1581 if (event->attr.exclude_host)
1582 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1583 else if (event->attr.exclude_guest)
1584 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1587 *nr = x86_pmu.num_counters;
1591 static void core_pmu_enable_event(struct perf_event *event)
1593 if (!event->attr.exclude_host)
1594 x86_pmu_enable_event(event);
1597 static void core_pmu_enable_all(int added)
1599 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1602 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1603 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1605 if (!test_bit(idx, cpuc->active_mask) ||
1606 cpuc->events[idx]->attr.exclude_host)
1609 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1613 PMU_FORMAT_ATTR(event, "config:0-7" );
1614 PMU_FORMAT_ATTR(umask, "config:8-15" );
1615 PMU_FORMAT_ATTR(edge, "config:18" );
1616 PMU_FORMAT_ATTR(pc, "config:19" );
1617 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1618 PMU_FORMAT_ATTR(inv, "config:23" );
1619 PMU_FORMAT_ATTR(cmask, "config:24-31" );
1621 static struct attribute *intel_arch_formats_attr[] = {
1622 &format_attr_event.attr,
1623 &format_attr_umask.attr,
1624 &format_attr_edge.attr,
1625 &format_attr_pc.attr,
1626 &format_attr_inv.attr,
1627 &format_attr_cmask.attr,
1631 ssize_t intel_event_sysfs_show(char *page, u64 config)
1633 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
1635 return x86_event_sysfs_show(page, config, event);
1638 static __initconst const struct x86_pmu core_pmu = {
1640 .handle_irq = x86_pmu_handle_irq,
1641 .disable_all = x86_pmu_disable_all,
1642 .enable_all = core_pmu_enable_all,
1643 .enable = core_pmu_enable_event,
1644 .disable = x86_pmu_disable_event,
1645 .hw_config = x86_pmu_hw_config,
1646 .schedule_events = x86_schedule_events,
1647 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1648 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1649 .event_map = intel_pmu_event_map,
1650 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1653 * Intel PMCs cannot be accessed sanely above 32 bit width,
1654 * so we install an artificial 1<<31 period regardless of
1655 * the generic event period:
1657 .max_period = (1ULL << 31) - 1,
1658 .get_event_constraints = intel_get_event_constraints,
1659 .put_event_constraints = intel_put_event_constraints,
1660 .event_constraints = intel_core_event_constraints,
1661 .guest_get_msrs = core_guest_get_msrs,
1662 .format_attrs = intel_arch_formats_attr,
1663 .events_sysfs_show = intel_event_sysfs_show,
1666 struct intel_shared_regs *allocate_shared_regs(int cpu)
1668 struct intel_shared_regs *regs;
1671 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1672 GFP_KERNEL, cpu_to_node(cpu));
1675 * initialize the locks to keep lockdep happy
1677 for (i = 0; i < EXTRA_REG_MAX; i++)
1678 raw_spin_lock_init(®s->regs[i].lock);
1685 static int intel_pmu_cpu_prepare(int cpu)
1687 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1689 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
1692 cpuc->shared_regs = allocate_shared_regs(cpu);
1693 if (!cpuc->shared_regs)
1699 static void intel_pmu_cpu_starting(int cpu)
1701 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1702 int core_id = topology_core_id(cpu);
1705 init_debug_store_on_cpu(cpu);
1707 * Deal with CPUs that don't clear their LBRs on power-up.
1709 intel_pmu_lbr_reset();
1711 cpuc->lbr_sel = NULL;
1713 if (!cpuc->shared_regs)
1716 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
1717 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1718 struct intel_shared_regs *pc;
1720 pc = per_cpu(cpu_hw_events, i).shared_regs;
1721 if (pc && pc->core_id == core_id) {
1722 cpuc->kfree_on_online = cpuc->shared_regs;
1723 cpuc->shared_regs = pc;
1727 cpuc->shared_regs->core_id = core_id;
1728 cpuc->shared_regs->refcnt++;
1731 if (x86_pmu.lbr_sel_map)
1732 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
1735 static void intel_pmu_cpu_dying(int cpu)
1737 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1738 struct intel_shared_regs *pc;
1740 pc = cpuc->shared_regs;
1742 if (pc->core_id == -1 || --pc->refcnt == 0)
1744 cpuc->shared_regs = NULL;
1747 fini_debug_store_on_cpu(cpu);
1750 static void intel_pmu_flush_branch_stack(void)
1753 * Intel LBR does not tag entries with the
1754 * PID of the current task, so we need to flush it on ctxsw.
1756 * For now, we simply reset it
1759 intel_pmu_lbr_reset();
1762 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1764 static struct attribute *intel_arch3_formats_attr[] = {
1765 &format_attr_event.attr,
1766 &format_attr_umask.attr,
1767 &format_attr_edge.attr,
1768 &format_attr_pc.attr,
1769 &format_attr_any.attr,
1770 &format_attr_inv.attr,
1771 &format_attr_cmask.attr,
1773 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
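/*
 * Hypothetical usage sketch (not from this file): with the format attributes
 * above exported via sysfs, an offcore-response event can be requested from
 * userspace roughly as
 *   perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0x10001/ ...
 * where offcore_rsp lands in attr.config1 and is validated against the
 * extra_regs valid-mask tables earlier in this file; 0x10001 would be
 * SNB_DMND_DATA_RD|SNB_RESP_ANY on SandyBridge.
 */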
1777 static __initconst const struct x86_pmu intel_pmu = {
1779 .handle_irq = intel_pmu_handle_irq,
1780 .disable_all = intel_pmu_disable_all,
1781 .enable_all = intel_pmu_enable_all,
1782 .enable = intel_pmu_enable_event,
1783 .disable = intel_pmu_disable_event,
1784 .hw_config = intel_pmu_hw_config,
1785 .schedule_events = x86_schedule_events,
1786 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1787 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1788 .event_map = intel_pmu_event_map,
1789 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1792 * Intel PMCs cannot be accessed sanely above 32 bit width,
1793 * so we install an artificial 1<<31 period regardless of
1794 * the generic event period:
1796 .max_period = (1ULL << 31) - 1,
1797 .get_event_constraints = intel_get_event_constraints,
1798 .put_event_constraints = intel_put_event_constraints,
1799 .pebs_aliases = intel_pebs_aliases_core2,
1801 .format_attrs = intel_arch3_formats_attr,
1802 .events_sysfs_show = intel_event_sysfs_show,
1804 .cpu_prepare = intel_pmu_cpu_prepare,
1805 .cpu_starting = intel_pmu_cpu_starting,
1806 .cpu_dying = intel_pmu_cpu_dying,
1807 .guest_get_msrs = intel_guest_get_msrs,
1808 .flush_branch_stack = intel_pmu_flush_branch_stack,
1811 static __init void intel_clovertown_quirk(void)
1814 * PEBS is unreliable due to:
1816 * AJ67 - PEBS may experience CPL leaks
1817 * AJ68 - PEBS PMI may be delayed by one event
1818 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1819 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1821 * AJ67 could be worked around by restricting the OS/USR flags.
1822 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1824 * AJ106 could possibly be worked around by not allowing LBR
1825 * usage from PEBS, including the fixup.
1826 * AJ68 could possibly be worked around by always programming
1827 * a pebs_event_reset[0] value and coping with the lost events.
1829 * But taken together it might just make sense to not enable PEBS on these chips.
1832 pr_warn("PEBS disabled due to CPU errata\n");
1834 x86_pmu.pebs_constraints = NULL;
1837 static int intel_snb_pebs_broken(int cpu)
1839 u32 rev = UINT_MAX; /* default to broken for unknown models */
1841 switch (cpu_data(cpu).x86_model) {
1846 case 45: /* SNB-EP */
1847 switch (cpu_data(cpu).x86_mask) {
1848 case 6: rev = 0x618; break;
1849 case 7: rev = 0x70c; break;
1853 return (cpu_data(cpu).microcode < rev);
1856 static void intel_snb_check_microcode(void)
1858 int pebs_broken = 0;
1862 for_each_online_cpu(cpu) {
1863 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
1868 if (pebs_broken == x86_pmu.pebs_broken)
1872 * Serialized by the microcode lock.
1874 if (x86_pmu.pebs_broken) {
1875 pr_info("PEBS enabled due to microcode update\n");
1876 x86_pmu.pebs_broken = 0;
1878 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1879 x86_pmu.pebs_broken = 1;
1883 static __init void intel_sandybridge_quirk(void)
1885 x86_pmu.check_microcode = intel_snb_check_microcode;
1886 intel_snb_check_microcode();
1889 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
1890 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
1891 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
1892 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
1893 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
1894 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
1895 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
1896 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
1899 static __init void intel_arch_events_quirk(void)
1903 /* disable events reported as not present by cpuid */
1904 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
1905 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
1906 pr_warn("CPUID marked event: \'%s\' unavailable\n",
1907 intel_arch_events_map[bit].name);
1911 static __init void intel_nehalem_quirk(void)
1913 union cpuid10_ebx ebx;
1915 ebx.full = x86_pmu.events_maskl;
1916 if (ebx.split.no_branch_misses_retired) {
1918 * Erratum AAJ80 detected, we work around it by using
1919 * the BR_MISP_EXEC.ANY event. This will over-count
1920 * branch-misses, but it's still much better than the
1921 * architectural event which is often completely bogus:
1923 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1924 ebx.split.no_branch_misses_retired = 0;
1925 x86_pmu.events_maskl = ebx.full;
1926 pr_info("CPU erratum AAJ80 worked around\n");
1930 __init int intel_pmu_init(void)
1932 union cpuid10_edx edx;
1933 union cpuid10_eax eax;
1934 union cpuid10_ebx ebx;
1935 struct event_constraint *c;
1936 unsigned int unused;
1939 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1940 switch (boot_cpu_data.x86) {
1942 return p6_pmu_init();
1944 return knc_pmu_init();
1946 return p4_pmu_init();
1952 * Check whether the Architectural PerfMon supports
1953 * Branch Misses Retired hw_event or not.
1955 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
1956 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
1959 version = eax.split.version_id;
1963 x86_pmu = intel_pmu;
1965 x86_pmu.version = version;
1966 x86_pmu.num_counters = eax.split.num_counters;
1967 x86_pmu.cntval_bits = eax.split.bit_width;
1968 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
1970 x86_pmu.events_maskl = ebx.full;
1971 x86_pmu.events_mask_len = eax.split.mask_length;
1973 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
1976 * Quirk: v2 perfmon does not report fixed-purpose events, so
1977 * assume at least 3 events:
1980 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
1983 * v2 and above have a perf capabilities MSR
1988 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
1989 x86_pmu.intel_cap.capabilities = capabilities;
1994 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
1997 * Install the hw-cache-events table:
1999 switch (boot_cpu_data.x86_model) {
2000 case 14: /* 65 nm core solo/duo, "Yonah" */
2001 pr_cont("Core events, ");
2004 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2005 x86_add_quirk(intel_clovertown_quirk);
2006 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2007 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2008 case 29: /* six-core 45 nm xeon "Dunnington" */
2009 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2010 sizeof(hw_cache_event_ids));
2012 intel_pmu_lbr_init_core();
2014 x86_pmu.event_constraints = intel_core2_event_constraints;
2015 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
2016 pr_cont("Core2 events, ");
2019 case 26: /* 45 nm nehalem, "Bloomfield" */
2020 case 30: /* 45 nm nehalem, "Lynnfield" */
2021 case 46: /* 45 nm nehalem-ex, "Beckton" */
2022 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2023 sizeof(hw_cache_event_ids));
2024 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2025 sizeof(hw_cache_extra_regs));
2027 intel_pmu_lbr_init_nhm();
2029 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2030 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
2031 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2032 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2034 /* UOPS_ISSUED.STALLED_CYCLES */
2035 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2036 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2037 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2038 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2039 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2041 x86_add_quirk(intel_nehalem_quirk);
2043 pr_cont("Nehalem events, ");
2047 case 38: /* Lincroft */
2048 case 39: /* Penwell */
2049 case 53: /* Cloverview */
2050 case 54: /* Cedarview */
2051 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2052 sizeof(hw_cache_event_ids));
2054 intel_pmu_lbr_init_atom();
2056 x86_pmu.event_constraints = intel_gen_event_constraints;
2057 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
2058 pr_cont("Atom events, ");
2061 case 37: /* 32 nm nehalem, "Clarkdale" */
2062 case 44: /* 32 nm nehalem, "Gulftown" */
2063 case 47: /* 32 nm Xeon E7 */
2064 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2065 sizeof(hw_cache_event_ids));
2066 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2067 sizeof(hw_cache_extra_regs));
2069 intel_pmu_lbr_init_nhm();
2071 x86_pmu.event_constraints = intel_westmere_event_constraints;
2072 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2073 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
2074 x86_pmu.extra_regs = intel_westmere_extra_regs;
2075 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2077 /* UOPS_ISSUED.STALLED_CYCLES */
2078 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2079 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2080 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2081 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2082 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2084 pr_cont("Westmere events, ");
2087 case 42: /* SandyBridge */
2088 case 45: /* SandyBridge, "Romley-EP" */
2089 x86_add_quirk(intel_sandybridge_quirk);
2090 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2091 sizeof(hw_cache_event_ids));
2092 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2093 sizeof(hw_cache_extra_regs));
2095 intel_pmu_lbr_init_snb();
2097 x86_pmu.event_constraints = intel_snb_event_constraints;
2098 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2099 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2100 x86_pmu.extra_regs = intel_snb_extra_regs;
2101 /* all extra regs are per-cpu when HT is on */
2102 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2103 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2105 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2106 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2107 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2108 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
2109 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2110 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
2112 pr_cont("SandyBridge events, ");
2114 case 58: /* IvyBridge */
2115 case 62: /* IvyBridge EP */
2116 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2117 sizeof(hw_cache_event_ids));
2118 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2119 sizeof(hw_cache_extra_regs));
2121 intel_pmu_lbr_init_snb();
2123 x86_pmu.event_constraints = intel_ivb_event_constraints;
2124 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2125 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2126 x86_pmu.extra_regs = intel_snb_extra_regs;
2127 /* all extra regs are per-cpu when HT is on */
2128 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2129 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2131 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2132 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2133 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2135 pr_cont("IvyBridge events, ");
2140 switch (x86_pmu.version) {
2142 x86_pmu.event_constraints = intel_v1_event_constraints;
2143 pr_cont("generic architected perfmon v1, ");
2147 * default constraints for v2 and up
2149 x86_pmu.event_constraints = intel_gen_event_constraints;
2150 pr_cont("generic architected perfmon, ");
2155 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2156 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2157 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2158 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2160 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2162 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2163 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2164 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2165 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2168 x86_pmu.intel_ctrl |=
2169 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
2171 if (x86_pmu.event_constraints) {
2173 * event on fixed counter2 (REF_CYCLES) only works on this
2174 * counter, so do not extend mask to generic counters
2176 for_each_event_constraint(c, x86_pmu.event_constraints) {
2177 if (c->cmask != X86_RAW_EVENT_MASK
2178 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2182 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2183 c->weight += x86_pmu.num_counters;