/*
 * Source: arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
 * (imported from the karo-tx-linux.git mirror; gitweb page header removed)
 */
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "perf_event_intel_uncore.h"
3
4
5 /* SNB-EP Box level control */
6 #define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
7 #define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
8 #define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
9 #define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
10 #define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
11                                          SNBEP_PMON_BOX_CTL_RST_CTRS | \
12                                          SNBEP_PMON_BOX_CTL_FRZ_EN)
13 /* SNB-EP event control */
14 #define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
15 #define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
16 #define SNBEP_PMON_CTL_RST              (1 << 17)
17 #define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
18 #define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
19 #define SNBEP_PMON_CTL_EN               (1 << 22)
20 #define SNBEP_PMON_CTL_INVERT           (1 << 23)
21 #define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
22 #define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
23                                          SNBEP_PMON_CTL_UMASK_MASK | \
24                                          SNBEP_PMON_CTL_EDGE_DET | \
25                                          SNBEP_PMON_CTL_INVERT | \
26                                          SNBEP_PMON_CTL_TRESH_MASK)
27
28 /* SNB-EP Ubox event control */
29 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
30 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
31                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
32                                  SNBEP_PMON_CTL_UMASK_MASK | \
33                                  SNBEP_PMON_CTL_EDGE_DET | \
34                                  SNBEP_PMON_CTL_INVERT | \
35                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
36
37 #define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
38 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
39                                                  SNBEP_CBO_PMON_CTL_TID_EN)
40
41 /* SNB-EP PCU event control */
42 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
43 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
44 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
45 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
46 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
47                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
48                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
49                                  SNBEP_PMON_CTL_EDGE_DET | \
50                                  SNBEP_PMON_CTL_EV_SEL_EXT | \
51                                  SNBEP_PMON_CTL_INVERT | \
52                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
53                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
54                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
55
56 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
57                                 (SNBEP_PMON_RAW_EVENT_MASK | \
58                                  SNBEP_PMON_CTL_EV_SEL_EXT)
59
60 /* SNB-EP pci control register */
61 #define SNBEP_PCI_PMON_BOX_CTL                  0xf4
62 #define SNBEP_PCI_PMON_CTL0                     0xd8
63 /* SNB-EP pci counter register */
64 #define SNBEP_PCI_PMON_CTR0                     0xa0
65
66 /* SNB-EP home agent register */
67 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
68 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
69 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
70 /* SNB-EP memory controller register */
71 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
72 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
73 /* SNB-EP QPI register */
74 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
75 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
76 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
78
79 /* SNB-EP Ubox register */
80 #define SNBEP_U_MSR_PMON_CTR0                   0xc16
81 #define SNBEP_U_MSR_PMON_CTL0                   0xc10
82
83 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
84 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
85
86 /* SNB-EP Cbo register */
87 #define SNBEP_C0_MSR_PMON_CTR0                  0xd16
88 #define SNBEP_C0_MSR_PMON_CTL0                  0xd10
89 #define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
90 #define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
91 #define SNBEP_CBO_MSR_OFFSET                    0x20
92
93 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
94 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
95 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
97
98 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
99         .event = (e),                           \
100         .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
101         .config_mask = (m),                     \
102         .idx = (i)                              \
103 }
104
105 /* SNB-EP PCU register */
106 #define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
107 #define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
108 #define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
109 #define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
110 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
111 #define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
112 #define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
113
114 /* IVBEP event control */
115 #define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
116                                          SNBEP_PMON_BOX_CTL_RST_CTRS)
117 #define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
118                                          SNBEP_PMON_CTL_UMASK_MASK | \
119                                          SNBEP_PMON_CTL_EDGE_DET | \
120                                          SNBEP_PMON_CTL_TRESH_MASK)
121 /* IVBEP Ubox */
122 #define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
123 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
124 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
125
126 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
127                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
128                                  SNBEP_PMON_CTL_UMASK_MASK | \
129                                  SNBEP_PMON_CTL_EDGE_DET | \
130                                  SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
131 /* IVBEP Cbo */
132 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
133                                                  SNBEP_CBO_PMON_CTL_TID_EN)
134
135 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
136 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
137 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
143
144 /* IVBEP home agent */
145 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
146 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
147                                 (IVBEP_PMON_RAW_EVENT_MASK | \
148                                  IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
149 /* IVBEP PCU */
150 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
151                                 (SNBEP_PMON_CTL_EV_SEL_MASK | \
152                                  SNBEP_PMON_CTL_EV_SEL_EXT | \
153                                  SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
154                                  SNBEP_PMON_CTL_EDGE_DET | \
155                                  SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
156                                  SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
157                                  SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
158 /* IVBEP QPI */
159 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
160                                 (IVBEP_PMON_RAW_EVENT_MASK | \
161                                  SNBEP_PMON_CTL_EV_SEL_EXT)
162
163 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
164                                 ((1ULL << (n)) - 1)))
165
166 /* Haswell-EP Ubox */
167 #define HSWEP_U_MSR_PMON_CTR0                   0x709
168 #define HSWEP_U_MSR_PMON_CTL0                   0x705
169 #define HSWEP_U_MSR_PMON_FILTER                 0x707
170
171 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
172 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
173
174 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
175 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
177                                         (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
178                                          HSWEP_U_MSR_PMON_BOX_FILTER_CID)
179
180 /* Haswell-EP CBo */
181 #define HSWEP_C0_MSR_PMON_CTR0                  0xe08
182 #define HSWEP_C0_MSR_PMON_CTL0                  0xe01
183 #define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
184 #define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
185 #define HSWEP_CBO_MSR_OFFSET                    0x10
186
187
188 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
189 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
196
197
198 /* Haswell-EP Sbox */
199 #define HSWEP_S0_MSR_PMON_CTR0                  0x726
200 #define HSWEP_S0_MSR_PMON_CTL0                  0x721
201 #define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
202 #define HSWEP_SBOX_MSR_OFFSET                   0xa
203 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
204                                                  SNBEP_CBO_PMON_CTL_TID_EN)
205
206 /* Haswell-EP PCU */
207 #define HSWEP_PCU_MSR_PMON_CTR0                 0x717
208 #define HSWEP_PCU_MSR_PMON_CTL0                 0x711
209 #define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
210 #define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
211
212
/*
 * sysfs "format" attributes: each maps a user-visible field name to a bit
 * range in perf_event_attr::config/config1/config2 for the uncore PMUs.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");	/* event select + EV_SEL_EXT bit */
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");	/* 8-bit threshold (generic boxes) */
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");	/* 5-bit threshold (Ubox/PCU) */
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
/* NOTE(review): 14-51 looks odd next to OCC_EDGE_DET (bit 31) — matches upstream, verify */
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
/* CBo/PCU/HA filter fields live in config1; variants differ per CPU generation */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
/* QPI packet match (config1) and mask (config2) fields */
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
262
263 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
264 {
265         struct pci_dev *pdev = box->pci_dev;
266         int box_ctl = uncore_pci_box_ctl(box);
267         u32 config = 0;
268
269         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
270                 config |= SNBEP_PMON_BOX_CTL_FRZ;
271                 pci_write_config_dword(pdev, box_ctl, config);
272         }
273 }
274
275 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
276 {
277         struct pci_dev *pdev = box->pci_dev;
278         int box_ctl = uncore_pci_box_ctl(box);
279         u32 config = 0;
280
281         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
282                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
283                 pci_write_config_dword(pdev, box_ctl, config);
284         }
285 }
286
287 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
288 {
289         struct pci_dev *pdev = box->pci_dev;
290         struct hw_perf_event *hwc = &event->hw;
291
292         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
293 }
294
295 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
296 {
297         struct pci_dev *pdev = box->pci_dev;
298         struct hw_perf_event *hwc = &event->hw;
299
300         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
301 }
302
303 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
304 {
305         struct pci_dev *pdev = box->pci_dev;
306         struct hw_perf_event *hwc = &event->hw;
307         u64 count = 0;
308
309         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
310         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
311
312         return count;
313 }
314
/*
 * Initialize a PCI PMON box: write the INT value (reset control regs,
 * reset counters, enable freeze) to the fixed SNB-EP box control offset.
 */
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}
321
322 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
323 {
324         u64 config;
325         unsigned msr;
326
327         msr = uncore_msr_box_ctl(box);
328         if (msr) {
329                 rdmsrl(msr, config);
330                 config |= SNBEP_PMON_BOX_CTL_FRZ;
331                 wrmsrl(msr, config);
332         }
333 }
334
335 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
336 {
337         u64 config;
338         unsigned msr;
339
340         msr = uncore_msr_box_ctl(box);
341         if (msr) {
342                 rdmsrl(msr, config);
343                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
344                 wrmsrl(msr, config);
345         }
346 }
347
/*
 * Enable an MSR-based event.  The shared filter register (if this event
 * uses one, i.e. reg1->idx != EXTRA_REG_NONE) is programmed *before* the
 * counter control MSR is written with the enable bit set.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
358
/*
 * Disable an MSR-based event by rewriting its control MSR without the
 * enable bit (hwc->config does not carry SNBEP_PMON_CTL_EN).
 */
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
366
/*
 * Initialize an MSR-based PMON box: write the INT value (reset control
 * regs, reset counters, enable freeze) to the box control MSR, if any.
 */
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
374
/* Generic SNB-EP box format: 8-bit event/umask/threshold plus edge/invert. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
383
/* Ubox format: like the generic set, but with a 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
392
/* CBo format: generic fields plus tid_en and the config1 filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
406
/* PCU format: extended event select, occupancy fields and band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
421
/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
448
/*
 * IMC pre-defined events.  The CAS count scale (6.103515625e-5) converts
 * 64-byte cache-line transfers to MiB: 64 / 2^20.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
459
/*
 * QPI pre-defined events.  event=0x1xx encodes the extended event select
 * bit (bit 21 via the event_ext format, see SNBEP_PMON_CTL_EV_SEL_EXT).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
467
/* sysfs "format" attribute groups, one per box-type attribute table above. */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
492
493 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
494         .disable_box    = snbep_uncore_msr_disable_box,         \
495         .enable_box     = snbep_uncore_msr_enable_box,          \
496         .disable_event  = snbep_uncore_msr_disable_event,       \
497         .enable_event   = snbep_uncore_msr_enable_event,        \
498         .read_counter   = uncore_msr_read_counter
499
500 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
501         __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
502         .init_box       = snbep_uncore_msr_init_box             \
503
504 static struct intel_uncore_ops snbep_uncore_msr_ops = {
505         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
506 };
507
508 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
509         .init_box       = snbep_uncore_pci_init_box,            \
510         .disable_box    = snbep_uncore_pci_disable_box,         \
511         .enable_box     = snbep_uncore_pci_enable_box,          \
512         .disable_event  = snbep_uncore_pci_disable_event,       \
513         .read_counter   = snbep_uncore_pci_read_counter
514
515 static struct intel_uncore_ops snbep_uncore_pci_ops = {
516         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
517         .enable_event   = snbep_uncore_pci_enable_event,        \
518 };
519
/*
 * CBo event constraints: per-event bitmask of usable counters
 * (e.g. 0x1 = counter 0 only, 0x3 = counters 0-1, 0xc = counters 2-3).
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	/* 0x1f's mask overlaps others; needs the special overlap handling */
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
549
/* R2PCIe event constraints (event code -> usable-counter bitmask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
563
/* R3QPI event constraints (event code -> usable-counter bitmask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
595
/*
 * SNB-EP Ubox: single box, two 44-bit general counters plus one 48-bit
 * fixed (UCLK) counter, all accessed via MSRs.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
610
/*
 * CBo extra (filter) register usage table: for each event/umask pattern,
 * the idx bitmask records which filter fields the event needs (used by
 * snbep_cbox_hw_config() to build reg1->idx).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
639
640 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
641 {
642         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
643         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
644         int i;
645
646         if (uncore_box_is_fake(box))
647                 return;
648
649         for (i = 0; i < 5; i++) {
650                 if (reg1->alloc & (0x1 << i))
651                         atomic_sub(1 << (i * 6), &er->ref);
652         }
653         reg1->alloc = 0;
654 }
655
/*
 * Try to claim the shared CBo filter register fields this event needs.
 *
 * reg1->idx is a bitmask of required filter fields (5 fields, one bit
 * each); er->ref packs a 6-bit refcount per field.  Under er->lock, each
 * required field is claimed if it is currently unused (refcount slice is
 * zero) or already programmed with a compatible value (config bits under
 * the field's mask match).  On success the field's bits are merged into
 * er->config and its refcount bumped.
 *
 * Returns NULL when all fields were claimed (no constraint), or
 * &uncore_constraint_empty after rolling back the refs taken so far when
 * any field conflicts.  For fake boxes the claim is probed but never
 * recorded in reg1->alloc (and already-allocated fields are not skipped).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* real boxes keep fields they already allocated earlier */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			/* conflict: stop, i < 5 triggers rollback below */
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* undo the references taken in this call before giving up */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
702
703 static u64 snbep_cbox_filter_mask(int fields)
704 {
705         u64 mask = 0;
706
707         if (fields & 0x1)
708                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
709         if (fields & 0x2)
710                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
711         if (fields & 0x4)
712                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
713         if (fields & 0x8)
714                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
715
716         return mask;
717 }
718
/* Cbox constraint hook: generic logic with the SNB-EP filter layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
724
725 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
726 {
727         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
728         struct extra_reg *er;
729         int idx = 0;
730
731         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
732                 if (er->event != (event->hw.config & er->config_mask))
733                         continue;
734                 idx |= er->idx;
735         }
736
737         if (idx) {
738                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
739                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
740                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
741                 reg1->idx = idx;
742         }
743         return 0;
744 }
745
/* SNB-EP Cbox callbacks: common MSR ops plus the filter-sharing hooks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP Cbox PMU type.  num_boxes is clamped to the actual core
 * count in snbep_uncore_cpu_init().
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
768
769 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
770 {
771         struct hw_perf_event *hwc = &event->hw;
772         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
773         u64 config = reg1->config;
774
775         if (new_idx > reg1->idx)
776                 config <<= 8 * (new_idx - reg1->idx);
777         else
778                 config >>= 8 * (reg1->idx - new_idx);
779
780         if (modify) {
781                 hwc->config += new_idx - reg1->idx;
782                 reg1->config = config;
783                 reg1->idx = new_idx;
784         }
785         return config;
786 }
787
/*
 * Reserve one 8-bit byte lane of the shared PCU filter register for
 * @event.  If the preferred lane (reg1->idx) is held with a different
 * filter value, rotate through the remaining lanes -- shifting the
 * filter value into place with snbep_pcu_alter_er() -- before giving
 * up and returning the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* Nothing to reserve, or (real box) already reserved earlier. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Lane is free, or already carries the same filter value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the lane switch and remember the allocation. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
829
830 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
831 {
832         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
833         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
834
835         if (uncore_box_is_fake(box) || !reg1->alloc)
836                 return;
837
838         atomic_sub(1 << (reg1->idx * 8), &er->ref);
839         reg1->alloc = 0;
840 }
841
842 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
843 {
844         struct hw_perf_event *hwc = &event->hw;
845         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
846         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
847
848         if (ev_sel >= 0xb && ev_sel <= 0xe) {
849                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
850                 reg1->idx = ev_sel - 0xb;
851                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
852         }
853         return 0;
854 }
855
/* SNB-EP PCU callbacks: band-filter lanes shared via shared_regs[0]. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit PMU: one box, 48-bit counters. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* MSR-based uncore PMUs on SNB-EP; NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
883
884 void snbep_uncore_cpu_init(void)
885 {
886         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
887                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
888         uncore_msr_uncores = snbep_msr_uncores;
889 }
890
/*
 * Indices into uncore_extra_pci_dev[] for auxiliary PCI devices that
 * are not PMUs themselves (QPI port filter devices; HSW-EP PCU unit 3).
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
896
897 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
898 {
899         struct hw_perf_event *hwc = &event->hw;
900         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
901         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
902
903         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
904                 reg1->idx = 0;
905                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
906                 reg1->config = event->attr.config1;
907                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
908                 reg2->config = event->attr.config2;
909         }
910         return 0;
911 }
912
/*
 * Enable a QPI event.  If the event uses the packet match/mask
 * registers, program them first through the separate (non-PMU) filter
 * PCI device for this port; each 64-bit value is written as two
 * 32-bit config-space dwords.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		/* The filter device may be absent; skip match/mask then. */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
937
/* QPI callbacks: common PCI ops plus match/mask filter programming. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all SNB-EP PCI-based uncore PMU types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,                  \
	.event_ctl	= SNBEP_PCI_PMON_CTL0,                  \
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,            \
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,               \
	.ops		= &snbep_uncore_pci_ops,                \
	.format_group	= &snbep_uncore_format_group
953
/* Home agent. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Memory controller channels; one fixed counter per box. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI ports; uses its own event mask, ops and format group. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI links. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1007
/* Index of each PCI uncore type in snbep_pci_uncores[]. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* PCI-based uncore PMUs on SNB-EP; NULL-terminated. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1024
/*
 * PCI device IDs of the SNB-EP uncore units.  driver_data encodes the
 * uncore type index (or UNCORE_EXTRA_PCI_DEV for non-PMU filter
 * devices) and the box/filter index within that type.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1083
/*
 * build pci bus to socket mapping
 *
 * Walk all Ubox PCI devices (matching @devid); for each one, read the
 * local Node ID (config offset 0x40) and the Node ID mapping register
 * (offset 0x54) to work out which physical package the device's bus
 * belongs to.  Buses without a Ubox then inherit the mapping of the
 * next higher-numbered bus that has one.
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		/*
		 * NOTE(review): the full 32-bit register value is compared
		 * below against 3-bit fields -- assumes the upper bits of
		 * the Node ID register read as zero; confirm against the
		 * uncore documentation.
		 */
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			for (bus = 255; bus >= 0; bus--) {
				if (map->pbus_to_physid[bus] >= 0)
					i = map->pbus_to_physid[bus];
				else
					map->pbus_to_physid[bus] = i;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drop the reference from the last pci_get_device() call. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1155
1156 int snbep_uncore_pci_init(void)
1157 {
1158         int ret = snbep_pci2phy_map_init(0x3ce0);
1159         if (ret)
1160                 return ret;
1161         uncore_pci_uncores = snbep_pci_uncores;
1162         uncore_pci_driver = &snbep_uncore_pci_driver;
1163         return 0;
1164 }
1165 /* end of Sandy Bridge-EP uncore support */
1166
1167 /* IvyTown uncore support */
1168 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1169 {
1170         unsigned msr = uncore_msr_box_ctl(box);
1171         if (msr)
1172                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1173 }
1174
1175 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1176 {
1177         struct pci_dev *pdev = box->pci_dev;
1178
1179         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1180 }
1181
/* Common MSR ops for IVB-EP: IVB-EP init, SNB-EP everything else. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* PCI ops for IVB-EP: IVB-EP init, SNB-EP everything else. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Register layout shared by all IVB-EP PCI-based uncore PMU types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1210
/* Generic IVB-EP event format: 8-bit threshold field. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: only a 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds the filter fields programmed via config1. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge plus four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: packet match/mask fields via config1/config2. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1285
/* sysfs "format" groups exposing the attribute arrays above. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1310
/* IVB-EP Ubox: two 44-bit counters plus a 48-bit fixed (uclk) counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1325
/*
 * Map event/umask encodings to the bitmap of Cbox filter fields they
 * require; the bits match those decoded by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1366
1367 static u64 ivbep_cbox_filter_mask(int fields)
1368 {
1369         u64 mask = 0;
1370
1371         if (fields & 0x1)
1372                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1373         if (fields & 0x2)
1374                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1375         if (fields & 0x4)
1376                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1377         if (fields & 0x8)
1378                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1379         if (fields & 0x10) {
1380                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1381                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1382                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1383                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1384         }
1385
1386         return mask;
1387 }
1388
/* Cbox constraint hook: generic logic with the IVB-EP filter layout. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1394
1395 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1396 {
1397         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1398         struct extra_reg *er;
1399         int idx = 0;
1400
1401         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1402                 if (er->event != (event->hw.config & er->config_mask))
1403                         continue;
1404                 idx |= er->idx;
1405         }
1406
1407         if (idx) {
1408                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1409                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1410                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1411                 reg1->idx = idx;
1412         }
1413         return 0;
1414 }
1415
/*
 * Enable an IVB-EP Cbox event, first programming the shared filter
 * value when the event uses one.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split over two 32-bit MSRs;
		 * the high half is written 6 MSRs above the low half.
		 * NOTE(review): the "+ 6" offset stands out next to the
		 * "+ 4" used for PCI dwords elsewhere -- confirm it matches
		 * the IVT Cbox FILTER/FILTER1 MSR spacing.
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1429
/* IVB-EP Cbox callbacks: IVB-EP init/enable plus SNB-EP common ops. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * IVB-EP Cbox PMU type.  num_boxes is clamped to the actual core
 * count in ivbep_uncore_cpu_init().
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1457
/*
 * Callback table for the IvyTown PCU PMU: common IVT MSR ops plus the
 * SNB-EP PCU filter hw_config and constraint handling.
 */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1464
/*
 * IvyTown power control unit PMU: a single box with four 48-bit
 * counters and one shared (filter) register.
 */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1478
/* NULL-terminated list of all IvyTown MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1485
1486 void ivbep_uncore_cpu_init(void)
1487 {
1488         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1489                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1490         uncore_msr_uncores = ivbep_msr_uncores;
1491 }
1492
/* IvyTown home agent PMU: two PCI boxes, four 48-bit counters each. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1500
/*
 * IvyTown memory controller PMU: eight channel boxes, each with four
 * 48-bit general counters plus one 48-bit fixed (DCLK) counter.
 */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1512
/* registers in IRP boxes are not properly aligned */
/* Per-counter PCI config offsets of the IRP event-control registers. */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
/* Per-counter PCI config offsets of the IRP counters (read as 2 dwords). */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1516
1517 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1518 {
1519         struct pci_dev *pdev = box->pci_dev;
1520         struct hw_perf_event *hwc = &event->hw;
1521
1522         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1523                                hwc->config | SNBEP_PMON_CTL_EN);
1524 }
1525
1526 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1527 {
1528         struct pci_dev *pdev = box->pci_dev;
1529         struct hw_perf_event *hwc = &event->hw;
1530
1531         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1532 }
1533
1534 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1535 {
1536         struct pci_dev *pdev = box->pci_dev;
1537         struct hw_perf_event *hwc = &event->hw;
1538         u64 count = 0;
1539
1540         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1541         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1542
1543         return count;
1544 }
1545
/*
 * Callback table for the IvyTown IRP PCI PMU.  Uses IRP-specific
 * event enable/disable and counter-read helpers because the IRP
 * registers are not laid out at the standard offsets (see the
 * ivbep_uncore_irp_ctls/ctrs tables above).
 */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1554
/*
 * IvyTown IRP (IIO ring port) PMU: one box, four 48-bit counters.
 * No .perf_ctr/.event_ctl here — register offsets come from the
 * lookup tables used by ivbep_uncore_irp_ops instead.
 */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1565
/*
 * Callback table for the IvyTown QPI PCI PMU: standard SNB-EP PCI box
 * handling plus the SNB-EP QPI match/mask (filter) event path.
 */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1577
/*
 * IvyTown QPI link layer PMU: three port boxes, four 48-bit counters
 * and one shared register each.
 */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1591
/* IvyTown R2PCIe ring-to-PCIe interface PMU: one box, 44-bit counters. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1600
/* IvyTown R3QPI ring-to-QPI interface PMU: two boxes, 44-bit counters. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1609
/* Indices into ivbep_pci_uncores[], referenced by the PCI id table. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1618
/* NULL-terminated list of all IvyTown PCI-based uncore PMU types. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1628
/*
 * PCI id table for the IvyTown uncore PCI devices.  driver_data encodes
 * the uncore type index (from the enum above) and the box/filter index
 * via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1714
/*
 * PCI driver shell for the IvyTown uncore devices; probing is handled
 * by the generic uncore layer, which fills in the remaining callbacks.
 */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1719
1720 int ivbep_uncore_pci_init(void)
1721 {
1722         int ret = snbep_pci2phy_map_init(0x0e1e);
1723         if (ret)
1724                 return ret;
1725         uncore_pci_uncores = ivbep_pci_uncores;
1726         uncore_pci_driver = &ivbep_uncore_pci_driver;
1727         return 0;
1728 }
1729 /* end of IvyTown uncore support */
1730
1731 /* Haswell-EP uncore support */
/* sysfs format attributes exposed for Haswell-EP Ubox events. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
1742
/* sysfs "format" group wrapping the Ubox format attributes above. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
1747
1748 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1749 {
1750         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1751         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
1752         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
1753         reg1->idx = 0;
1754         return 0;
1755 }
1756
/*
 * Callback table for the HSW-EP Ubox: common SNB-EP MSR ops plus the
 * Ubox filter hw_config and generic shared-reg constraint handling.
 */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1763
/*
 * Haswell-EP Ubox PMU: one box with two 44-bit general counters, a
 * 48-bit fixed (UCLK) counter and one shared (filter) register.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
1779
/* sysfs format attributes exposed for Haswell-EP C-box events. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1796
/* sysfs "format" group wrapping the C-box format attributes above. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
1801
/*
 * HSW-EP C-box counter constraints: these event codes are restricted
 * to specific counter slots (the second argument is a counter bitmask).
 */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
1812
/*
 * Events that need the C-box filter register.  Each entry matches an
 * (event, umask) encoding against config_mask and contributes a bitmap
 * of required filter fields (the idx argument, decoded by
 * hswep_cbox_filter_mask()).  Values are hardware-defined; do not edit
 * without the uncore performance monitoring reference manual.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
1854
1855 static u64 hswep_cbox_filter_mask(int fields)
1856 {
1857         u64 mask = 0;
1858         if (fields & 0x1)
1859                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
1860         if (fields & 0x2)
1861                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1862         if (fields & 0x4)
1863                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1864         if (fields & 0x8)
1865                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
1866         if (fields & 0x10) {
1867                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1868                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
1869                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
1870                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1871         }
1872         return mask;
1873 }
1874
/*
 * C-box constraint lookup: defer to the shared SNB-EP implementation,
 * parameterized with the HSW-EP filter-field mask decoder.
 */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
1880
1881 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1882 {
1883         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1884         struct extra_reg *er;
1885         int idx = 0;
1886
1887         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
1888                 if (er->event != (event->hw.config & er->config_mask))
1889                         continue;
1890                 idx |= er->idx;
1891         }
1892
1893         if (idx) {
1894                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1895                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1896                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
1897                 reg1->idx = idx;
1898         }
1899         return 0;
1900 }
1901
1902 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1903                                   struct perf_event *event)
1904 {
1905         struct hw_perf_event *hwc = &event->hw;
1906         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1907
1908         if (reg1->idx != EXTRA_REG_NONE) {
1909                 u64 filter = uncore_shared_reg_config(box, 0);
1910                 wrmsrl(reg1->reg, filter & 0xffffffff);
1911                 wrmsrl(reg1->reg + 1, filter >> 32);
1912         }
1913
1914         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1915 }
1916
/*
 * Callback table for the HSW-EP C-box MSR PMU.  Event enable, hw_config
 * and constraint lookup use the HSW-EP filter handling above; the rest
 * reuses the SNB-EP implementations.
 */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1928
/*
 * Haswell-EP C-box uncore PMU: up to 18 boxes (trimmed to the real core
 * count in hswep_uncore_cpu_init()), four 48-bit counters and one
 * shared (filter) register per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
1944
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Instead of a single write of SNBEP_PMON_BOX_CTL_INT,
		 * accumulate its set bits one at a time and write the
		 * MSR after each additional bit.  Do not "simplify"
		 * this into one wrmsrl(): the incremental sequence is
		 * the whole point of this function.
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
1963
/*
 * SBOX callback table: the common SNB-EP MSR ops with init_box
 * overridden by the bit-by-bit #GP-avoiding variant above.
 */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
1968
/* sysfs format attributes exposed for Haswell-EP SBOX events. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1978
/* sysfs "format" group wrapping the SBOX format attributes above. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
1983
/*
 * Haswell-EP SBOX PMU: four boxes by default (reduced to two on 6-8
 * core parts in hswep_uncore_cpu_init()), four 44-bit counters each.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
1997
1998 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1999 {
2000         struct hw_perf_event *hwc = &event->hw;
2001         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2002         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2003
2004         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2005                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2006                 reg1->idx = ev_sel - 0xb;
2007                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2008         }
2009         return 0;
2010 }
2011
/*
 * Callback table for the HSW-EP PCU: common SNB-EP MSR ops, HSW-EP
 * band-filter hw_config, and the SNB-EP PCU constraint handlers.
 */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2018
/*
 * Haswell-EP power control unit PMU: a single box with four 48-bit
 * counters and one shared (filter) register; reuses the SNB-EP
 * event mask and format group.
 */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2032
/* NULL-terminated list of all Haswell-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2040
2041 void hswep_uncore_cpu_init(void)
2042 {
2043         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2044                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2045
2046         /* Detect 6-8 core systems with only two SBOXes */
2047         if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
2048                 u32 capid4;
2049
2050                 pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
2051                                       0x94, &capid4);
2052                 if (((capid4 >> 6) & 0x3) == 0)
2053                         hswep_uncore_sbox.num_boxes = 2;
2054         }
2055
2056         uncore_msr_uncores = hswep_msr_uncores;
2057 }
2058
/* Haswell-EP home agent PMU: two PCI boxes, five 48-bit counters each. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2066
/*
 * Named IMC events exposed through sysfs.  CAS counts are scaled by
 * 64/2^20 (6.103515625e-5) so one CAS — a 64-byte cache line — reads
 * out in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2077
/*
 * Haswell-EP memory controller PMU: eight channel boxes, each with
 * five 48-bit general counters plus one 48-bit fixed (DCLK) counter.
 */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2089
/* Per-counter PCI config offsets of the HSW-EP IRP counters (2 dwords each). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2091
2092 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2093 {
2094         struct pci_dev *pdev = box->pci_dev;
2095         struct hw_perf_event *hwc = &event->hw;
2096         u64 count = 0;
2097
2098         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2099         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2100
2101         return count;
2102 }
2103
/*
 * Callback table for the HSW-EP IRP PCI PMU.  Event enable/disable
 * reuse the IVB-EP IRP helpers (same control-register layout); only
 * the counter offsets differ, via hswep_uncore_irp_read_counter().
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2112
/*
 * Haswell-EP IRP PMU: one box, four 48-bit counters.  Like the IVB-EP
 * IRP, counter/control offsets come from lookup tables rather than
 * .perf_ctr/.event_ctl.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2123
/*
 * Haswell-EP QPI link layer PMU: three port boxes, five 48-bit
 * counters and one shared register each; reuses the SNB-EP QPI ops
 * and format group.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 5,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2137
/*
 * HSW-EP R2PCIe counter constraints: these event codes are restricted
 * to specific counter slots (second argument is a counter bitmask).
 */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2159
/* Haswell-EP R2PCIe ring-to-PCIe interface PMU: one box, 48-bit counters. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2168
/*
 * HSW-EP R3QPI counter constraints: these event codes are restricted
 * to specific counter slots (second argument is a counter bitmask).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2205
/*
 * Haswell-EP R3QPI uncore PMU: three PCI-based boxes (one per QPI link),
 * each with four counters.  Note the narrower 44-bit counters, unlike
 * the 48-bit counters of the other Haswell-EP boxes in this file.
 */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2214
/*
 * Indices into hswep_pci_uncores[]; stored in the PCI id table's
 * driver_data so probe code can find the matching uncore type.
 * Keep in sync with the hswep_pci_uncores[] initializers below.
 */
enum {
	HSWEP_PCI_UNCORE_HA,		/* Home Agent */
	HSWEP_PCI_UNCORE_IMC,		/* Integrated Memory Controller */
	HSWEP_PCI_UNCORE_IRP,		/* IRP box */
	HSWEP_PCI_UNCORE_QPI,		/* QPI ports */
	HSWEP_PCI_UNCORE_R2PCIE,	/* Ring-to-PCIe */
	HSWEP_PCI_UNCORE_R3QPI,		/* Ring-to-QPI */
};
2223
/* All Haswell-EP PCI-based uncore types, indexed by HSWEP_PCI_UNCORE_*. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,	/* sentinel for the generic uncore PCI code */
};
2233
/*
 * PCI device IDs of the Haswell-EP uncore PMU units.  driver_data packs
 * the uncore type index (HSWEP_PCI_UNCORE_*) together with the box
 * instance number via UNCORE_PCI_DEV_DATA().  The UNCORE_EXTRA_PCI_DEV
 * entries at the end are auxiliary devices (QPI port filters, PCU.3)
 * that other boxes need access to, not PMU boxes themselves.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2324
/*
 * PCI driver skeleton for Haswell-EP uncore devices; the generic uncore
 * PCI code supplies probe/remove when it is registered.
 */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2329
2330 int hswep_uncore_pci_init(void)
2331 {
2332         int ret = snbep_pci2phy_map_init(0x2f1e);
2333         if (ret)
2334                 return ret;
2335         uncore_pci_uncores = hswep_pci_uncores;
2336         uncore_pci_driver = &hswep_uncore_pci_driver;
2337         return 0;
2338 }
2339 /* end of Haswell-EP uncore support */
2340
2341 /* BDX-DE uncore support */
2342
/*
 * BDX-DE Ubox PMU (MSR-based): two 48-bit general-purpose counters plus
 * a fixed 48-bit uncore-clock counter.  Reuses the Haswell-EP Ubox MSR
 * layout with the IvyBridge-EP MSR ops and Ubox format group.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
2358
/*
 * BDX-DE C-box event constraints: events 0x11 and 0x36 are restricted
 * to counter 0 (mask 0x1), event 0x09 to counters 0-1 (mask 0x3).
 */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
2365
/*
 * BDX-DE C-box PMU (MSR-based): up to 8 boxes of four 48-bit counters,
 * using the Haswell-EP C-box MSR layout, ops and format group.
 * num_boxes is trimmed at runtime to the actual core count in
 * bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2381
/* All MSR-based BDX-DE uncore types (PCU is shared with Haswell-EP). */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,	/* sentinel for the generic uncore code */
};
2388
2389 void bdx_uncore_cpu_init(void)
2390 {
2391         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2392                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2393         uncore_msr_uncores = bdx_msr_uncores;
2394 }
2395
/*
 * BDX-DE Home Agent PMU: one PCI-based box with four 48-bit counters,
 * using the common SNB-EP PCI PMON register layout.
 */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2403
/*
 * BDX-DE IMC PMU: two PCI-based memory-channel boxes, each with five
 * 48-bit general counters plus the SNB-EP fixed (DCLK) counter.
 * Event descriptions are shared with Haswell-EP.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2415
/*
 * BDX-DE IRP PMU: one PCI-based box with four 48-bit counters.  Unlike
 * the other PCI boxes it does not use SNBEP_UNCORE_PCI_COMMON_INIT();
 * it needs the special Haswell-EP IRP ops (no per-counter ctl/ctr base
 * fields here).
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2426
2427
/*
 * BDX-DE R2PCIe event constraints: counter mask per event code
 * (0x1 = counter 0 only, 0x3 = counters 0-1).
 */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
2438
/*
 * BDX-DE R2PCIe PMU: one PCI-based box with four 48-bit counters,
 * restricted per event by bdx_uncore_r2pcie_constraints.
 */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2447
/*
 * Indices into bdx_pci_uncores[]; stored in the PCI id table's
 * driver_data.  Keep in sync with bdx_pci_uncores[] below.
 */
enum {
	BDX_PCI_UNCORE_HA,	/* Home Agent */
	BDX_PCI_UNCORE_IMC,	/* Integrated Memory Controller */
	BDX_PCI_UNCORE_IRP,	/* IRP box */
	BDX_PCI_UNCORE_R2PCIE,	/* Ring-to-PCIe */
};
2454
/* All BDX-DE PCI-based uncore types, indexed by BDX_PCI_UNCORE_*. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	NULL,	/* sentinel for the generic uncore PCI code */
};
2462
/*
 * PCI device IDs of the BDX-DE uncore PMU units.  driver_data packs the
 * uncore type index (BDX_PCI_UNCORE_*) and box instance number via
 * UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* end: all zeroes */ }
};
2486
/*
 * PCI driver skeleton for BDX-DE uncore devices; the generic uncore
 * PCI code supplies probe/remove when it is registered.
 */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
2491
2492 int bdx_uncore_pci_init(void)
2493 {
2494         int ret = snbep_pci2phy_map_init(0x6f1e);
2495
2496         if (ret)
2497                 return ret;
2498         uncore_pci_uncores = bdx_pci_uncores;
2499         uncore_pci_driver = &bdx_uncore_pci_driver;
2500         return 0;
2501 }
2502
2503 /* end of BDX-DE uncore support */