/* arch/x86/events/intel/uncore_snbep.c */
1 /* SandyBridge-EP/IvyTown uncore support */
2 #include "uncore.h"
3
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID                 0x40
#define SNBEP_GIDNIDMAP                 0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
/* Box init value: reset control + counters, enable the freeze bit */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control (note: "TRESH" spelling follows the original source) */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
/* All config bits a user-supplied raw event may set on a generic counter */
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: narrower (5-bit) threshold field */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI boxes additionally allow the extended event-select bit */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CBO_MSR_OFFSET                    0x20

/* Cbo filter register sub-fields */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

/* Initializer for an extra_reg entry targeting the Cbo filter register */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
/* IVBEP event control: no FRZ_EN on init, no invert bit in the raw mask */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter register sub-fields (64-bit filter) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field i of width n bits from x, keeping x's type */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))
166
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0                   0x709
#define HSWEP_U_MSR_PMON_CTL0                   0x705
#define HSWEP_U_MSR_PMON_FILTER                 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704

/* Ubox filter fields: TID (bit 0) and CID (bits 1-5) */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
#define HSWEP_CBO_MSR_OFFSET                    0x10

/* CBo filter register sub-fields (64-bit filter) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0                  0x726
#define HSWEP_S0_MSR_PMON_CTL0                  0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
#define HSWEP_SBOX_MSR_OFFSET                   0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
212
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
                                                SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET                      0xc
#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
                                         KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
#define KNL_UCLK_MSR_PMON_CTL0                  0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
#define KNL_PMON_FIXED_CTL_EN                   0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select plus the use-occupancy-counter bit */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_CBO_PMON_CTL_TID_EN | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
270
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID                   0xc0
#define SKX_GIDNIDMAP                   0xd4

/* SKX CHA filter register sub-fields (64-bit filter) */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0          0xa48
#define SKX_IIO0_MSR_PMON_CTR0          0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
#define SKX_IIO_MSR_OFFSET              0x20

/* IIO event control: 12-bit threshold split across config/config-ext */
#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
                                         SKX_PMON_CTL_CH_MASK | \
                                         SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
#define SKX_IRP0_MSR_PMON_CTR0          0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
#define SKX_IRP_MSR_OFFSET              0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0           0x350
#define SKX_UPI_PCI_PMON_CTR0           0x318
#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
#define SKX_UPI_CTL_UMASK_EXT           0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0           0x228
#define SKX_M2M_PCI_PMON_CTR0           0x200
#define SKX_M2M_PCI_PMON_BOX_CTL        0x258
325
326 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
327 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
328 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
329 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
330 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
331 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
332 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
333 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
334 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
335 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
336 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
337 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
338 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
339 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
340 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
341 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
342 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
343 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
344 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
345 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
346 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
347 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
348 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
349 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
350 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
351 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
352 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
353 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
354 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
355 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
356 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
357 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
358 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
359 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
360 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
361 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
362 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
363 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
364 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
365 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
366 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
367 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
368 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
369 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
370 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
371 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
372 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
373 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
374 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
375 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
376 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
377 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
378 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
379 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
380 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
381 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
382 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
383 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
384 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
385 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
386 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
387 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
388 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
389 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
390 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
391 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
392 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
393 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
394 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
395 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
396 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
397 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
398
399 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
400 {
401         struct pci_dev *pdev = box->pci_dev;
402         int box_ctl = uncore_pci_box_ctl(box);
403         u32 config = 0;
404
405         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
406                 config |= SNBEP_PMON_BOX_CTL_FRZ;
407                 pci_write_config_dword(pdev, box_ctl, config);
408         }
409 }
410
411 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
412 {
413         struct pci_dev *pdev = box->pci_dev;
414         int box_ctl = uncore_pci_box_ctl(box);
415         u32 config = 0;
416
417         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
418                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
419                 pci_write_config_dword(pdev, box_ctl, config);
420         }
421 }
422
423 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
424 {
425         struct pci_dev *pdev = box->pci_dev;
426         struct hw_perf_event *hwc = &event->hw;
427
428         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
429 }
430
431 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
432 {
433         struct pci_dev *pdev = box->pci_dev;
434         struct hw_perf_event *hwc = &event->hw;
435
436         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
437 }
438
439 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
440 {
441         struct pci_dev *pdev = box->pci_dev;
442         struct hw_perf_event *hwc = &event->hw;
443         u64 count = 0;
444
445         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
446         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
447
448         return count;
449 }
450
451 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
452 {
453         struct pci_dev *pdev = box->pci_dev;
454         int box_ctl = uncore_pci_box_ctl(box);
455
456         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
457 }
458
459 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
460 {
461         u64 config;
462         unsigned msr;
463
464         msr = uncore_msr_box_ctl(box);
465         if (msr) {
466                 rdmsrl(msr, config);
467                 config |= SNBEP_PMON_BOX_CTL_FRZ;
468                 wrmsrl(msr, config);
469         }
470 }
471
472 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
473 {
474         u64 config;
475         unsigned msr;
476
477         msr = uncore_msr_box_ctl(box);
478         if (msr) {
479                 rdmsrl(msr, config);
480                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
481                 wrmsrl(msr, config);
482         }
483 }
484
485 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
486 {
487         struct hw_perf_event *hwc = &event->hw;
488         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
489
490         if (reg1->idx != EXTRA_REG_NONE)
491                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
492
493         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
494 }
495
496 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
497                                         struct perf_event *event)
498 {
499         struct hw_perf_event *hwc = &event->hw;
500
501         wrmsrl(hwc->config_base, hwc->config);
502 }
503
504 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
505 {
506         unsigned msr = uncore_msr_box_ctl(box);
507
508         if (msr)
509                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
510 }
511
512 static struct attribute *snbep_uncore_formats_attr[] = {
513         &format_attr_event.attr,
514         &format_attr_umask.attr,
515         &format_attr_edge.attr,
516         &format_attr_inv.attr,
517         &format_attr_thresh8.attr,
518         NULL,
519 };
520
521 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
522         &format_attr_event.attr,
523         &format_attr_umask.attr,
524         &format_attr_edge.attr,
525         &format_attr_inv.attr,
526         &format_attr_thresh5.attr,
527         NULL,
528 };
529
530 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
531         &format_attr_event.attr,
532         &format_attr_umask.attr,
533         &format_attr_edge.attr,
534         &format_attr_tid_en.attr,
535         &format_attr_inv.attr,
536         &format_attr_thresh8.attr,
537         &format_attr_filter_tid.attr,
538         &format_attr_filter_nid.attr,
539         &format_attr_filter_state.attr,
540         &format_attr_filter_opc.attr,
541         NULL,
542 };
543
544 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
545         &format_attr_event.attr,
546         &format_attr_occ_sel.attr,
547         &format_attr_edge.attr,
548         &format_attr_inv.attr,
549         &format_attr_thresh5.attr,
550         &format_attr_occ_invert.attr,
551         &format_attr_occ_edge.attr,
552         &format_attr_filter_band0.attr,
553         &format_attr_filter_band1.attr,
554         &format_attr_filter_band2.attr,
555         &format_attr_filter_band3.attr,
556         NULL,
557 };
558
559 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
560         &format_attr_event_ext.attr,
561         &format_attr_umask.attr,
562         &format_attr_edge.attr,
563         &format_attr_inv.attr,
564         &format_attr_thresh8.attr,
565         &format_attr_match_rds.attr,
566         &format_attr_match_rnid30.attr,
567         &format_attr_match_rnid4.attr,
568         &format_attr_match_dnid.attr,
569         &format_attr_match_mc.attr,
570         &format_attr_match_opc.attr,
571         &format_attr_match_vnw.attr,
572         &format_attr_match0.attr,
573         &format_attr_match1.attr,
574         &format_attr_mask_rds.attr,
575         &format_attr_mask_rnid30.attr,
576         &format_attr_mask_rnid4.attr,
577         &format_attr_mask_dnid.attr,
578         &format_attr_mask_mc.attr,
579         &format_attr_mask_opc.attr,
580         &format_attr_mask_vnw.attr,
581         &format_attr_mask0.attr,
582         &format_attr_mask1.attr,
583         NULL,
584 };
585
586 static struct uncore_event_desc snbep_uncore_imc_events[] = {
587         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
588         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
589         INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
590         INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
591         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
592         INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
593         INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
594         { /* end: all zeroes */ },
595 };
596
597 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
598         INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
599         INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
600         INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
601         INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
602         { /* end: all zeroes */ },
603 };
604
605 static struct attribute_group snbep_uncore_format_group = {
606         .name = "format",
607         .attrs = snbep_uncore_formats_attr,
608 };
609
610 static struct attribute_group snbep_uncore_ubox_format_group = {
611         .name = "format",
612         .attrs = snbep_uncore_ubox_formats_attr,
613 };
614
615 static struct attribute_group snbep_uncore_cbox_format_group = {
616         .name = "format",
617         .attrs = snbep_uncore_cbox_formats_attr,
618 };
619
620 static struct attribute_group snbep_uncore_pcu_format_group = {
621         .name = "format",
622         .attrs = snbep_uncore_pcu_formats_attr,
623 };
624
625 static struct attribute_group snbep_uncore_qpi_format_group = {
626         .name = "format",
627         .attrs = snbep_uncore_qpi_formats_attr,
628 };
629
630 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
631         .disable_box    = snbep_uncore_msr_disable_box,         \
632         .enable_box     = snbep_uncore_msr_enable_box,          \
633         .disable_event  = snbep_uncore_msr_disable_event,       \
634         .enable_event   = snbep_uncore_msr_enable_event,        \
635         .read_counter   = uncore_msr_read_counter
636
637 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
638         __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
639         .init_box       = snbep_uncore_msr_init_box             \
640
641 static struct intel_uncore_ops snbep_uncore_msr_ops = {
642         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
643 };
644
645 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
646         .init_box       = snbep_uncore_pci_init_box,            \
647         .disable_box    = snbep_uncore_pci_disable_box,         \
648         .enable_box     = snbep_uncore_pci_enable_box,          \
649         .disable_event  = snbep_uncore_pci_disable_event,       \
650         .read_counter   = snbep_uncore_pci_read_counter
651
652 static struct intel_uncore_ops snbep_uncore_pci_ops = {
653         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
654         .enable_event   = snbep_uncore_pci_enable_event,        \
655 };
656
/*
 * Cbox scheduling constraints: for each event code, the mask of
 * general-purpose counters the event may be programmed on.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
686
/* R2PCIe scheduling constraints (event code -> allowed-counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
700
/* R3QPI scheduling constraints (event code -> allowed-counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
732
/* UBox PMON: MSR-based, two 44-bit GP counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
747
/*
 * Which Cbox filter fields each event/umask combination needs: the last
 * argument is a bitmask of filter fields (consumed as ->idx by
 * snbep_cbox_hw_config(), which ORs the bits of all matching entries).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
776
777 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
778 {
779         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
780         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
781         int i;
782
783         if (uncore_box_is_fake(box))
784                 return;
785
786         for (i = 0; i < 5; i++) {
787                 if (reg1->alloc & (0x1 << i))
788                         atomic_sub(1 << (i * 6), &er->ref);
789         }
790         reg1->alloc = 0;
791 }
792
/*
 * Try to claim every filter-register field (bits of reg1->idx) the event
 * needs.  Each field holds a 6-bit refcount packed into er->ref; a field
 * may be shared only when the register's current contents already match
 * the requested config.  Returns NULL on success, or the empty
 * constraint if the shared register cannot be allocated.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* A real (non-fake) event may already hold a ref on this field. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is free (refcount 0) or already configured identically. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* i < 5 means the loop broke out: some field could not be claimed. */
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Roll back only the references taken during this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
839
840 static u64 snbep_cbox_filter_mask(int fields)
841 {
842         u64 mask = 0;
843
844         if (fields & 0x1)
845                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
846         if (fields & 0x2)
847                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
848         if (fields & 0x4)
849                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
850         if (fields & 0x8)
851                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
852
853         return mask;
854 }
855
/* SNB-EP Cbox constraint handler: arbitrate the shared filter register. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
861
862 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
863 {
864         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
865         struct extra_reg *er;
866         int idx = 0;
867
868         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
869                 if (er->event != (event->hw.config & er->config_mask))
870                         continue;
871                 idx |= er->idx;
872         }
873
874         if (idx) {
875                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
876                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
877                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
878                 reg1->idx = idx;
879         }
880         return 0;
881 }
882
/* Cbox ops: common MSR callbacks plus shared-filter-register management. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
889
/* Cbox PMON: one box per core slice (up to 8), one shared filter register. */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
905
906 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
907 {
908         struct hw_perf_event *hwc = &event->hw;
909         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
910         u64 config = reg1->config;
911
912         if (new_idx > reg1->idx)
913                 config <<= 8 * (new_idx - reg1->idx);
914         else
915                 config >>= 8 * (reg1->idx - new_idx);
916
917         if (modify) {
918                 hwc->config += new_idx - reg1->idx;
919                 reg1->config = config;
920                 reg1->idx = new_idx;
921         }
922         return config;
923 }
924
/*
 * Claim one byte of the shared PCU filter register.  If the byte at the
 * event's band index is held with a different value, rotate through the
 * other three bands (shifting the event's filter value accordingly)
 * before giving up with the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or a real event that already holds its byte. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Byte is free (refcount 0) or already programmed identically. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next of the four bands; stop once we wrap around. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the band change to the event before marking it allocated. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
966
967 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
968 {
969         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
970         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
971
972         if (uncore_box_is_fake(box) || !reg1->alloc)
973                 return;
974
975         atomic_sub(1 << (reg1->idx * 8), &er->ref);
976         reg1->alloc = 0;
977 }
978
979 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
980 {
981         struct hw_perf_event *hwc = &event->hw;
982         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
983         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
984
985         if (ev_sel >= 0xb && ev_sel <= 0xe) {
986                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
987                 reg1->idx = ev_sel - 0xb;
988                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
989         }
990         return 0;
991 }
992
/* PCU ops: common MSR callbacks plus band-filter management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
999
/* PCU (power control unit) PMON: MSR-based, with one shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1013
/* NULL-terminated list of the SNB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1020
1021 void snbep_uncore_cpu_init(void)
1022 {
1023         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1024                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1025         uncore_msr_uncores = snbep_msr_uncores;
1026 }
1027
/*
 * Slots for auxiliary PCI devices kept in uncore_extra_pci_dev[pkg].dev[]
 * (the QPI per-port filter devices; HSWEP_PCI_PCU_3 is presumably for the
 * Haswell-EP code — it is not referenced in the SNB-EP paths here).
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
1033
1034 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1035 {
1036         struct hw_perf_event *hwc = &event->hw;
1037         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1038         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1039
1040         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1041                 reg1->idx = 0;
1042                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1043                 reg1->config = event->attr.config1;
1044                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1045                 reg2->config = event->attr.config2;
1046         }
1047         return 0;
1048 }
1049
/*
 * Enable a QPI event.  If it carries a match/mask filter (set up by
 * snbep_qpi_hw_config), first program the 64-bit match and mask values
 * into the separate per-port filter PCI device, then enable the counter.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* Locate this port's filter device for the event's package. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		if (filter_pdev) {
			/* 64-bit match/mask values are written as two dwords each. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1076
/* QPI ops: common PCI callbacks plus packet match/mask filter handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1084
/* Common initializer for SNB-EP PCI-based PMON types (registers, ops, format). */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1092
/* Home Agent PMON (PCI-based). */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1100
/* IMC PMON: one box per memory channel, plus a fixed (DCLK) counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1112
/* QPI PMON: one box per link, with its own ops/format for match/mask filters. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1127
1128
/* R2PCIe PMON (PCI-based), with per-event counter constraints. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1137
/* R3QPI PMON: one box per QPI link, with per-event counter constraints. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1146
/* Indexes into snbep_pci_uncores[]. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1154
/* PCI-based uncore types, indexed by the SNBEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1163
/*
 * SNB-EP uncore PCI device IDs; driver_data encodes which uncore type
 * and box index each device belongs to.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1217
/*
 * Only the id_table is filled in here; the remaining pci_driver hooks
 * are presumably installed by the common uncore PCI code — verify there.
 */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1222
/*
 * Build the PCI bus -> physical socket mapping.
 *
 * Walk every UBOX device matching @devid; for each, read the local Node
 * ID from config offset @nodeid_loc and the node-id mapping register
 * from @idmap_loc, and resolve the device's bus to a physical socket.
 * Buses without a UBOX device then inherit the mapping of a neighbouring
 * bus, scanning downward if @reverse, upward otherwise.
 *
 * Returns 0 on success or a negative errno (PCIBIOS errors converted).
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		/* Each PCI segment keeps its own bus->socket map. */
		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drop the reference left by the final pci_get_device() iteration. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1303
1304 int snbep_uncore_pci_init(void)
1305 {
1306         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1307         if (ret)
1308                 return ret;
1309         uncore_pci_uncores = snbep_pci_uncores;
1310         uncore_pci_driver = &snbep_uncore_pci_driver;
1311         return 0;
1312 }
1313 /* end of Sandy Bridge-EP uncore support */
1314
1315 /* IvyTown uncore support */
1316 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1317 {
1318         unsigned msr = uncore_msr_box_ctl(box);
1319         if (msr)
1320                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1321 }
1322
1323 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1324 {
1325         struct pci_dev *pdev = box->pci_dev;
1326
1327         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1328 }
1329
/* IvyTown MSR ops: the SNB-EP callbacks with the IVT init_box handler. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1337
/* Default ops for IvyTown MSR-based PMON boxes. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1341
/* Default ops for IvyTown PCI-based PMON boxes (IVT init_box, SNB-EP rest). */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1350
/* Common initializer for IvyTown PCI-based PMON types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1358
/* sysfs event-format attributes common to IvyTown PMON boxes. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
1367
/* sysfs event-format attributes for the IvyTown UBox (5-bit threshold). */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1376
/* sysfs event-format attributes for the IvyTown Cbox, incl. filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1393
/* sysfs event-format attributes for the IvyTown PCU, incl. band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
1407
/* sysfs event-format attributes for IvyTown QPI, incl. match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1433
/*
 * "format" attribute groups, one per IVT box flavour; each publishes the
 * layout of that PMU's raw config fields.
 */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1458
/* IVT U-box: two 44-bit general counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1473
/*
 * Events that require the C-box filter MSR.  Each entry matches an
 * event/umask pattern (er->event under er->config_mask) and contributes
 * er->idx bits; ivbep_cbox_filter_mask() translates the OR of those bits
 * into the corresponding filter MSR field mask.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1514
1515 static u64 ivbep_cbox_filter_mask(int fields)
1516 {
1517         u64 mask = 0;
1518
1519         if (fields & 0x1)
1520                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1521         if (fields & 0x2)
1522                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1523         if (fields & 0x4)
1524                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1525         if (fields & 0x8)
1526                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1527         if (fields & 0x10) {
1528                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1529                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1530                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1531                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1532         }
1533
1534         return mask;
1535 }
1536
/* Delegate to the shared SNB-EP C-box constraint logic with the IVT filter mask. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1542
1543 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1544 {
1545         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1546         struct extra_reg *er;
1547         int idx = 0;
1548
1549         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1550                 if (er->event != (event->hw.config & er->config_mask))
1551                         continue;
1552                 idx |= er->idx;
1553         }
1554
1555         if (idx) {
1556                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1557                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1558                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1559                 reg1->idx = idx;
1560         }
1561         return 0;
1562 }
1563
/*
 * Enable a C-box event.  If the event uses the box filter, program the
 * filter MSRs first so filtering is in effect before the counter is
 * enabled.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* low 32 bits go to the base filter MSR ... */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * ... high 32 bits to a second filter MSR 6 registers up.
		 * NOTE(review): the '+ 6' offset is taken on trust here --
		 * confirm against the IVT uncore C-box MSR layout.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1577
/* MSR accessors for the IVT C-box, including its filter-register handling. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1589
/*
 * IVT C-box (LLC slice) PMU: up to 15 instances; num_boxes is clamped to
 * the core count in ivbep_uncore_cpu_init().
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter register */
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1605
/* IVT PCU ops: common MSR ops plus the SNB-EP PCU config/constraint hooks. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit PMU. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1626
/* All MSR-based IVT uncore types; NULL-terminated. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1633
1634 void ivbep_uncore_cpu_init(void)
1635 {
1636         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1637                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1638         uncore_msr_uncores = ivbep_msr_uncores;
1639 }
1640
/* IVT home agent PMU (PCI-attached). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT integrated memory controller PMU, with a fixed (DCLK) counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1660
/*
 * Registers in IRP boxes are not properly aligned: control and counter
 * offsets in PCI config space do not follow a fixed stride, so they are
 * looked up per counter index from these tables.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1664
/* Enable an IRP event: write its control register with the EN bit set. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1673
/* Disable an IRP event: rewrite its control register without the EN bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1681
1682 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1683 {
1684         struct pci_dev *pdev = box->pci_dev;
1685         struct hw_perf_event *hwc = &event->hw;
1686         u64 count = 0;
1687
1688         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1689         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1690
1691         return count;
1692 }
1693
/* IRP ops: PCI box control plus the unaligned-register event accessors above. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVT IRP (IIO ring port) PMU. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1713
/* QPI ops: PCI accessors plus the SNB-EP QPI match/mask handling. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVT QPI link-layer PMU, one box per QPI port. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1739
/* IVT R2PCIe (ring-to-PCIe) PMU. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI (ring-to-QPI) PMU. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1757
/* Indices into ivbep_pci_uncores[], referenced by the PCI ID table below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* PCI-attached IVT uncore types, indexed by the enum above; NULL-terminated. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1776
/*
 * PCI device IDs of the IVT uncore units.  driver_data packs the uncore
 * type (index into ivbep_pci_uncores[]) and the box number within that
 * type.  The two trailing "filter" devices are extra devices used by the
 * QPI code rather than PMU boxes of their own.
 * NOTE(review): the IMC comments label channels 0/1/3/4 while the box
 * indices stay dense (0-7) -- presumably intentional; confirm against
 * the IVT uncore documentation.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1862
/* PCI driver stub binding the ID table above; probing is done by uncore core. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1867
1868 int ivbep_uncore_pci_init(void)
1869 {
1870         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1871         if (ret)
1872                 return ret;
1873         uncore_pci_uncores = ivbep_pci_uncores;
1874         uncore_pci_driver = &ivbep_uncore_pci_driver;
1875         return 0;
1876 }
1877 /* end of IvyTown uncore support */
1878
1879 /* KNL uncore support */
/* Raw event format fields for the KNL U-box. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1894
/* KNL U-box: reuses the HSW-EP U-box MSRs with a KNL raw event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1909
/* Raw event format fields for the KNL CHA (caching/home agent). */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	/* filter_* fields come from config1 -- see knl_cha_hw_config() */
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
1934
/* CHA events restricted to specific counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * CHA events that need the filter register; idx bits are decoded by
 * knl_cha_filter_mask().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1950
1951 static u64 knl_cha_filter_mask(int fields)
1952 {
1953         u64 mask = 0;
1954
1955         if (fields & 0x1)
1956                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1957         if (fields & 0x2)
1958                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1959         if (fields & 0x4)
1960                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1961         return mask;
1962 }
1963
/* Delegate to the shared SNB-EP C-box constraint logic with the KNL filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1969
1970 static int knl_cha_hw_config(struct intel_uncore_box *box,
1971                              struct perf_event *event)
1972 {
1973         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1974         struct extra_reg *er;
1975         int idx = 0;
1976
1977         for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
1978                 if (er->event != (event->hw.config & er->config_mask))
1979                         continue;
1980                 idx |= er->idx;
1981         }
1982
1983         if (idx) {
1984                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1985                             KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
1986                 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
1987
1988                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
1989                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
1990                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
1991                 reg1->idx = idx;
1992         }
1993         return 0;
1994 }
1995
1996 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1997                                     struct perf_event *event);
1998
/* CHA ops: SNB-EP MSR box control, HSW-EP enable path, KNL filter handling. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,	/* forward-declared above */
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2010
/* KNL CHA PMU: 38 instances, reusing the HSW-EP C-box MSR base addresses. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter register */
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2026
/* Raw event format fields for the KNL PCU. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2044
/* KNL power control unit PMU, at the HSW-EP PCU MSR addresses. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2057
/* All MSR-based KNL uncore types; NULL-terminated. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the MSR-based KNL uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2069
/*
 * Enable a KNL MC/EDC box by writing 0 to its box control register,
 * clearing any freeze/reset bits that may be set there.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2077
2078 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2079                                         struct perf_event *event)
2080 {
2081         struct pci_dev *pdev = box->pci_dev;
2082         struct hw_perf_event *hwc = &event->hw;
2083
2084         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2085                                                         == UNCORE_FIXED_EVENT)
2086                 pci_write_config_dword(pdev, hwc->config_base,
2087                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2088         else
2089                 pci_write_config_dword(pdev, hwc->config_base,
2090                                        hwc->config | SNBEP_PMON_CTL_EN);
2091 }
2092
/* Shared PCI ops for all KNL MC/EDC boxes (uclk/dclk/eclk alike). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2101
/* KNL memory controller, uncore-clock (UCLK) domain. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL memory controller, per-channel DCLK domain. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2133
/* KNL embedded DRAM controller, uncore-clock (UCLK) domain. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL embedded DRAM controller, EDC-clock (ECLK) domain. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2165
/* KNL M2PCIe counter constraints: event 0x23 may only use counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2170
/*
 * KNL M2PCIe PMON unit: single PCI-based box with four 48-bit counters,
 * using the common SNB-EP PCI register layout and ops.
 */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2179
/* sysfs "format" attributes exposed for KNL IRP events (note: qor, no inv-only set). */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2189
/* Groups the KNL IRP format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2194
/*
 * KNL IRP PMON unit: one PCI-based box with two 48-bit counters; uses the
 * common SNB-EP PCI counter/control offsets but a KNL-specific event mask,
 * box-control offset and format group.
 */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2207
/* Indices into knl_pci_uncores[] (also encoded into pci_device_id driver_data). */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2216
/* NULL-terminated table of all KNL PCI-based uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2226
2227 /*
2228  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2229  * device type. prior to KNL, each instance of a PMU device type had a unique
2230  * device ID.
2231  *
2232  *      PCI Device ID   Uncore PMU Devices
2233  *      ----------------------------------
2234  *      0x7841          MC0 UClk, MC1 UClk
2235  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2236  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2237  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2238  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2239  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2240  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2241  *      0x7817          M2PCIe
2242  *      0x7814          IRP
 */
2244
/*
 * KNL PCI device-ID table.  Because one device ID covers multiple PMU
 * instances (see the comment above), each entry pins a fixed PCI
 * device/function number and the PMU type/instance index via
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2352
/* Minimal PCI driver carrying only a name and the KNL device-ID table. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2357
2358 int knl_uncore_pci_init(void)
2359 {
2360         int ret;
2361
2362         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2363         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2364         if (ret)
2365                 return ret;
2366         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2367         if (ret)
2368                 return ret;
2369         uncore_pci_uncores = knl_pci_uncores;
2370         uncore_pci_driver = &knl_uncore_pci_driver;
2371         return 0;
2372 }
2373
2374 /* end of KNL uncore support */
2375
2376 /* Haswell-EP uncore support */
/* sysfs "format" attributes for HSW-EP UBOX events (5-bit threshold, tid/cid filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2387
/* Groups the HSW-EP UBOX format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2392
2393 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2394 {
2395         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2396         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2397         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2398         reg1->idx = 0;
2399         return 0;
2400 }
2401
/* UBOX ops: common SNB-EP MSR ops plus filter config and shared-reg constraint handling. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2408
/*
 * HSW-EP UBOX: one box, two 44-bit general counters plus a 48-bit fixed
 * (UCLK) counter; one shared register slot backs the filter MSR.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2424
/* sysfs "format" attributes for HSW-EP CBOX events, including the full filter set. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2441
/* Groups the HSW-EP CBOX format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2446
/* CBOX events restricted to specific counters: (event code, allowed-counter mask). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2457
/*
 * Maps CBOX (event, umask) patterns to the filter-field selector bits
 * consumed by hswep_cbox_filter_mask(): each entry is
 * (event match, config mask, filter-field idx bits).
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2499
2500 static u64 hswep_cbox_filter_mask(int fields)
2501 {
2502         u64 mask = 0;
2503         if (fields & 0x1)
2504                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2505         if (fields & 0x2)
2506                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2507         if (fields & 0x4)
2508                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2509         if (fields & 0x8)
2510                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2511         if (fields & 0x10) {
2512                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2513                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2514                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2515                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2516         }
2517         return mask;
2518 }
2519
/* Thin wrapper: shared SNB-EP CBOX constraint logic with the HSW-EP filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2525
2526 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2527 {
2528         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2529         struct extra_reg *er;
2530         int idx = 0;
2531
2532         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2533                 if (er->event != (event->hw.config & er->config_mask))
2534                         continue;
2535                 idx |= er->idx;
2536         }
2537
2538         if (idx) {
2539                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2540                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2541                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2542                 reg1->idx = idx;
2543         }
2544         return 0;
2545 }
2546
/*
 * Enable a CBOX event: if a filter is attached, write the 64-bit shared
 * filter value as two 32-bit halves into consecutive filter MSRs
 * (FILTER0/FILTER1) before setting the enable bit in the event control.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2561
/* CBOX ops: SNB-EP box control reused, with HSW-EP-specific enable/config/constraints. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2573
/*
 * HSW-EP CBOX: up to 18 boxes (trimmed to the core count in
 * hswep_uncore_cpu_init()), four 48-bit counters per box, per-box MSR
 * stride HSWEP_CBO_MSR_OFFSET, one shared register slot for the filter.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2589
2590 /*
2591  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2592  */
/*
 * Initialize an SBOX by writing the box-control init value one bit at a
 * time (accumulating into 'flags' so previously-set bits stay set).
 * Writing SNBEP_PMON_BOX_CTL_INT in a single wrmsrl can raise a spurious
 * #GP on these parts, per the comment above; do not collapse this loop.
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2608
/* SBOX ops: common SNB-EP MSR ops with init_box overridden by the #GP-safe variant. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2613
/* sysfs "format" attributes for HSW-EP SBOX events. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2623
/* Groups the HSW-EP SBOX format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2628
/*
 * HSW-EP SBOX: 4 boxes by default (reduced to 2 on 6-8 core parts in
 * hswep_uncore_cpu_init()), four 44-bit counters per box.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2642
2643 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2644 {
2645         struct hw_perf_event *hwc = &event->hw;
2646         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2647         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2648
2649         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2650                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2651                 reg1->idx = ev_sel - 0xb;
2652                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2653         }
2654         return 0;
2655 }
2656
/* PCU ops: common SNB-EP MSR ops plus band-filter config and SNB-EP PCU constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2663
/*
 * HSW-EP PCU: one box, four 48-bit counters, one shared register slot
 * backing the occupancy-band filter MSR.
 */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2677
/* NULL-terminated list of all HSW-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2685
/*
 * Register the HSW-EP MSR-based uncore PMUs, adjusting box counts for the
 * actual part: cap the CBOX count at the core count, and probe the PCU
 * PCI device to detect 6-8 core SKUs that only have two SBOXes.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/*
		 * Config offset 0x94 is presumably the CAPID4 capability
		 * register; bits 7:6 == 0 indicate the two-SBOX SKU —
		 * NOTE(review): confirm against the Xeon E5 v3 datasheet.
		 */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2705
/* HSW-EP Home Agent: two PCI-based boxes, four 48-bit counters, common SNB-EP layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2713
/*
 * Named IMC events exposed via sysfs.  The CAS-count scale 6.103515625e-5
 * converts a 64-byte-line count to MiB (64 / 2^20).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2724
/*
 * HSW-EP IMC: 8 PCI-based boxes (two controllers x four channels), four
 * 48-bit general counters plus a 48-bit fixed (DCLK) counter per box.
 */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2736
/* PCI config-space offsets of the four IRP counters, indexed by hwc->idx. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2738
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads, composing the
 * low and high dwords directly into 'count' (low dword first, then the
 * dword at offset +4 into the upper half — relies on x86 little-endian
 * layout of the u64).
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2750
/* IRP ops: SNB-EP box control, IVB-EP event enable/disable, custom counter read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2759
/*
 * HSW-EP IRP: one PCI-based box with four 48-bit counters.  No perf_ctr /
 * event_ctl offsets here — the custom ops address the counters directly.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2770
/*
 * HSW-EP QPI: three PCI-based boxes, four 48-bit counters, SNB-EP QPI
 * event mask/ops/format and one shared register slot for match registers.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2784
/* R2PCIe events restricted to specific counters: (event code, allowed-counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2806
/* HSW-EP R2PCIe: one PCI-based box, four 48-bit counters, common SNB-EP layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2815
/* R3QPI events restricted to specific counters: (event code, allowed-counter mask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2852
/* HSW-EP R3QPI: three PCI-based boxes, three 44-bit counters each, common SNB-EP layout. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2861
/* Indices into hswep_pci_uncores[] (also encoded into pci_device_id driver_data). */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2870
/* NULL-terminated table of all HSW-EP PCI-based uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2880
/*
 * PCI IDs of the Haswell-EP uncore PMON devices.  driver_data encodes the
 * uncore type index (HSWEP_PCI_UNCORE_*) and the box index within that
 * type; UNCORE_EXTRA_PCI_DEV entries are auxiliary devices (QPI port
 * filters, PCU.3) referenced by the PMU code rather than PMON boxes.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2971
/* PCI driver for the HSW-EP uncore devices; id_table only, no probe/remove here. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2976
2977 int hswep_uncore_pci_init(void)
2978 {
2979         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2980         if (ret)
2981                 return ret;
2982         uncore_pci_uncores = hswep_pci_uncores;
2983         uncore_pci_driver = &hswep_uncore_pci_driver;
2984         return 0;
2985 }
2986 /* end of Haswell-EP uncore support */
2987
2988 /* BDX uncore support */
2989
/*
 * BDX Ubox PMON: two general MSR counters plus a fixed UCLK counter,
 * reusing the HSW-EP Ubox register layout and IVB-EP MSR ops.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3005
/* BDX CBox events restricted to a subset of counters (event code, counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3013
/*
 * BDX CBox PMON: up to 24 boxes; the count is clamped to the actual core
 * count (boot_cpu_data.x86_max_cores) in bdx_uncore_cpu_init() below.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3029
/* MSR-based BDX uncore PMUs (PCU is shared with HSW-EP); NULL terminated. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};
3036
3037 void bdx_uncore_cpu_init(void)
3038 {
3039         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3040                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3041         uncore_msr_uncores = bdx_msr_uncores;
3042 }
3043
/* BDX Home Agent PMON: two boxes using the common SNB-EP PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3051
/*
 * BDX IMC PMON: eight channels, each with a fixed counter in addition to
 * the four general ones; reuses the HSW-EP IMC event descriptions.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3063
/*
 * BDX IRP PMON.  No .perf_ctr/.event_ctl here: hswep_uncore_irp_ops
 * supplies the (non-uniform) register access for this box.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3074
/*
 * BDX QPI PMON: three ports; one shared reg for the match/mask filter
 * registers managed by snbep_uncore_qpi_ops.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3088
/* BDX R2PCIe events restricted to a subset of counters (event code, counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3101
/* BDX R2PCIe PMON: single box, common SNB-EP PCI PMON layout. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3110
/* BDX R3QPI events restricted to a subset of counters (event code, counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3144
/* BDX R3QPI PMON: three boxes of three counters, common SNB-EP PCI layout. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3153
/* Indices into bdx_pci_uncores[]; encoded in the driver_data below. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3162
/* PCI-based BDX uncore types, indexed by BDX_PCI_UNCORE_*; NULL terminated. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3172
3173 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3174         { /* Home Agent 0 */
3175                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3176                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3177         },
3178         { /* Home Agent 1 */
3179                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3180                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3181         },
3182         { /* MC0 Channel 0 */
3183                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3184                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3185         },
3186         { /* MC0 Channel 1 */
3187                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3188                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3189         },
3190         { /* MC0 Channel 2 */
3191                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3192                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3193         },
3194         { /* MC0 Channel 3 */
3195                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3196                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3197         },
3198         { /* MC1 Channel 0 */
3199                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3200                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3201         },
3202         { /* MC1 Channel 1 */
3203                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3204                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3205         },
3206         { /* MC1 Channel 2 */
3207                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3208                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3209         },
3210         { /* MC1 Channel 3 */
3211                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3212                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3213         },
3214         { /* IRP */
3215                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3216                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3217         },
3218         { /* QPI0 Port 0 */
3219                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3220                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3221         },
3222         { /* QPI0 Port 1 */
3223                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3224                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3225         },
3226         { /* QPI1 Port 2 */
3227                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3228                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3229         },
3230         { /* R2PCIe */
3231                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3232                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3233         },
3234         { /* R3QPI0 Link 0 */
3235                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3236                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3237         },
3238         { /* R3QPI0 Link 1 */
3239                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3240                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3241         },
3242         { /* R3QPI1 Link 2 */
3243                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3244                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3245         },
3246         { /* QPI Port 0 filter  */
3247                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3248                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
3249         },
3250         { /* QPI Port 1 filter  */
3251                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3252                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
3253         },
3254         { /* QPI Port 2 filter  */
3255                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3256                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
3257         },
3258         { /* end: all zeroes */ }
3259 };
3260
/* PCI driver for the BDX uncore devices; id_table only, no probe/remove here. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3265
3266 int bdx_uncore_pci_init(void)
3267 {
3268         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3269
3270         if (ret)
3271                 return ret;
3272         uncore_pci_uncores = bdx_pci_uncores;
3273         uncore_pci_driver = &bdx_uncore_pci_driver;
3274         return 0;
3275 }
3276
3277 /* end of BDX uncore support */
3278
3279 /* SKX uncore support */
3280
/*
 * SKX Ubox PMON: two general MSR counters plus a fixed UCLK counter,
 * reusing the HSW-EP Ubox register layout and IVB-EP MSR ops.
 */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3295
3296 static struct attribute *skx_uncore_cha_formats_attr[] = {
3297         &format_attr_event.attr,
3298         &format_attr_umask.attr,
3299         &format_attr_edge.attr,
3300         &format_attr_tid_en.attr,
3301         &format_attr_inv.attr,
3302         &format_attr_thresh8.attr,
3303         &format_attr_filter_tid4.attr,
3304         &format_attr_filter_state5.attr,
3305         &format_attr_filter_rem.attr,
3306         &format_attr_filter_loc.attr,
3307         &format_attr_filter_nm.attr,
3308         &format_attr_filter_all_op.attr,
3309         &format_attr_filter_not_nm.attr,
3310         &format_attr_filter_opc_0.attr,
3311         &format_attr_filter_opc_1.attr,
3312         &format_attr_filter_nc.attr,
3313         &format_attr_filter_isoc.attr,
3314         NULL,
3315 };
3316
3317 static struct attribute_group skx_uncore_chabox_format_group = {
3318         .name = "format",
3319         .attrs = skx_uncore_cha_formats_attr,
3320 };
3321
/* SKX CHA events restricted to counter 0 (event code, counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3327
3328 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3329         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3330         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3331         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3332         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3333         SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3334         SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3335         SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3336         SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3337         EVENT_EXTRA_END
3338 };
3339
3340 static u64 skx_cha_filter_mask(int fields)
3341 {
3342         u64 mask = 0;
3343
3344         if (fields & 0x1)
3345                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3346         if (fields & 0x2)
3347                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3348         if (fields & 0x4)
3349                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3350         if (fields & 0x8) {
3351                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3352                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3353                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3354                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3355                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3356                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3357                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3358                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3359                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3360         }
3361         return mask;
3362 }
3363
/*
 * Arbitrate the shared CHA filter register through the common SNB-EP CBox
 * helper, parameterized with the SKX filter-field mask function.
 */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3369
/*
 * Per-event CHA setup.  Accumulates the filter-field idx bits of every
 * extra-reg entry matching the event, then points the event's extra_reg
 * at this box's FILTER0 MSR with config1 masked down to the legal bits.
 *
 * NOTE(review): later kernels also force the TID filter field whenever
 * tid_en is set in the event config; that is absent here -- confirm
 * whether this version intentionally omits it.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are per-box; offset by the box (pmu) index. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3390
/* SKX CHA box ops: SNB-EP style box control plus SKX filter handling. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3403
/*
 * SKX CHA PMON.  num_boxes is intentionally left unset here; it is
 * probed at runtime via skx_count_chabox() in skx_uncore_cpu_init().
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3418
3419 static struct attribute *skx_uncore_iio_formats_attr[] = {
3420         &format_attr_event.attr,
3421         &format_attr_umask.attr,
3422         &format_attr_edge.attr,
3423         &format_attr_inv.attr,
3424         &format_attr_thresh9.attr,
3425         &format_attr_ch_mask.attr,
3426         &format_attr_fc_mask.attr,
3427         NULL,
3428 };
3429
3430 static struct attribute_group skx_uncore_iio_format_group = {
3431         .name = "format",
3432         .attrs = skx_uncore_iio_formats_attr,
3433 };
3434
/* SKX IIO events restricted to a subset of counters (event code, counter mask). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3444
3445 static void skx_iio_enable_event(struct intel_uncore_box *box,
3446                                  struct perf_event *event)
3447 {
3448         struct hw_perf_event *hwc = &event->hw;
3449
3450         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3451 }
3452
/* SKX IIO box ops: common SNB-EP control with the SKX event-enable path. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3461
3462 static struct intel_uncore_type skx_uncore_iio = {
3463         .name                   = "iio",
3464         .num_counters           = 4,
3465         .num_boxes              = 5,
3466         .perf_ctr_bits          = 48,
3467         .event_ctl              = SKX_IIO0_MSR_PMON_CTL0,
3468         .perf_ctr               = SKX_IIO0_MSR_PMON_CTR0,
3469         .event_mask             = SKX_IIO_PMON_RAW_EVENT_MASK,
3470         .event_mask_ext         = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3471         .box_ctl                = SKX_IIO0_MSR_PMON_BOX_CTL,
3472         .msr_offset             = SKX_IIO_MSR_OFFSET,
3473         .constraints            = skx_uncore_iio_constraints,
3474         .ops                    = &skx_uncore_iio_ops,
3475         .format_group           = &skx_uncore_iio_format_group,
3476 };
3477
3478 static struct attribute *skx_uncore_formats_attr[] = {
3479         &format_attr_event.attr,
3480         &format_attr_umask.attr,
3481         &format_attr_edge.attr,
3482         &format_attr_inv.attr,
3483         &format_attr_thresh8.attr,
3484         NULL,
3485 };
3486
3487 static struct attribute_group skx_uncore_format_group = {
3488         .name = "format",
3489         .attrs = skx_uncore_formats_attr,
3490 };
3491
3492 static struct intel_uncore_type skx_uncore_irp = {
3493         .name                   = "irp",
3494         .num_counters           = 2,
3495         .num_boxes              = 5,
3496         .perf_ctr_bits          = 48,
3497         .event_ctl              = SKX_IRP0_MSR_PMON_CTL0,
3498         .perf_ctr               = SKX_IRP0_MSR_PMON_CTR0,
3499         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
3500         .box_ctl                = SKX_IRP0_MSR_PMON_BOX_CTL,
3501         .msr_offset             = SKX_IRP_MSR_OFFSET,
3502         .ops                    = &skx_uncore_iio_ops,
3503         .format_group           = &skx_uncore_format_group,
3504 };
3505
3506 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3507         &format_attr_event.attr,
3508         &format_attr_umask.attr,
3509         &format_attr_edge.attr,
3510         &format_attr_inv.attr,
3511         &format_attr_thresh8.attr,
3512         &format_attr_occ_invert.attr,
3513         &format_attr_occ_edge_det.attr,
3514         &format_attr_filter_band0.attr,
3515         &format_attr_filter_band1.attr,
3516         &format_attr_filter_band2.attr,
3517         &format_attr_filter_band3.attr,
3518         NULL,
3519 };
3520
3521 static struct attribute_group skx_uncore_pcu_format_group = {
3522         .name = "format",
3523         .attrs = skx_uncore_pcu_formats_attr,
3524 };
3525
/* SKX PCU ops: IVB-EP MSR access plus HSW-EP config and SNB-EP filter sharing. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3532
/* SKX PCU PMON: single box, HSW-EP register layout, one shared filter reg. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3546
/* MSR-based SKX uncore PMUs; NULL terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3555
/*
 * Count the CHA boxes by enumerating instances of PCI device 0x208d on
 * the first bus they appear on; the bus check stops the walk so only one
 * socket's CHAs are counted (presumably one 0x208d device exists per
 * CHA -- TODO confirm against the SKX uncore manual).
 *
 * pci_get_device() drops the reference on the device passed in, so only
 * the device held when the bus check breaks the loop needs the final
 * pci_dev_put() (which is a no-op when the loop ended on NULL).
 */
static int skx_count_chabox(void)
{
	struct pci_dev *chabox_dev = NULL;
	int bus, count = 0;

	while (1) {
		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
		if (!chabox_dev)
			break;
		if (count == 0)
			bus = chabox_dev->bus->number;
		if (bus != chabox_dev->bus->number)
			break;
		count++;
	}

	pci_dev_put(chabox_dev);
	return count;
}
3575
/* Install the SKX MSR-based uncore PMUs; the CHA box count is probed from PCI. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3581
/*
 * SKX IMC PMON: six channels, each with a fixed counter; reuses the
 * HSW-EP IMC event descriptions and SNB-EP PCI register layout.
 */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3598
3599 static struct attribute *skx_upi_uncore_formats_attr[] = {
3600         &format_attr_event_ext.attr,
3601         &format_attr_umask_ext.attr,
3602         &format_attr_edge.attr,
3603         &format_attr_inv.attr,
3604         &format_attr_thresh8.attr,
3605         NULL,
3606 };
3607
3608 static struct attribute_group skx_upi_uncore_format_group = {
3609         .name = "format",
3610         .attrs = skx_upi_uncore_formats_attr,
3611 };
3612
/*
 * Reset a UPI box via its box control register.  The CTL_OFFS8 flag tells
 * the generic code the per-counter control registers are spaced 8 bytes
 * apart on this box (presumably unlike the default layout -- see the
 * flag's users in uncore.c).
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3620
/* SKX UPI box ops: SNB-EP PCI accessors with the UPI-specific box init. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3629
/* SKX UPI PMON: three links, with an extended umask field (event_mask_ext). */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3643
3644 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3645 {
3646         struct pci_dev *pdev = box->pci_dev;
3647
3648         __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3649         pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3650 }
3651
/*
 * PCI config-space accessors for the M2M boxes: generic SNB-EP helpers
 * everywhere except init_box, which needs the SKX M2M box-control offset.
 */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3660
/*
 * M2M PMON description (presumably Mesh-to-Memory, one per memory
 * controller — confirm against the SKX uncore manual): 2 boxes with
 * 4 x 48-bit counters, standard SNB-EP raw event layout.
 */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3673
/* M2PCIe scheduling constraint: event 0x23 only on counters 0-1 (mask 0x3). */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3678
/*
 * M2PCIe PMON description: 4 boxes with 4 x 48-bit counters, standard
 * SNB-EP PCI register layout and IVB-EP PCI ops, plus the counter
 * constraint for event 0x23 above.
 */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3692
/*
 * M3UPI scheduling constraints: events 0x1d/0x1e are restricted to
 * counter 0 (mask 0x1); events 0x40 and 0x4e-0x52 may use counters
 * 0-2 (mask 0x7).
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3704
/*
 * M3UPI PMON description (UPI mesh interface): 3 boxes (one per link)
 * with 3 x 48-bit counters, standard SNB-EP layout plus the counter
 * constraints above.
 */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3718
/*
 * Indices into skx_pci_uncores[]; also encoded into each PCI table entry's
 * driver_data via UNCORE_PCI_DEV_FULL_DATA() below.
 */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3726
/*
 * All SKX PCI-based uncore PMU types, indexed by the SKX_PCI_UNCORE_*
 * enum.  NULL-terminated; installed as uncore_pci_uncores in
 * skx_uncore_pci_init().
 */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3735
3736 static const struct pci_device_id skx_uncore_pci_ids[] = {
3737         { /* MC0 Channel 0 */
3738                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3739                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
3740         },
3741         { /* MC0 Channel 1 */
3742                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3743                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
3744         },
3745         { /* MC0 Channel 2 */
3746                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3747                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
3748         },
3749         { /* MC1 Channel 0 */
3750                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3751                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
3752         },
3753         { /* MC1 Channel 1 */
3754                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3755                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
3756         },
3757         { /* MC1 Channel 2 */
3758                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3759                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
3760         },
3761         { /* M2M0 */
3762                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3763                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
3764         },
3765         { /* M2M1 */
3766                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3767                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
3768         },
3769         { /* UPI0 Link 0 */
3770                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3771                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
3772         },
3773         { /* UPI0 Link 1 */
3774                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3775                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
3776         },
3777         { /* UPI1 Link 2 */
3778                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3779                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
3780         },
3781         { /* M2PCIe 0 */
3782                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3783                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
3784         },
3785         { /* M2PCIe 1 */
3786                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3787                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
3788         },
3789         { /* M2PCIe 2 */
3790                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3791                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
3792         },
3793         { /* M2PCIe 3 */
3794                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3795                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
3796         },
3797         { /* M3UPI0 Link 0 */
3798                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3799                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
3800         },
3801         { /* M3UPI0 Link 1 */
3802                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3803                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
3804         },
3805         { /* M3UPI1 Link 2 */
3806                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3807                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
3808         },
3809         { /* end: all zeroes */ }
3810 };
3811
3812
/*
 * PCI driver used by the generic uncore code to match the PMON devices
 * in skx_uncore_pci_ids.  Only name and id_table are set here;
 * presumably probe/remove are filled in by the common uncore PCI
 * code — confirm in uncore.c.
 */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3817
3818 int skx_uncore_pci_init(void)
3819 {
3820         /* need to double check pci address */
3821         int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
3822
3823         if (ret)
3824                 return ret;
3825
3826         uncore_pci_uncores = skx_pci_uncores;
3827         uncore_pci_driver = &skx_uncore_pci_driver;
3828         return 0;
3829 }
3830
3831 /* end of SKX uncore support */