// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

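/*
 * Build an extra_reg entry for the Cbox filter register: an event whose
 * control bits match @e under mask @m needs the filter fields named by
 * the @i bitmask (decoded by snbep_cbox_filter_mask() below).
 */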
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

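/*
 * Extract the i-th n-bit wide field of x, e.g. __BITS_VALUE(x, 2, 6)
 * yields bits [17:12].  Used below to unpack the per-field reference
 * counts that the filter-sharing code packs into a single atomic_t.
 */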
#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
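/*
 * Per the layout above, the bus number BIOS assigned to CPUBUSNO(i) is
 * (msr_value >> (i * BUS_NUM_STRIDE)) & 0xff, valid only when
 * SKX_MSR_CPU_BUS_VALID_BIT is set.
 */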

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID	0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)	(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
#define SNR_IMC_MMIO_PMON_CTL0		0x40
#define SNR_IMC_MMIO_PMON_CTR0		0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL	0x22800
#define SNR_IMC_MMIO_OFFSET		0x4000
#define SNR_IMC_MMIO_SIZE		0x4000
#define SNR_IMC_MMIO_BASE_OFFSET	0xd0
#define SNR_IMC_MMIO_BASE_MASK		0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET	0xd8
#define SNR_IMC_MMIO_MEM0_MASK		0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0		0xb68
#define ICX_C34_MSR_PMON_CTL0		0xb61
#define ICX_C34_MSR_PMON_BOX_CTL	0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0	0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0		0xa58
#define ICX_IIO_MSR_PMON_CTR0		0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL	0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0		0xa4d
#define ICX_IRP0_MSR_PMON_CTR0		0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL	0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0	0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0	0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL	0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0		0x350
#define ICX_UPI_PCI_PMON_CTR0		0x320
#define ICX_UPI_PCI_PMON_BOX_CTL	0x318
#define ICX_UPI_CTL_UMASK_EXT		0xffffff
#define ICX_UBOX_DID			0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0		0xd8
#define ICX_M3UPI_PCI_PMON_CTR0		0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL	0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN		3
#define ICX_IMC_MEM_STRIDE		0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT		0xffffff
#define SPR_UBOX_DID			0x3250

/* SPR CHA */
#define SPR_CHA_EVENT_MASK_EXT		0xffffffff
#define SPR_CHA_PMON_CTL_TID_EN		(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
					 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID	0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0	0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

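/*
 * PCI-side counters are up to 48 bits wide, but config space is read one
 * dword at a time, so the value is assembled from two 32-bit reads.
 */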
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrq(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrq(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrq(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrq(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

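/*
 * The Cbox filter register is shared by all counters in a box.  Each
 * filter field carries a 6-bit reference count packed into er->ref; a
 * field may only be (re)programmed while its count is zero or when the
 * requested value matches what is already programmed.
 */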
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

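/*
 * The PCU band filter packs one 8-bit threshold per counter index into
 * config1.  Moving an event to a different index shifts its filter byte
 * into the new lane and bumps the event select to the matching band
 * event (0xb-0xe).
 */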
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

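/*
 * Event selects 0xb-0xe are the PCU frequency/occupancy band events;
 * each consumes the corresponding filter_band byte of config1.
 */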
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

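/*
 * SNB-EP has one Cbox per physical core, so trim the Cbox count on
 * parts that expose fewer cores than the type's default of eight.
 */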
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

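/*
 * Event select 0x38 is the QPI packet-matching event: config1 supplies
 * the 64-bit MATCH value and config2 the 64-bit MASK.
 */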
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

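/*
 * The QPI match/mask registers live on a separate PCI function which the
 * generic uncore PCI probe stashes in uncore_extra_pci_dev[], so they
 * are programmed through that device rather than the counter's own.
 */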
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Every three-bit field from bit 0 to bit 23 of the GIDNIDMAP register corresponds to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)

static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
			      int *nodeid, int *groupid)
{
	int ret;

	/* get the Node ID of the local register */
	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
	if (ret)
		goto err;

	*nodeid = *nodeid & NODE_ID_MASK;
	/* get the Node ID mapping */
	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
	if (ret)
		goto err;
err:
	return ret;
}

static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i, die_id = -1;

	/*
	 * Every three bits in the Node ID mapping register map
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i)) {
			if (topology_max_dies_per_package() > 1)
				die_id = i;
			else
				die_id = topology_phys_to_logical_pkg(i);
			if (die_id < 0)
				die_id = -ENODEV;
			break;
		}
	}

	return die_id;
}

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);

			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return pcibios_err_to_errno(err);
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
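/*
 * The IvyTown box-control init value differs from SNB-EP: it sets only
 * the counter/control reset bits and omits FRZ_EN.  Per-box freezing
 * still toggles SNBEP_PMON_BOX_CTL_FRZ via the shared SNB-EP helpers.
 */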
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
1676 .name = "ubox",
1677 .num_counters = 2,
1678 .num_boxes = 1,
1679 .perf_ctr_bits = 44,
1680 .fixed_ctr_bits = 48,
1681 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1682 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1683 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1684 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1685 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1686 .ops = &ivbep_uncore_msr_ops,
1687 .format_group = &ivbep_uncore_ubox_format_group,
1688 };
1689
1690 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1691 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1692 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1693 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1694 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1695 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1696 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1697 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1698 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1699 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1700 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1701 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1702 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1703 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1704 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1705 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1706 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1707 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1708 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1709 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1710 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1711 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1712 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1713 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1714 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1715 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1716 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1717 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1718 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1719 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1720 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1721 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1722 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1723 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1724 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1725 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1726 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1727 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1728 EVENT_EXTRA_END
1729 };
1730
1731 static u64 ivbep_cbox_filter_mask(int fields)
1732 {
1733 u64 mask = 0;
1734
1735 if (fields & 0x1)
1736 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1737 if (fields & 0x2)
1738 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1739 if (fields & 0x4)
1740 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1741 if (fields & 0x8)
1742 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1743 if (fields & 0x10) {
1744 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1745 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1746 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1747 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1748 }
1749
1750 return mask;
1751 }
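
/*
 * Example (hypothetical field combination): if ivbep_cbox_hw_config()
 * below accumulates idx = 0x5 (TID | STATE), then
 *
 *	ivbep_cbox_filter_mask(0x5) == IVBEP_CB0_MSR_PMON_BOX_FILTER_TID |
 *				       IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE
 *
 * and only those fields of attr.config1 survive into reg1->config.
 */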
1752
1753 static struct event_constraint *
1754 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1755 {
1756 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1757 }
1758
1759 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1760 {
1761 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1762 struct extra_reg *er;
1763 int idx = 0;
1764
1765 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1766 if (er->event != (event->hw.config & er->config_mask))
1767 continue;
1768 idx |= er->idx;
1769 }
1770
1771 if (idx) {
1772 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1773 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1774 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1775 reg1->idx = idx;
1776 }
1777 return 0;
1778 }
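
/*
 * Usage sketch (hypothetical raw encoding): a C-Box event programmed as
 * config = 0x1134 (event 0x34, umask 0x11) matches the
 * SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4) row above, so idx
 * becomes 0x4 and reg1 points at this box's FILTER MSR with only the
 * NID field of attr.config1 retained; setting the tid_en control bit
 * would additionally OR in idx 0x1 via the first table row.
 */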
1779
1780 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1781 {
1782 struct hw_perf_event *hwc = &event->hw;
1783 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1784
1785 if (reg1->idx != EXTRA_REG_NONE) {
1786 u64 filter = uncore_shared_reg_config(box, 0);
1787 wrmsrq(reg1->reg, filter & 0xffffffff);
1788 wrmsrq(reg1->reg + 6, filter >> 32);
1789 }
1790
1791 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1792 }
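
/*
 * The filter value is 64 bits wide but is programmed as two 32-bit MSR
 * writes; on IvyTown the second filter register sits 6 MSRs above the
 * first, hence reg1->reg + 6 for the high dword.  Compare
 * hswep_cbox_enable_event() below, where the pair is adjacent.
 */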
1793
1794 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1795 .init_box = ivbep_uncore_msr_init_box,
1796 .disable_box = snbep_uncore_msr_disable_box,
1797 .enable_box = snbep_uncore_msr_enable_box,
1798 .disable_event = snbep_uncore_msr_disable_event,
1799 .enable_event = ivbep_cbox_enable_event,
1800 .read_counter = uncore_msr_read_counter,
1801 .hw_config = ivbep_cbox_hw_config,
1802 .get_constraint = ivbep_cbox_get_constraint,
1803 .put_constraint = snbep_cbox_put_constraint,
1804 };
1805
1806 static struct intel_uncore_type ivbep_uncore_cbox = {
1807 .name = "cbox",
1808 .num_counters = 4,
1809 .num_boxes = 15,
1810 .perf_ctr_bits = 44,
1811 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1812 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1813 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1814 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1815 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1816 .num_shared_regs = 1,
1817 .constraints = snbep_uncore_cbox_constraints,
1818 .ops = &ivbep_uncore_cbox_ops,
1819 .format_group = &ivbep_uncore_cbox_format_group,
1820 };
1821
1822 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1823 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1824 .hw_config = snbep_pcu_hw_config,
1825 .get_constraint = snbep_pcu_get_constraint,
1826 .put_constraint = snbep_pcu_put_constraint,
1827 };
1828
1829 static struct intel_uncore_type ivbep_uncore_pcu = {
1830 .name = "pcu",
1831 .num_counters = 4,
1832 .num_boxes = 1,
1833 .perf_ctr_bits = 48,
1834 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1835 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1836 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1837 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1838 .num_shared_regs = 1,
1839 .ops = &ivbep_uncore_pcu_ops,
1840 .format_group = &ivbep_uncore_pcu_format_group,
1841 };
1842
1843 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1844 &ivbep_uncore_ubox,
1845 &ivbep_uncore_cbox,
1846 &ivbep_uncore_pcu,
1847 NULL,
1848 };
1849
1850 void ivbep_uncore_cpu_init(void)
1851 {
1852 if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1853 ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1854 uncore_msr_uncores = ivbep_msr_uncores;
1855 }
1856
1857 static struct intel_uncore_type ivbep_uncore_ha = {
1858 .name = "ha",
1859 .num_counters = 4,
1860 .num_boxes = 2,
1861 .perf_ctr_bits = 48,
1862 IVBEP_UNCORE_PCI_COMMON_INIT(),
1863 };
1864
1865 static struct intel_uncore_type ivbep_uncore_imc = {
1866 .name = "imc",
1867 .num_counters = 4,
1868 .num_boxes = 8,
1869 .perf_ctr_bits = 48,
1870 .fixed_ctr_bits = 48,
1871 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1872 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1873 .event_descs = snbep_uncore_imc_events,
1874 IVBEP_UNCORE_PCI_COMMON_INIT(),
1875 };
1876
1877 /* registers in IRP boxes are not properly aligned */
1878 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1879 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1880
1881 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1882 {
1883 struct pci_dev *pdev = box->pci_dev;
1884 struct hw_perf_event *hwc = &event->hw;
1885
1886 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1887 hwc->config | SNBEP_PMON_CTL_EN);
1888 }
1889
1890 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1891 {
1892 struct pci_dev *pdev = box->pci_dev;
1893 struct hw_perf_event *hwc = &event->hw;
1894
1895 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1896 }
1897
1898 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1899 {
1900 struct pci_dev *pdev = box->pci_dev;
1901 struct hw_perf_event *hwc = &event->hw;
1902 u64 count = 0;
1903
1904 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1905 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1906
1907 return count;
1908 }
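
/*
 * The IRP counters are 64 bits wide but PCI config space is accessed in
 * 32-bit chunks, so the two reads above assemble the full value; storing
 * through (u32 *)&count relies on the little-endian layout of x86.
 */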
1909
1910 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1911 .init_box = ivbep_uncore_pci_init_box,
1912 .disable_box = snbep_uncore_pci_disable_box,
1913 .enable_box = snbep_uncore_pci_enable_box,
1914 .disable_event = ivbep_uncore_irp_disable_event,
1915 .enable_event = ivbep_uncore_irp_enable_event,
1916 .read_counter = ivbep_uncore_irp_read_counter,
1917 };
1918
1919 static struct intel_uncore_type ivbep_uncore_irp = {
1920 .name = "irp",
1921 .num_counters = 4,
1922 .num_boxes = 1,
1923 .perf_ctr_bits = 48,
1924 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1925 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1926 .ops = &ivbep_uncore_irp_ops,
1927 .format_group = &ivbep_uncore_format_group,
1928 };
1929
1930 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1931 .init_box = ivbep_uncore_pci_init_box,
1932 .disable_box = snbep_uncore_pci_disable_box,
1933 .enable_box = snbep_uncore_pci_enable_box,
1934 .disable_event = snbep_uncore_pci_disable_event,
1935 .enable_event = snbep_qpi_enable_event,
1936 .read_counter = snbep_uncore_pci_read_counter,
1937 .hw_config = snbep_qpi_hw_config,
1938 .get_constraint = uncore_get_constraint,
1939 .put_constraint = uncore_put_constraint,
1940 };
1941
1942 static struct intel_uncore_type ivbep_uncore_qpi = {
1943 .name = "qpi",
1944 .num_counters = 4,
1945 .num_boxes = 3,
1946 .perf_ctr_bits = 48,
1947 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1948 .event_ctl = SNBEP_PCI_PMON_CTL0,
1949 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1950 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1951 .num_shared_regs = 1,
1952 .ops = &ivbep_uncore_qpi_ops,
1953 .format_group = &ivbep_uncore_qpi_format_group,
1954 };
1955
1956 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1957 .name = "r2pcie",
1958 .num_counters = 4,
1959 .num_boxes = 1,
1960 .perf_ctr_bits = 44,
1961 .constraints = snbep_uncore_r2pcie_constraints,
1962 IVBEP_UNCORE_PCI_COMMON_INIT(),
1963 };
1964
1965 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1966 .name = "r3qpi",
1967 .num_counters = 3,
1968 .num_boxes = 2,
1969 .perf_ctr_bits = 44,
1970 .constraints = snbep_uncore_r3qpi_constraints,
1971 IVBEP_UNCORE_PCI_COMMON_INIT(),
1972 };
1973
1974 enum {
1975 IVBEP_PCI_UNCORE_HA,
1976 IVBEP_PCI_UNCORE_IMC,
1977 IVBEP_PCI_UNCORE_IRP,
1978 IVBEP_PCI_UNCORE_QPI,
1979 IVBEP_PCI_UNCORE_R2PCIE,
1980 IVBEP_PCI_UNCORE_R3QPI,
1981 };
1982
1983 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1984 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1985 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1986 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1987 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1988 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1989 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1990 NULL,
1991 };
1992
1993 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1994 { /* Home Agent 0 */
1995 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1996 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1997 },
1998 { /* Home Agent 1 */
1999 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
2000 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
2001 },
2002 { /* MC0 Channel 0 */
2003 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
2004 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
2005 },
2006 { /* MC0 Channel 1 */
2007 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
2008 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
2009 },
2010 { /* MC0 Channel 3 */
2011 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
2012 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
2013 },
2014 { /* MC0 Channel 4 */
2015 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
2016 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
2017 },
2018 { /* MC1 Channel 0 */
2019 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
2020 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2021 },
2022 { /* MC1 Channel 1 */
2023 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2024 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2025 },
2026 { /* MC1 Channel 3 */
2027 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2028 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2029 },
2030 { /* MC1 Channel 4 */
2031 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2032 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2033 },
2034 { /* IRP */
2035 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2036 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2037 },
2038 { /* QPI0 Port 0 */
2039 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2040 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2041 },
2042 { /* QPI0 Port 1 */
2043 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2044 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2045 },
2046 { /* QPI1 Port 2 */
2047 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2048 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2049 },
2050 { /* R2PCIe */
2051 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2052 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2053 },
2054 { /* R3QPI0 Link 0 */
2055 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2056 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2057 },
2058 { /* R3QPI0 Link 1 */
2059 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2060 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2061 },
2062 { /* R3QPI1 Link 2 */
2063 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2064 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2065 },
2066 { /* QPI Port 0 filter */
2067 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2068 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2069 SNBEP_PCI_QPI_PORT0_FILTER),
2070 },
2071 { /* QPI Port 1 filter */
2072 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2073 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2074 SNBEP_PCI_QPI_PORT1_FILTER),
2075 },
2076 { /* end: all zeroes */ }
2077 };
2078
2079 static struct pci_driver ivbep_uncore_pci_driver = {
2080 .name = "ivbep_uncore",
2081 .id_table = ivbep_uncore_pci_ids,
2082 };
2083
2084 int ivbep_uncore_pci_init(void)
2085 {
2086 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2087 if (ret)
2088 return ret;
2089 uncore_pci_uncores = ivbep_pci_uncores;
2090 uncore_pci_driver = &ivbep_uncore_pci_driver;
2091 return 0;
2092 }
2093 /* end of IvyTown uncore support */
2094
2095 /* KNL uncore support */
2096 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2097 &format_attr_event.attr,
2098 &format_attr_umask.attr,
2099 &format_attr_edge.attr,
2100 &format_attr_tid_en.attr,
2101 &format_attr_inv.attr,
2102 &format_attr_thresh5.attr,
2103 NULL,
2104 };
2105
2106 static const struct attribute_group knl_uncore_ubox_format_group = {
2107 .name = "format",
2108 .attrs = knl_uncore_ubox_formats_attr,
2109 };
2110
2111 static struct intel_uncore_type knl_uncore_ubox = {
2112 .name = "ubox",
2113 .num_counters = 2,
2114 .num_boxes = 1,
2115 .perf_ctr_bits = 48,
2116 .fixed_ctr_bits = 48,
2117 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2118 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2119 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2120 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2121 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2122 .ops = &snbep_uncore_msr_ops,
2123 .format_group = &knl_uncore_ubox_format_group,
2124 };
2125
2126 static struct attribute *knl_uncore_cha_formats_attr[] = {
2127 &format_attr_event.attr,
2128 &format_attr_umask.attr,
2129 &format_attr_qor.attr,
2130 &format_attr_edge.attr,
2131 &format_attr_tid_en.attr,
2132 &format_attr_inv.attr,
2133 &format_attr_thresh8.attr,
2134 &format_attr_filter_tid4.attr,
2135 &format_attr_filter_link3.attr,
2136 &format_attr_filter_state4.attr,
2137 &format_attr_filter_local.attr,
2138 &format_attr_filter_all_op.attr,
2139 &format_attr_filter_nnm.attr,
2140 &format_attr_filter_opc3.attr,
2141 &format_attr_filter_nc.attr,
2142 &format_attr_filter_isoc.attr,
2143 NULL,
2144 };
2145
2146 static const struct attribute_group knl_uncore_cha_format_group = {
2147 .name = "format",
2148 .attrs = knl_uncore_cha_formats_attr,
2149 };
2150
2151 static struct event_constraint knl_uncore_cha_constraints[] = {
2152 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2153 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2154 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2155 EVENT_CONSTRAINT_END
2156 };
2157
2158 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2159 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2160 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2161 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2162 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2163 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2164 EVENT_EXTRA_END
2165 };
2166
2167 static u64 knl_cha_filter_mask(int fields)
2168 {
2169 u64 mask = 0;
2170
2171 if (fields & 0x1)
2172 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2173 if (fields & 0x2)
2174 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2175 if (fields & 0x4)
2176 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2177 return mask;
2178 }
2179
2180 static struct event_constraint *
2181 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2182 {
2183 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2184 }
2185
2186 static int knl_cha_hw_config(struct intel_uncore_box *box,
2187 struct perf_event *event)
2188 {
2189 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2190 struct extra_reg *er;
2191 int idx = 0;
2192
2193 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2194 if (er->event != (event->hw.config & er->config_mask))
2195 continue;
2196 idx |= er->idx;
2197 }
2198
2199 if (idx) {
2200 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2201 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2202 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2203
2204 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2205 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2206 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2207 reg1->idx = idx;
2208 }
2209 return 0;
2210 }
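
/*
 * Note that, unlike the C-Box helpers above, knl_cha_hw_config() always
 * ORs the REMOTE_NODE, LOCAL_NODE and NNC filter bits into reg1->config,
 * presumably so a CHA event counts both node classes by default while
 * config1 can still narrow the fields selected by knl_cha_filter_mask().
 */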
2211
2212 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2213 struct perf_event *event);
2214
2215 static struct intel_uncore_ops knl_uncore_cha_ops = {
2216 .init_box = snbep_uncore_msr_init_box,
2217 .disable_box = snbep_uncore_msr_disable_box,
2218 .enable_box = snbep_uncore_msr_enable_box,
2219 .disable_event = snbep_uncore_msr_disable_event,
2220 .enable_event = hswep_cbox_enable_event,
2221 .read_counter = uncore_msr_read_counter,
2222 .hw_config = knl_cha_hw_config,
2223 .get_constraint = knl_cha_get_constraint,
2224 .put_constraint = snbep_cbox_put_constraint,
2225 };
2226
2227 static struct intel_uncore_type knl_uncore_cha = {
2228 .name = "cha",
2229 .num_counters = 4,
2230 .num_boxes = 38,
2231 .perf_ctr_bits = 48,
2232 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2233 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2234 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2235 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2236 .msr_offset = KNL_CHA_MSR_OFFSET,
2237 .num_shared_regs = 1,
2238 .constraints = knl_uncore_cha_constraints,
2239 .ops = &knl_uncore_cha_ops,
2240 .format_group = &knl_uncore_cha_format_group,
2241 };
2242
2243 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2244 &format_attr_event2.attr,
2245 &format_attr_use_occ_ctr.attr,
2246 &format_attr_occ_sel.attr,
2247 &format_attr_edge.attr,
2248 &format_attr_tid_en.attr,
2249 &format_attr_inv.attr,
2250 &format_attr_thresh6.attr,
2251 &format_attr_occ_invert.attr,
2252 &format_attr_occ_edge_det.attr,
2253 NULL,
2254 };
2255
2256 static const struct attribute_group knl_uncore_pcu_format_group = {
2257 .name = "format",
2258 .attrs = knl_uncore_pcu_formats_attr,
2259 };
2260
2261 static struct intel_uncore_type knl_uncore_pcu = {
2262 .name = "pcu",
2263 .num_counters = 4,
2264 .num_boxes = 1,
2265 .perf_ctr_bits = 48,
2266 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2267 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2268 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2269 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2270 .ops = &snbep_uncore_msr_ops,
2271 .format_group = &knl_uncore_pcu_format_group,
2272 };
2273
2274 static struct intel_uncore_type *knl_msr_uncores[] = {
2275 &knl_uncore_ubox,
2276 &knl_uncore_cha,
2277 &knl_uncore_pcu,
2278 NULL,
2279 };
2280
2281 void knl_uncore_cpu_init(void)
2282 {
2283 uncore_msr_uncores = knl_msr_uncores;
2284 }
2285
2286 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2287 {
2288 struct pci_dev *pdev = box->pci_dev;
2289 int box_ctl = uncore_pci_box_ctl(box);
2290
2291 pci_write_config_dword(pdev, box_ctl, 0);
2292 }
2293
2294 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2295 struct perf_event *event)
2296 {
2297 struct pci_dev *pdev = box->pci_dev;
2298 struct hw_perf_event *hwc = &event->hw;
2299
2300 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2301 == UNCORE_FIXED_EVENT)
2302 pci_write_config_dword(pdev, hwc->config_base,
2303 hwc->config | KNL_PMON_FIXED_CTL_EN);
2304 else
2305 pci_write_config_dword(pdev, hwc->config_base,
2306 hwc->config | SNBEP_PMON_CTL_EN);
2307 }
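
/*
 * The branch above distinguishes the fixed cycle counters from the
 * general-purpose ones: an event-select field equal to UNCORE_FIXED_EVENT
 * must be enabled through KNL_PMON_FIXED_CTL_EN, while everything else
 * uses the common SNBEP_PMON_CTL_EN bit.
 */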
2308
2309 static struct intel_uncore_ops knl_uncore_imc_ops = {
2310 .init_box = snbep_uncore_pci_init_box,
2311 .disable_box = snbep_uncore_pci_disable_box,
2312 .enable_box = knl_uncore_imc_enable_box,
2313 .read_counter = snbep_uncore_pci_read_counter,
2314 .enable_event = knl_uncore_imc_enable_event,
2315 .disable_event = snbep_uncore_pci_disable_event,
2316 };
2317
2318 static struct intel_uncore_type knl_uncore_imc_uclk = {
2319 .name = "imc_uclk",
2320 .num_counters = 4,
2321 .num_boxes = 2,
2322 .perf_ctr_bits = 48,
2323 .fixed_ctr_bits = 48,
2324 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2325 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2326 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2327 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2328 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2329 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2330 .ops = &knl_uncore_imc_ops,
2331 .format_group = &snbep_uncore_format_group,
2332 };
2333
2334 static struct intel_uncore_type knl_uncore_imc_dclk = {
2335 .name = "imc",
2336 .num_counters = 4,
2337 .num_boxes = 6,
2338 .perf_ctr_bits = 48,
2339 .fixed_ctr_bits = 48,
2340 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2341 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2342 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2343 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2344 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2345 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2346 .ops = &knl_uncore_imc_ops,
2347 .format_group = &snbep_uncore_format_group,
2348 };
2349
2350 static struct intel_uncore_type knl_uncore_edc_uclk = {
2351 .name = "edc_uclk",
2352 .num_counters = 4,
2353 .num_boxes = 8,
2354 .perf_ctr_bits = 48,
2355 .fixed_ctr_bits = 48,
2356 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2357 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2358 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2359 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2360 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2361 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2362 .ops = &knl_uncore_imc_ops,
2363 .format_group = &snbep_uncore_format_group,
2364 };
2365
2366 static struct intel_uncore_type knl_uncore_edc_eclk = {
2367 .name = "edc_eclk",
2368 .num_counters = 4,
2369 .num_boxes = 8,
2370 .perf_ctr_bits = 48,
2371 .fixed_ctr_bits = 48,
2372 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2373 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2374 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2375 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2376 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2377 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2378 .ops = &knl_uncore_imc_ops,
2379 .format_group = &snbep_uncore_format_group,
2380 };
2381
2382 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2383 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2384 EVENT_CONSTRAINT_END
2385 };
2386
2387 static struct intel_uncore_type knl_uncore_m2pcie = {
2388 .name = "m2pcie",
2389 .num_counters = 4,
2390 .num_boxes = 1,
2391 .perf_ctr_bits = 48,
2392 .constraints = knl_uncore_m2pcie_constraints,
2393 SNBEP_UNCORE_PCI_COMMON_INIT(),
2394 };
2395
2396 static struct attribute *knl_uncore_irp_formats_attr[] = {
2397 &format_attr_event.attr,
2398 &format_attr_umask.attr,
2399 &format_attr_qor.attr,
2400 &format_attr_edge.attr,
2401 &format_attr_inv.attr,
2402 &format_attr_thresh8.attr,
2403 NULL,
2404 };
2405
2406 static const struct attribute_group knl_uncore_irp_format_group = {
2407 .name = "format",
2408 .attrs = knl_uncore_irp_formats_attr,
2409 };
2410
2411 static struct intel_uncore_type knl_uncore_irp = {
2412 .name = "irp",
2413 .num_counters = 2,
2414 .num_boxes = 1,
2415 .perf_ctr_bits = 48,
2416 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2417 .event_ctl = SNBEP_PCI_PMON_CTL0,
2418 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2419 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2420 .ops = &snbep_uncore_pci_ops,
2421 .format_group = &knl_uncore_irp_format_group,
2422 };
2423
2424 enum {
2425 KNL_PCI_UNCORE_MC_UCLK,
2426 KNL_PCI_UNCORE_MC_DCLK,
2427 KNL_PCI_UNCORE_EDC_UCLK,
2428 KNL_PCI_UNCORE_EDC_ECLK,
2429 KNL_PCI_UNCORE_M2PCIE,
2430 KNL_PCI_UNCORE_IRP,
2431 };
2432
2433 static struct intel_uncore_type *knl_pci_uncores[] = {
2434 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2435 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2436 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2437 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2438 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2439 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2440 NULL,
2441 };
2442
2443 /*
2444 * KNL uses a common PCI device ID for multiple instances of an uncore PMU
2445 * device type. Prior to KNL, each instance of a PMU device type had a unique
2446 * device ID.
2447 *
2448 * PCI Device ID Uncore PMU Devices
2449 * ----------------------------------
2450 * 0x7841 MC0 UClk, MC1 UClk
2451 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2452 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2453 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2454 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2455 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2456 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2457 * 0x7817 M2PCIe
2458 * 0x7814 IRP
2459 */
2460
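/*
 * Since the device IDs above are shared, each table entry below also pins
 * one PMU instance to a fixed PCI device/function through
 * UNCORE_PCI_DEV_FULL_DATA(device, function, type, index); e.g. MC0 UClk
 * is device 10 function 0 and MC1 UClk device 11 function 0, both with
 * device ID 0x7841.
 */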
2461 static const struct pci_device_id knl_uncore_pci_ids[] = {
2462 { /* MC0 UClk */
2463 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2464 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2465 },
2466 { /* MC1 UClk */
2467 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2468 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2469 },
2470 { /* MC0 DClk CH 0 */
2471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2472 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2473 },
2474 { /* MC0 DClk CH 1 */
2475 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2476 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2477 },
2478 { /* MC0 DClk CH 2 */
2479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2480 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2481 },
2482 { /* MC1 DClk CH 0 */
2483 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2484 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2485 },
2486 { /* MC1 DClk CH 1 */
2487 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2488 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2489 },
2490 { /* MC1 DClk CH 2 */
2491 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2492 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2493 },
2494 { /* EDC0 UClk */
2495 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2496 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2497 },
2498 { /* EDC1 UClk */
2499 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2500 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2501 },
2502 { /* EDC2 UClk */
2503 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2504 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2505 },
2506 { /* EDC3 UClk */
2507 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2508 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2509 },
2510 { /* EDC4 UClk */
2511 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2512 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2513 },
2514 { /* EDC5 UClk */
2515 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2516 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2517 },
2518 { /* EDC6 UClk */
2519 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2520 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2521 },
2522 { /* EDC7 UClk */
2523 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2524 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2525 },
2526 { /* EDC0 EClk */
2527 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2528 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2529 },
2530 { /* EDC1 EClk */
2531 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2532 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2533 },
2534 { /* EDC2 EClk */
2535 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2536 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2537 },
2538 { /* EDC3 EClk */
2539 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2540 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2541 },
2542 { /* EDC4 EClk */
2543 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2544 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2545 },
2546 { /* EDC5 EClk */
2547 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2548 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2549 },
2550 { /* EDC6 EClk */
2551 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2552 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2553 },
2554 { /* EDC7 EClk */
2555 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2556 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2557 },
2558 { /* M2PCIe */
2559 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2560 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2561 },
2562 { /* IRP */
2563 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2564 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2565 },
2566 { /* end: all zeroes */ }
2567 };
2568
2569 static struct pci_driver knl_uncore_pci_driver = {
2570 .name = "knl_uncore",
2571 .id_table = knl_uncore_pci_ids,
2572 };
2573
2574 int knl_uncore_pci_init(void)
2575 {
2576 int ret;
2577
2578 /* All KNL PCI-based PMON units are on the same PCI bus except IRP */
2579 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2580 if (ret)
2581 return ret;
2582 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2583 if (ret)
2584 return ret;
2585 uncore_pci_uncores = knl_pci_uncores;
2586 uncore_pci_driver = &knl_uncore_pci_driver;
2587 return 0;
2588 }
2589
2590 /* end of KNL uncore support */
2591
2592 /* Haswell-EP uncore support */
2593 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2594 &format_attr_event.attr,
2595 &format_attr_umask.attr,
2596 &format_attr_edge.attr,
2597 &format_attr_inv.attr,
2598 &format_attr_thresh5.attr,
2599 &format_attr_filter_tid2.attr,
2600 &format_attr_filter_cid.attr,
2601 NULL,
2602 };
2603
2604 static const struct attribute_group hswep_uncore_ubox_format_group = {
2605 .name = "format",
2606 .attrs = hswep_uncore_ubox_formats_attr,
2607 };
2608
2609 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2610 {
2611 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2612 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2613 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2614 reg1->idx = 0;
2615 return 0;
2616 }
2617
2618 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2619 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2620 .hw_config = hswep_ubox_hw_config,
2621 .get_constraint = uncore_get_constraint,
2622 .put_constraint = uncore_put_constraint,
2623 };
2624
2625 static struct intel_uncore_type hswep_uncore_ubox = {
2626 .name = "ubox",
2627 .num_counters = 2,
2628 .num_boxes = 1,
2629 .perf_ctr_bits = 44,
2630 .fixed_ctr_bits = 48,
2631 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2632 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2633 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2634 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2635 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2636 .num_shared_regs = 1,
2637 .ops = &hswep_uncore_ubox_ops,
2638 .format_group = &hswep_uncore_ubox_format_group,
2639 };
2640
2641 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2642 &format_attr_event.attr,
2643 &format_attr_umask.attr,
2644 &format_attr_edge.attr,
2645 &format_attr_tid_en.attr,
2646 &format_attr_thresh8.attr,
2647 &format_attr_filter_tid3.attr,
2648 &format_attr_filter_link2.attr,
2649 &format_attr_filter_state3.attr,
2650 &format_attr_filter_nid2.attr,
2651 &format_attr_filter_opc2.attr,
2652 &format_attr_filter_nc.attr,
2653 &format_attr_filter_c6.attr,
2654 &format_attr_filter_isoc.attr,
2655 NULL,
2656 };
2657
2658 static const struct attribute_group hswep_uncore_cbox_format_group = {
2659 .name = "format",
2660 .attrs = hswep_uncore_cbox_formats_attr,
2661 };
2662
2663 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2664 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2665 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2666 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2667 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2668 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2669 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2670 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2671 EVENT_CONSTRAINT_END
2672 };
2673
2674 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2675 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2676 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2677 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2678 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2679 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2680 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2681 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2682 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2683 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2684 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2685 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2686 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2687 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2688 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2689 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2690 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2691 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2692 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2693 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2694 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2695 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2696 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2697 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2698 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2699 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2700 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2701 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2702 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2703 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2704 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2705 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2706 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2707 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2708 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2709 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2710 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2711 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2712 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2713 EVENT_EXTRA_END
2714 };
2715
2716 static u64 hswep_cbox_filter_mask(int fields)
2717 {
2718 u64 mask = 0;
2719 if (fields & 0x1)
2720 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2721 if (fields & 0x2)
2722 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2723 if (fields & 0x4)
2724 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2725 if (fields & 0x8)
2726 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2727 if (fields & 0x10) {
2728 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2729 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2730 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2731 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2732 }
2733 return mask;
2734 }
2735
2736 static struct event_constraint *
2737 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2738 {
2739 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2740 }
2741
2742 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2743 {
2744 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2745 struct extra_reg *er;
2746 int idx = 0;
2747
2748 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2749 if (er->event != (event->hw.config & er->config_mask))
2750 continue;
2751 idx |= er->idx;
2752 }
2753
2754 if (idx) {
2755 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2756 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2757 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2758 reg1->idx = idx;
2759 }
2760 return 0;
2761 }
2762
2763 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2764 struct perf_event *event)
2765 {
2766 struct hw_perf_event *hwc = &event->hw;
2767 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2768
2769 if (reg1->idx != EXTRA_REG_NONE) {
2770 u64 filter = uncore_shared_reg_config(box, 0);
2771 wrmsrq(reg1->reg, filter & 0xffffffff);
2772 wrmsrq(reg1->reg + 1, filter >> 32);
2773 }
2774
2775 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2776 }
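
/*
 * Here the high dword goes to reg1->reg + 1: the Haswell-EP per-box
 * FILTER0/FILTER1 MSRs are adjacent, unlike the 6-apart IvyTown layout
 * handled in ivbep_cbox_enable_event().
 */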
2777
2778 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2779 .init_box = snbep_uncore_msr_init_box,
2780 .disable_box = snbep_uncore_msr_disable_box,
2781 .enable_box = snbep_uncore_msr_enable_box,
2782 .disable_event = snbep_uncore_msr_disable_event,
2783 .enable_event = hswep_cbox_enable_event,
2784 .read_counter = uncore_msr_read_counter,
2785 .hw_config = hswep_cbox_hw_config,
2786 .get_constraint = hswep_cbox_get_constraint,
2787 .put_constraint = snbep_cbox_put_constraint,
2788 };
2789
2790 static struct intel_uncore_type hswep_uncore_cbox = {
2791 .name = "cbox",
2792 .num_counters = 4,
2793 .num_boxes = 18,
2794 .perf_ctr_bits = 48,
2795 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2796 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2797 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2798 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2799 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2800 .num_shared_regs = 1,
2801 .constraints = hswep_uncore_cbox_constraints,
2802 .ops = &hswep_uncore_cbox_ops,
2803 .format_group = &hswep_uncore_cbox_format_group,
2804 };
2805
2806 /*
2807 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2808 */
2809 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2810 {
2811 unsigned msr = uncore_msr_box_ctl(box);
2812
2813 if (msr) {
2814 u64 init = SNBEP_PMON_BOX_CTL_INT;
2815 u64 flags = 0;
2816 int i;
2817
2818 for_each_set_bit(i, (unsigned long *)&init, 64) {
2819 flags |= (1ULL << i);
2820 wrmsrq(msr, flags);
2821 }
2822 }
2823 }
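
/*
 * Walk-through: for init = SNBEP_PMON_BOX_CTL_INT (bits 0, 1 and 16) the
 * loop issues three writes with cumulative values 0x1, 0x3 and 0x10003,
 * so each write sets exactly one new bit and a write the hardware
 * rejects only faults itself instead of losing the whole init value.
 */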
2824
2825 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2826 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2827 .init_box = hswep_uncore_sbox_msr_init_box
2828 };
2829
2830 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2831 &format_attr_event.attr,
2832 &format_attr_umask.attr,
2833 &format_attr_edge.attr,
2834 &format_attr_tid_en.attr,
2835 &format_attr_inv.attr,
2836 &format_attr_thresh8.attr,
2837 NULL,
2838 };
2839
2840 static const struct attribute_group hswep_uncore_sbox_format_group = {
2841 .name = "format",
2842 .attrs = hswep_uncore_sbox_formats_attr,
2843 };
2844
2845 static struct intel_uncore_type hswep_uncore_sbox = {
2846 .name = "sbox",
2847 .num_counters = 4,
2848 .num_boxes = 4,
2849 .perf_ctr_bits = 44,
2850 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2851 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2852 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2853 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2854 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2855 .ops = &hswep_uncore_sbox_msr_ops,
2856 .format_group = &hswep_uncore_sbox_format_group,
2857 };
2858
2859 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2860 {
2861 struct hw_perf_event *hwc = &event->hw;
2862 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2863 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2864
2865 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2866 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2867 reg1->idx = ev_sel - 0xb;
2868 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
2869 }
2870 return 0;
2871 }
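
/*
 * The PCU band filter packs four 8-bit frequency-band thresholds into
 * one MSR, one byte per occupancy event 0xb-0xe; e.g. ev_sel 0xd yields
 * reg1->idx = 2 and keeps bits 23:16 of config1 (filter_band2).
 */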
2872
2873 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2874 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2875 .hw_config = hswep_pcu_hw_config,
2876 .get_constraint = snbep_pcu_get_constraint,
2877 .put_constraint = snbep_pcu_put_constraint,
2878 };
2879
2880 static struct intel_uncore_type hswep_uncore_pcu = {
2881 .name = "pcu",
2882 .num_counters = 4,
2883 .num_boxes = 1,
2884 .perf_ctr_bits = 48,
2885 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2886 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2887 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2888 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2889 .num_shared_regs = 1,
2890 .ops = &hswep_uncore_pcu_ops,
2891 .format_group = &snbep_uncore_pcu_format_group,
2892 };
2893
2894 static struct intel_uncore_type *hswep_msr_uncores[] = {
2895 &hswep_uncore_ubox,
2896 &hswep_uncore_cbox,
2897 &hswep_uncore_sbox,
2898 &hswep_uncore_pcu,
2899 NULL,
2900 };
2901
2902 #define HSWEP_PCU_DID 0x2fc0
2903 #define HSWEP_PCU_CAPID4_OFFSET 0x94
2904 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
2905
2906 static bool hswep_has_limit_sbox(unsigned int device)
2907 {
2908 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2909 u32 capid4;
2910
2911 if (!dev)
2912 return false;
2913
2914 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2915 pci_dev_put(dev);
2916 if (!hswep_get_chop(capid4))
2917 return true;
2918
2919 return false;
2920 }
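
/*
 * The CAPID4 register of the PCU device appears to describe the die: a
 * zero "chop" field (bits 7:6) identifies the reduced parts that expose
 * only two SBOXes, as used by hswep_uncore_cpu_init() below.
 */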
2921
2922 void hswep_uncore_cpu_init(void)
2923 {
2924 if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
2925 hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();
2926
2927 /* Detect 6-8 core systems with only two SBOXes */
2928 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2929 hswep_uncore_sbox.num_boxes = 2;
2930
2931 uncore_msr_uncores = hswep_msr_uncores;
2932 }
2933
2934 static struct intel_uncore_type hswep_uncore_ha = {
2935 .name = "ha",
2936 .num_counters = 4,
2937 .num_boxes = 2,
2938 .perf_ctr_bits = 48,
2939 SNBEP_UNCORE_PCI_COMMON_INIT(),
2940 };
2941
2942 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2943 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2944 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2945 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2946 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2947 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2948 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2949 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2950 { /* end: all zeroes */ },
2951 };
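
/*
 * The cas_count scale of 6.103515625e-5 is 64 / 2^20: each CAS transfers
 * one 64-byte cache line, so a scaled count reads directly in MiB,
 * matching the .unit strings above.
 */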
2952
2953 static struct intel_uncore_type hswep_uncore_imc = {
2954 .name = "imc",
2955 .num_counters = 4,
2956 .num_boxes = 8,
2957 .perf_ctr_bits = 48,
2958 .fixed_ctr_bits = 48,
2959 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2960 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2961 .event_descs = hswep_uncore_imc_events,
2962 SNBEP_UNCORE_PCI_COMMON_INIT(),
2963 };
2964
2965 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2966
2967 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2968 {
2969 struct pci_dev *pdev = box->pci_dev;
2970 struct hw_perf_event *hwc = &event->hw;
2971 u64 count = 0;
2972
2973 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2974 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2975
2976 return count;
2977 }
2978
2979 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2980 .init_box = snbep_uncore_pci_init_box,
2981 .disable_box = snbep_uncore_pci_disable_box,
2982 .enable_box = snbep_uncore_pci_enable_box,
2983 .disable_event = ivbep_uncore_irp_disable_event,
2984 .enable_event = ivbep_uncore_irp_enable_event,
2985 .read_counter = hswep_uncore_irp_read_counter,
2986 };
2987
2988 static struct intel_uncore_type hswep_uncore_irp = {
2989 .name = "irp",
2990 .num_counters = 4,
2991 .num_boxes = 1,
2992 .perf_ctr_bits = 48,
2993 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2994 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2995 .ops = &hswep_uncore_irp_ops,
2996 .format_group = &snbep_uncore_format_group,
2997 };
2998
2999 static struct intel_uncore_type hswep_uncore_qpi = {
3000 .name = "qpi",
3001 .num_counters = 4,
3002 .num_boxes = 3,
3003 .perf_ctr_bits = 48,
3004 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3005 .event_ctl = SNBEP_PCI_PMON_CTL0,
3006 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3007 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3008 .num_shared_regs = 1,
3009 .ops = &snbep_uncore_qpi_ops,
3010 .format_group = &snbep_uncore_qpi_format_group,
3011 };
3012
3013 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
3014 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3015 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3016 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3017 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3018 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
3019 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3020 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3021 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3022 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3023 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3024 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3025 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3026 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3027 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3028 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3029 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3030 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3031 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3032 EVENT_CONSTRAINT_END
3033 };
3034
3035 static struct intel_uncore_type hswep_uncore_r2pcie = {
3036 .name = "r2pcie",
3037 .num_counters = 4,
3038 .num_boxes = 1,
3039 .perf_ctr_bits = 48,
3040 .constraints = hswep_uncore_r2pcie_constraints,
3041 SNBEP_UNCORE_PCI_COMMON_INIT(),
3042 };
3043
3044 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3045 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3046 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3047 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3048 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3049 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3050 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3051 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3052 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3053 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3054 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3055 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3056 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3057 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3058 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3059 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3060 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3061 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3062 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3063 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3064 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3065 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3066 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3067 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3068 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3069 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3070 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3071 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3072 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3073 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3074 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3075 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3076 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3077 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3078 EVENT_CONSTRAINT_END
3079 };
3080
3081 static struct intel_uncore_type hswep_uncore_r3qpi = {
3082 .name = "r3qpi",
3083 .num_counters = 3,
3084 .num_boxes = 3,
3085 .perf_ctr_bits = 44,
3086 .constraints = hswep_uncore_r3qpi_constraints,
3087 SNBEP_UNCORE_PCI_COMMON_INIT(),
3088 };
3089
3090 enum {
3091 HSWEP_PCI_UNCORE_HA,
3092 HSWEP_PCI_UNCORE_IMC,
3093 HSWEP_PCI_UNCORE_IRP,
3094 HSWEP_PCI_UNCORE_QPI,
3095 HSWEP_PCI_UNCORE_R2PCIE,
3096 HSWEP_PCI_UNCORE_R3QPI,
3097 };
3098
3099 static struct intel_uncore_type *hswep_pci_uncores[] = {
3100 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3101 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3102 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3103 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3104 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3105 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
3106 NULL,
3107 };
3108
3109 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3110 { /* Home Agent 0 */
3111 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3112 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3113 },
3114 { /* Home Agent 1 */
3115 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3116 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3117 },
3118 { /* MC0 Channel 0 */
3119 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3120 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3121 },
3122 { /* MC0 Channel 1 */
3123 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3124 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3125 },
3126 { /* MC0 Channel 2 */
3127 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3128 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3129 },
3130 { /* MC0 Channel 3 */
3131 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3132 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3133 },
3134 { /* MC1 Channel 0 */
3135 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3136 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3137 },
3138 { /* MC1 Channel 1 */
3139 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3140 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3141 },
3142 { /* MC1 Channel 2 */
3143 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3144 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3145 },
3146 { /* MC1 Channel 3 */
3147 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3148 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3149 },
3150 { /* IRP */
3151 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3152 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3153 },
3154 { /* QPI0 Port 0 */
3155 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3156 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3157 },
3158 { /* QPI0 Port 1 */
3159 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3160 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3161 },
3162 { /* QPI1 Port 2 */
3163 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3164 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3165 },
3166 { /* R2PCIe */
3167 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3168 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3169 },
3170 { /* R3QPI0 Link 0 */
3171 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3172 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3173 },
3174 { /* R3QPI0 Link 1 */
3175 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3176 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3177 },
3178 { /* R3QPI1 Link 2 */
3179 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3180 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3181 },
3182 { /* QPI Port 0 filter */
3183 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3184 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3185 SNBEP_PCI_QPI_PORT0_FILTER),
3186 },
3187 { /* QPI Port 1 filter */
3188 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3189 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3190 SNBEP_PCI_QPI_PORT1_FILTER),
3191 },
3192 { /* end: all zeroes */ }
3193 };
3194
3195 static struct pci_driver hswep_uncore_pci_driver = {
3196 .name = "hswep_uncore",
3197 .id_table = hswep_uncore_pci_ids,
3198 };
3199
3200 int hswep_uncore_pci_init(void)
3201 {
3202 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3203 if (ret)
3204 return ret;
3205 uncore_pci_uncores = hswep_pci_uncores;
3206 uncore_pci_driver = &hswep_uncore_pci_driver;
3207 return 0;
3208 }
3209 /* end of Haswell-EP uncore support */
3210
3211 /* BDX uncore support */
3212
3213 static struct intel_uncore_type bdx_uncore_ubox = {
3214 .name = "ubox",
3215 .num_counters = 2,
3216 .num_boxes = 1,
3217 .perf_ctr_bits = 48,
3218 .fixed_ctr_bits = 48,
3219 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3220 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3221 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3222 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3223 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3224 .num_shared_regs = 1,
3225 .ops = &ivbep_uncore_msr_ops,
3226 .format_group = &ivbep_uncore_ubox_format_group,
3227 };
3228
3229 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3230 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3231 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3232 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3233 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3234 EVENT_CONSTRAINT_END
3235 };
3236
3237 static struct intel_uncore_type bdx_uncore_cbox = {
3238 .name = "cbox",
3239 .num_counters = 4,
3240 .num_boxes = 24,
3241 .perf_ctr_bits = 48,
3242 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3243 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3244 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3245 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3246 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3247 .num_shared_regs = 1,
3248 .constraints = bdx_uncore_cbox_constraints,
3249 .ops = &hswep_uncore_cbox_ops,
3250 .format_group = &hswep_uncore_cbox_format_group,
3251 };
3252
3253 static struct intel_uncore_type bdx_uncore_sbox = {
3254 .name = "sbox",
3255 .num_counters = 4,
3256 .num_boxes = 4,
3257 .perf_ctr_bits = 48,
3258 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3259 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3260 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3261 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3262 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3263 .ops = &hswep_uncore_sbox_msr_ops,
3264 .format_group = &hswep_uncore_sbox_format_group,
3265 };
3266
3267 #define BDX_MSR_UNCORE_SBOX 3
3268
3269 static struct intel_uncore_type *bdx_msr_uncores[] = {
3270 &bdx_uncore_ubox,
3271 &bdx_uncore_cbox,
3272 &hswep_uncore_pcu,
3273 &bdx_uncore_sbox,
3274 NULL,
3275 };
3276
3277 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3278 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3279 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3280 EVENT_CONSTRAINT_END
3281 };
3282
3283 #define BDX_PCU_DID 0x6fc0
3284
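/*
 * There is one CBox per core, so never expose more CBox PMUs than cores
 * actually present in the package.
 */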
3285 void bdx_uncore_cpu_init(void)
3286 {
3287 if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
3288 bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
3289 uncore_msr_uncores = bdx_msr_uncores;
3290
3291 /* Detect systems with no SBOXes */
3292 if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
3293 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3294
3295 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3296 }
3297
3298 static struct intel_uncore_type bdx_uncore_ha = {
3299 .name = "ha",
3300 .num_counters = 4,
3301 .num_boxes = 2,
3302 .perf_ctr_bits = 48,
3303 SNBEP_UNCORE_PCI_COMMON_INIT(),
3304 };
3305
3306 static struct intel_uncore_type bdx_uncore_imc = {
3307 .name = "imc",
3308 .num_counters = 4,
3309 .num_boxes = 8,
3310 .perf_ctr_bits = 48,
3311 .fixed_ctr_bits = 48,
3312 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3313 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3314 .event_descs = hswep_uncore_imc_events,
3315 SNBEP_UNCORE_PCI_COMMON_INIT(),
3316 };
3317
3318 static struct intel_uncore_type bdx_uncore_irp = {
3319 .name = "irp",
3320 .num_counters = 4,
3321 .num_boxes = 1,
3322 .perf_ctr_bits = 48,
3323 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3324 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3325 .ops = &hswep_uncore_irp_ops,
3326 .format_group = &snbep_uncore_format_group,
3327 };
3328
3329 static struct intel_uncore_type bdx_uncore_qpi = {
3330 .name = "qpi",
3331 .num_counters = 4,
3332 .num_boxes = 3,
3333 .perf_ctr_bits = 48,
3334 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3335 .event_ctl = SNBEP_PCI_PMON_CTL0,
3336 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3337 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3338 .num_shared_regs = 1,
3339 .ops = &snbep_uncore_qpi_ops,
3340 .format_group = &snbep_uncore_qpi_format_group,
3341 };
3342
3343 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3344 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3345 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3346 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3347 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3348 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3349 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3350 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3351 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3352 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3353 EVENT_CONSTRAINT_END
3354 };
3355
3356 static struct intel_uncore_type bdx_uncore_r2pcie = {
3357 .name = "r2pcie",
3358 .num_counters = 4,
3359 .num_boxes = 1,
3360 .perf_ctr_bits = 48,
3361 .constraints = bdx_uncore_r2pcie_constraints,
3362 SNBEP_UNCORE_PCI_COMMON_INIT(),
3363 };
3364
3365 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3366 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3367 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3368 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3369 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3370 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3371 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3372 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3373 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3374 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3375 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3376 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3377 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3378 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3379 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3380 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3381 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3382 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3383 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3384 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3385 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3386 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3387 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3388 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3389 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3390 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3391 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3392 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3393 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3394 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3395 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3396 EVENT_CONSTRAINT_END
3397 };
3398
3399 static struct intel_uncore_type bdx_uncore_r3qpi = {
3400 .name = "r3qpi",
3401 .num_counters = 3,
3402 .num_boxes = 3,
3403 .perf_ctr_bits = 48,
3404 .constraints = bdx_uncore_r3qpi_constraints,
3405 SNBEP_UNCORE_PCI_COMMON_INIT(),
3406 };
3407
3408 enum {
3409 BDX_PCI_UNCORE_HA,
3410 BDX_PCI_UNCORE_IMC,
3411 BDX_PCI_UNCORE_IRP,
3412 BDX_PCI_UNCORE_QPI,
3413 BDX_PCI_UNCORE_R2PCIE,
3414 BDX_PCI_UNCORE_R3QPI,
3415 };
3416
3417 static struct intel_uncore_type *bdx_pci_uncores[] = {
3418 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3419 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3420 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3421 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3422 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3423 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3424 NULL,
3425 };
3426
3427 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3428 { /* Home Agent 0 */
3429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3430 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3431 },
3432 { /* Home Agent 1 */
3433 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3434 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3435 },
3436 { /* MC0 Channel 0 */
3437 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3438 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3439 },
3440 { /* MC0 Channel 1 */
3441 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3442 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3443 },
3444 { /* MC0 Channel 2 */
3445 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3446 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3447 },
3448 { /* MC0 Channel 3 */
3449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3450 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3451 },
3452 { /* MC1 Channel 0 */
3453 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3454 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3455 },
3456 { /* MC1 Channel 1 */
3457 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3458 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3459 },
3460 { /* MC1 Channel 2 */
3461 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3462 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3463 },
3464 { /* MC1 Channel 3 */
3465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3466 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3467 },
3468 { /* IRP */
3469 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3470 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3471 },
3472 { /* QPI0 Port 0 */
3473 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3474 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3475 },
3476 { /* QPI0 Port 1 */
3477 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3478 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3479 },
3480 { /* QPI1 Port 2 */
3481 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3482 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3483 },
3484 { /* R2PCIe */
3485 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3486 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3487 },
3488 { /* R3QPI0 Link 0 */
3489 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3490 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3491 },
3492 { /* R3QPI0 Link 1 */
3493 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3494 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3495 },
3496 { /* R3QPI1 Link 2 */
3497 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3498 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3499 },
3500 { /* QPI Port 0 filter */
3501 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3502 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3503 SNBEP_PCI_QPI_PORT0_FILTER),
3504 },
3505 { /* QPI Port 1 filter */
3506 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3507 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3508 SNBEP_PCI_QPI_PORT1_FILTER),
3509 },
3510 { /* QPI Port 2 filter */
3511 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3512 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3513 BDX_PCI_QPI_PORT2_FILTER),
3514 },
3515 { /* end: all zeroes */ }
3516 };
3517
3518 static struct pci_driver bdx_uncore_pci_driver = {
3519 .name = "bdx_uncore",
3520 .id_table = bdx_uncore_pci_ids,
3521 };
3522
3523 int bdx_uncore_pci_init(void)
3524 {
3525 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3526
3527 if (ret)
3528 return ret;
3529 uncore_pci_uncores = bdx_pci_uncores;
3530 uncore_pci_driver = &bdx_uncore_pci_driver;
3531 return 0;
3532 }
3533
3534 /* end of BDX uncore support */
3535
3536 /* SKX uncore support */
3537
3538 static struct intel_uncore_type skx_uncore_ubox = {
3539 .name = "ubox",
3540 .num_counters = 2,
3541 .num_boxes = 1,
3542 .perf_ctr_bits = 48,
3543 .fixed_ctr_bits = 48,
3544 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3545 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3546 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3547 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3548 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3549 .ops = &ivbep_uncore_msr_ops,
3550 .format_group = &ivbep_uncore_ubox_format_group,
3551 };
3552
3553 static struct attribute *skx_uncore_cha_formats_attr[] = {
3554 &format_attr_event.attr,
3555 &format_attr_umask.attr,
3556 &format_attr_edge.attr,
3557 &format_attr_tid_en.attr,
3558 &format_attr_inv.attr,
3559 &format_attr_thresh8.attr,
3560 &format_attr_filter_tid4.attr,
3561 &format_attr_filter_state5.attr,
3562 &format_attr_filter_rem.attr,
3563 &format_attr_filter_loc.attr,
3564 &format_attr_filter_nm.attr,
3565 &format_attr_filter_all_op.attr,
3566 &format_attr_filter_not_nm.attr,
3567 &format_attr_filter_opc_0.attr,
3568 &format_attr_filter_opc_1.attr,
3569 &format_attr_filter_nc.attr,
3570 &format_attr_filter_isoc.attr,
3571 NULL,
3572 };
3573
3574 static const struct attribute_group skx_uncore_chabox_format_group = {
3575 .name = "format",
3576 .attrs = skx_uncore_cha_formats_attr,
3577 };
3578
3579 static struct event_constraint skx_uncore_chabox_constraints[] = {
3580 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3581 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3582 EVENT_CONSTRAINT_END
3583 };
3584
3585 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3586 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3587 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3588 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3589 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3590 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3591 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3592 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3593 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3594 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3595 EVENT_EXTRA_END
3596 };
3597
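/*
 * @fields is a bitmap of filter groups: 0x1 = TID, 0x2 = LINK, 0x4 = STATE,
 * 0x8 = all remaining opcode/locality match bits.
 */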
3598 static u64 skx_cha_filter_mask(int fields)
3599 {
3600 u64 mask = 0;
3601
3602 if (fields & 0x1)
3603 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3604 if (fields & 0x2)
3605 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3606 if (fields & 0x4)
3607 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3608 if (fields & 0x8) {
3609 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3610 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3611 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3612 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3613 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3614 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3615 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3616 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3617 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3618 }
3619 return mask;
3620 }
3621
3622 static struct event_constraint *
3623 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3624 {
3625 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3626 }
3627
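/*
 * Combine the TID filter (when tid_en is set) with the filter fields the
 * extra-regs table demands for this event, then latch the config1 bits.
 */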
3628 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3629 {
3630 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3631 struct extra_reg *er;
3632 int idx = 0;
3633 /* Any of the CHA events may be filtered by Thread/Core-ID. */
3634 if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3635 idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3636
3637 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3638 if (er->event != (event->hw.config & er->config_mask))
3639 continue;
3640 idx |= er->idx;
3641 }
3642
3643 if (idx) {
3644 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3645 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3646 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3647 reg1->idx = idx;
3648 }
3649 return 0;
3650 }
3651
3652 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3653 /* There is no frz_en for chabox ctl */
3654 .init_box = ivbep_uncore_msr_init_box,
3655 .disable_box = snbep_uncore_msr_disable_box,
3656 .enable_box = snbep_uncore_msr_enable_box,
3657 .disable_event = snbep_uncore_msr_disable_event,
3658 .enable_event = hswep_cbox_enable_event,
3659 .read_counter = uncore_msr_read_counter,
3660 .hw_config = skx_cha_hw_config,
3661 .get_constraint = skx_cha_get_constraint,
3662 .put_constraint = snbep_cbox_put_constraint,
3663 };
3664
3665 static struct intel_uncore_type skx_uncore_chabox = {
3666 .name = "cha",
3667 .num_counters = 4,
3668 .perf_ctr_bits = 48,
3669 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3670 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3671 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3672 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3673 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3674 .num_shared_regs = 1,
3675 .constraints = skx_uncore_chabox_constraints,
3676 .ops = &skx_uncore_chabox_ops,
3677 .format_group = &skx_uncore_chabox_format_group,
3678 };
3679
3680 static struct attribute *skx_uncore_iio_formats_attr[] = {
3681 &format_attr_event.attr,
3682 &format_attr_umask.attr,
3683 &format_attr_edge.attr,
3684 &format_attr_inv.attr,
3685 &format_attr_thresh9.attr,
3686 &format_attr_ch_mask.attr,
3687 &format_attr_fc_mask.attr,
3688 NULL,
3689 };
3690
3691 static const struct attribute_group skx_uncore_iio_format_group = {
3692 .name = "format",
3693 .attrs = skx_uncore_iio_formats_attr,
3694 };
3695
3696 static struct event_constraint skx_uncore_iio_constraints[] = {
3697 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3698 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3699 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3700 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3701 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3702 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3703 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3704 EVENT_CONSTRAINT_END
3705 };
3706
3707 static void skx_iio_enable_event(struct intel_uncore_box *box,
3708 struct perf_event *event)
3709 {
3710 struct hw_perf_event *hwc = &event->hw;
3711
3712 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3713 }
3714
3715 static struct intel_uncore_ops skx_uncore_iio_ops = {
3716 .init_box = ivbep_uncore_msr_init_box,
3717 .disable_box = snbep_uncore_msr_disable_box,
3718 .enable_box = snbep_uncore_msr_enable_box,
3719 .disable_event = snbep_uncore_msr_disable_event,
3720 .enable_event = skx_iio_enable_event,
3721 .read_counter = uncore_msr_read_counter,
3722 };
3723
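/* Look up this PMU instance's topology slot on the given die. */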
3724 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3725 {
3726 int idx;
3727
3728 for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3729 if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3730 return &pmu->type->topology[die][idx];
3731 }
3732
3733 return NULL;
3734 }
3735
3736 static umode_t
3737 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3738 int die, int zero_bus_pmu)
3739 {
3740 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3741 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3742
3743 return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3744 }
3745
3746 static umode_t
3747 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3748 {
3749 /* Root bus 0x00 is valid only for pmu_idx = 0. */
3750 return pmu_iio_mapping_visible(kobj, attr, die, 0);
3751 }
3752
3753 static ssize_t skx_iio_mapping_show(struct device *dev,
3754 struct device_attribute *attr, char *buf)
3755 {
3756 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3757 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3758 long die = (long)ea->var;
3759 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3760
3761 return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3762 pmut ? pmut->iio->pci_bus_no : 0);
3763 }
3764
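/* Read the bus-number MSR on @cpu; reject it unless its valid bit is set. */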
3765 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3766 {
3767 u64 msr_value;
3768
3769 if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3770 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3771 return -ENXIO;
3772
3773 *topology = msr_value;
3774
3775 return 0;
3776 }
3777
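/* Return the first online CPU on the given logical die, or CPU 0 if none. */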
3778 static int die_to_cpu(int die)
3779 {
3780 int res = 0, cpu, current_die;
3781 /*
3782 * Using cpus_read_lock() to ensure cpu is not going down between
3783 * looking at cpu_online_mask.
3784 */
3785 cpus_read_lock();
3786 for_each_online_cpu(cpu) {
3787 current_die = topology_logical_die_id(cpu);
3788 if (current_die == die) {
3789 res = cpu;
3790 break;
3791 }
3792 }
3793 cpus_read_unlock();
3794 return res;
3795 }
3796
3797 enum {
3798 IIO_TOPOLOGY_TYPE,
3799 UPI_TOPOLOGY_TYPE,
3800 TOPOLOGY_MAX
3801 };
3802
3803 static const size_t topology_size[TOPOLOGY_MAX] = {
3804 sizeof(*((struct intel_uncore_topology *)NULL)->iio),
3805 sizeof(*((struct intel_uncore_topology *)NULL)->upi)
3806 };
3807
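/*
 * Allocate per-die, per-box topology slots and their typed (iio/upi)
 * payloads, unwinding everything already allocated on failure.
 */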
3808 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3809 {
3810 int die, idx;
3811 struct intel_uncore_topology **topology;
3812
3813 if (!type->num_boxes)
3814 return -EPERM;
3815
3816 topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3817 if (!topology)
3818 goto err;
3819
3820 for (die = 0; die < uncore_max_dies(); die++) {
3821 topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3822 if (!topology[die])
3823 goto clear;
3824 for (idx = 0; idx < type->num_boxes; idx++) {
3825 topology[die][idx].untyped = kcalloc(type->num_boxes,
3826 topology_size[topology_type],
3827 GFP_KERNEL);
3828 if (!topology[die][idx].untyped)
3829 goto clear;
3830 }
3831 }
3832
3833 type->topology = topology;
3834
3835 return 0;
3836 clear:
3837 for (; die >= 0; die--) {
3838 for (idx = 0; idx < type->num_boxes; idx++)
3839 kfree(topology[die][idx].untyped);
3840 kfree(topology[die]);
3841 }
3842 kfree(topology);
3843 err:
3844 return -ENOMEM;
3845 }
3846
3847 static void pmu_free_topology(struct intel_uncore_type *type)
3848 {
3849 int die, idx;
3850
3851 if (type->topology) {
3852 for (die = 0; die < uncore_max_dies(); die++) {
3853 for (idx = 0; idx < type->num_boxes; idx++)
3854 kfree(type->topology[die][idx].untyped);
3855 kfree(type->topology[die]);
3856 }
3857 kfree(type->topology);
3858 type->topology = NULL;
3859 }
3860 }
3861
3862 static int skx_pmu_get_topology(struct intel_uncore_type *type,
3863 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
3864 {
3865 int die, ret = -EPERM;
3866 u64 cpu_bus_msr;
3867
3868 for (die = 0; die < uncore_max_dies(); die++) {
3869 ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3870 if (ret)
3871 break;
3872
3873 ret = uncore_die_to_segment(die);
3874 if (ret < 0)
3875 break;
3876
3877 ret = topology_cb(type, ret, die, cpu_bus_msr);
3878 if (ret)
3879 break;
3880 }
3881
3882 return ret;
3883 }
3884
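/*
 * SKX_MSR_CPU_BUS_NUMBER packs one PCI root-bus number per IIO stack,
 * BUS_NUM_STRIDE bits apart; record one bus per PMON box.
 */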
3885 static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
3886 int die, u64 cpu_bus_msr)
3887 {
3888 int idx;
3889 struct intel_uncore_topology *t;
3890
3891 for (idx = 0; idx < type->num_boxes; idx++) {
3892 t = &type->topology[die][idx];
3893 t->pmu_idx = idx;
3894 t->iio->segment = segment;
3895 t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
3896 }
3897
3898 return 0;
3899 }
3900
3901 static int skx_iio_get_topology(struct intel_uncore_type *type)
3902 {
3903 return skx_pmu_get_topology(type, skx_iio_topology_cb);
3904 }
3905
3906 static struct attribute_group skx_iio_mapping_group = {
3907 .is_visible = skx_iio_mapping_visible,
3908 };
3909
3910 static const struct attribute_group *skx_iio_attr_update[] = {
3911 &skx_iio_mapping_group,
3912 NULL,
3913 };
3914
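/* Remove @ag from the NULL-terminated attr_update array by shifting the tail up. */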
3915 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3916 struct attribute_group *ag)
3917 {
3918 int i;
3919
3920 for (i = 0; groups[i]; i++) {
3921 if (groups[i] == ag) {
3922 for (i++; groups[i]; i++)
3923 groups[i - 1] = groups[i];
3924 groups[i - 1] = NULL;
3925 break;
3926 }
3927 }
3928 }
3929
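/*
 * Build one read-only "dieN" attribute per die for the mapping group; if
 * topology discovery or any allocation fails, remove the group from
 * attr_update so no partial mapping is ever exposed.
 */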
3930 static void
3931 pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3932 ssize_t (*show)(struct device*, struct device_attribute*, char*),
3933 int topology_type)
3934 {
3935 char buf[64];
3936 int ret;
3937 long die = -1;
3938 struct attribute **attrs = NULL;
3939 struct dev_ext_attribute *eas = NULL;
3940
3941 ret = pmu_alloc_topology(type, topology_type);
3942 if (ret < 0)
3943 goto clear_attr_update;
3944
3945 ret = type->get_topology(type);
3946 if (ret < 0)
3947 goto clear_topology;
3948
3949 /* One more for NULL. */
3950 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3951 if (!attrs)
3952 goto clear_topology;
3953
3954 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3955 if (!eas)
3956 goto clear_attrs;
3957
3958 for (die = 0; die < uncore_max_dies(); die++) {
3959 snprintf(buf, sizeof(buf), "die%ld", die);
3960 sysfs_attr_init(&eas[die].attr.attr);
3961 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3962 if (!eas[die].attr.attr.name)
3963 goto err;
3964 eas[die].attr.attr.mode = 0444;
3965 eas[die].attr.show = show;
3966 eas[die].attr.store = NULL;
3967 eas[die].var = (void *)die;
3968 attrs[die] = &eas[die].attr.attr;
3969 }
3970 ag->attrs = attrs;
3971
3972 return;
3973 err:
3974 for (; die >= 0; die--)
3975 kfree(eas[die].attr.attr.name);
3976 kfree(eas);
3977 clear_attrs:
3978 kfree(attrs);
3979 clear_topology:
3980 pmu_free_topology(type);
3981 clear_attr_update:
3982 pmu_clear_mapping_attr(type->attr_update, ag);
3983 }
3984
3985 static void
3986 pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3987 {
3988 struct attribute **attr = ag->attrs;
3989
3990 if (!attr)
3991 return;
3992
3993 for (; *attr; attr++)
3994 kfree((*attr)->name);
3995 kfree(attr_to_ext_attr(*ag->attrs));
3996 kfree(ag->attrs);
3997 ag->attrs = NULL;
3998 pmu_free_topology(type);
3999 }
4000
4001 static void
4002 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4003 {
4004 pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
4005 }
4006
4007 static void skx_iio_set_mapping(struct intel_uncore_type *type)
4008 {
4009 pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4010 }
4011
4012 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
4013 {
4014 pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4015 }
4016
4017 static struct intel_uncore_type skx_uncore_iio = {
4018 .name = "iio",
4019 .num_counters = 4,
4020 .num_boxes = 6,
4021 .perf_ctr_bits = 48,
4022 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
4023 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
4024 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4025 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4026 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
4027 .msr_offset = SKX_IIO_MSR_OFFSET,
4028 .constraints = skx_uncore_iio_constraints,
4029 .ops = &skx_uncore_iio_ops,
4030 .format_group = &skx_uncore_iio_format_group,
4031 .attr_update = skx_iio_attr_update,
4032 .get_topology = skx_iio_get_topology,
4033 .set_mapping = skx_iio_set_mapping,
4034 .cleanup_mapping = skx_iio_cleanup_mapping,
4035 };
4036
4037 enum perf_uncore_iio_freerunning_type_id {
4038 SKX_IIO_MSR_IOCLK = 0,
4039 SKX_IIO_MSR_BW = 1,
4040 SKX_IIO_MSR_UTIL = 2,
4041
4042 SKX_IIO_FREERUNNING_TYPE_MAX,
4043 };
4044
4045
4046 static struct freerunning_counters skx_iio_freerunning[] = {
4047 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
4048 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
4049 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
4050 };
4051
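/*
 * The 3.814697266e-6 scale below is 4/2^20: the bandwidth counters tick
 * in 4-byte units and perf reports MiB.
 */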
4052 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
4053 /* Free-Running IO CLOCKS Counter */
4054 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4055 /* Free-Running IIO BANDWIDTH Counters */
4056 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4057 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4058 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4059 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4060 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4061 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4062 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4063 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4064 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4065 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4066 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4067 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4068 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
4069 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
4070 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
4071 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
4072 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
4073 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
4074 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
4075 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
4076 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
4077 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
4078 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
4079 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
4080 /* Free-running IIO UTILIZATION Counters */
4081 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
4082 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
4083 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
4084 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
4085 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
4086 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
4087 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
4088 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
4089 { /* end: all zeroes */ },
4090 };
4091
4092 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
4093 .read_counter = uncore_msr_read_counter,
4094 .hw_config = uncore_freerunning_hw_config,
4095 };
4096
4097 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
4098 &format_attr_event.attr,
4099 &format_attr_umask.attr,
4100 NULL,
4101 };
4102
4103 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
4104 .name = "format",
4105 .attrs = skx_uncore_iio_freerunning_formats_attr,
4106 };
4107
4108 static struct intel_uncore_type skx_uncore_iio_free_running = {
4109 .name = "iio_free_running",
4110 .num_counters = 17,
4111 .num_boxes = 6,
4112 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
4113 .freerunning = skx_iio_freerunning,
4114 .ops = &skx_uncore_iio_freerunning_ops,
4115 .event_descs = skx_uncore_iio_freerunning_events,
4116 .format_group = &skx_uncore_iio_freerunning_format_group,
4117 };
4118
4119 static struct attribute *skx_uncore_formats_attr[] = {
4120 &format_attr_event.attr,
4121 &format_attr_umask.attr,
4122 &format_attr_edge.attr,
4123 &format_attr_inv.attr,
4124 &format_attr_thresh8.attr,
4125 NULL,
4126 };
4127
4128 static const struct attribute_group skx_uncore_format_group = {
4129 .name = "format",
4130 .attrs = skx_uncore_formats_attr,
4131 };
4132
4133 static struct intel_uncore_type skx_uncore_irp = {
4134 .name = "irp",
4135 .num_counters = 2,
4136 .num_boxes = 6,
4137 .perf_ctr_bits = 48,
4138 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
4139 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
4140 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4141 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
4142 .msr_offset = SKX_IRP_MSR_OFFSET,
4143 .ops = &skx_uncore_iio_ops,
4144 .format_group = &skx_uncore_format_group,
4145 };
4146
4147 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4148 &format_attr_event.attr,
4149 &format_attr_umask.attr,
4150 &format_attr_edge.attr,
4151 &format_attr_inv.attr,
4152 &format_attr_thresh8.attr,
4153 &format_attr_occ_invert.attr,
4154 &format_attr_occ_edge_det.attr,
4155 &format_attr_filter_band0.attr,
4156 &format_attr_filter_band1.attr,
4157 &format_attr_filter_band2.attr,
4158 &format_attr_filter_band3.attr,
4159 NULL,
4160 };
4161
4162 static struct attribute_group skx_uncore_pcu_format_group = {
4163 .name = "format",
4164 .attrs = skx_uncore_pcu_formats_attr,
4165 };
4166
4167 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4168 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4169 .hw_config = hswep_pcu_hw_config,
4170 .get_constraint = snbep_pcu_get_constraint,
4171 .put_constraint = snbep_pcu_put_constraint,
4172 };
4173
4174 static struct intel_uncore_type skx_uncore_pcu = {
4175 .name = "pcu",
4176 .num_counters = 4,
4177 .num_boxes = 1,
4178 .perf_ctr_bits = 48,
4179 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4180 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4181 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4182 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4183 .num_shared_regs = 1,
4184 .ops = &skx_uncore_pcu_ops,
4185 .format_group = &skx_uncore_pcu_format_group,
4186 };
4187
4188 static struct intel_uncore_type *skx_msr_uncores[] = {
4189 &skx_uncore_ubox,
4190 &skx_uncore_chabox,
4191 &skx_uncore_iio,
4192 &skx_uncore_iio_free_running,
4193 &skx_uncore_irp,
4194 &skx_uncore_pcu,
4195 NULL,
4196 };
4197
4198 /*
4199 * To determine the number of CHAs, read bits 27:0 of the CAPID6
4200 * register, which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4201 */
4202 #define SKX_CAPID6 0x9c
4203 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4204
4205 static int skx_count_chabox(void)
4206 {
4207 struct pci_dev *dev = NULL;
4208 u32 val = 0;
4209
4210 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4211 if (!dev)
4212 goto out;
4213
4214 pci_read_config_dword(dev, SKX_CAPID6, &val);
4215 val &= SKX_CHA_BIT_MASK;
4216 out:
4217 pci_dev_put(dev);
4218 return hweight32(val);
4219 }
4220
4221 void skx_uncore_cpu_init(void)
4222 {
4223 skx_uncore_chabox.num_boxes = skx_count_chabox();
4224 uncore_msr_uncores = skx_msr_uncores;
4225 }
4226
4227 static struct intel_uncore_type skx_uncore_imc = {
4228 .name = "imc",
4229 .num_counters = 4,
4230 .num_boxes = 6,
4231 .perf_ctr_bits = 48,
4232 .fixed_ctr_bits = 48,
4233 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4234 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4235 .event_descs = hswep_uncore_imc_events,
4236 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4237 .event_ctl = SNBEP_PCI_PMON_CTL0,
4238 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4239 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4240 .ops = &ivbep_uncore_pci_ops,
4241 .format_group = &skx_uncore_format_group,
4242 };
4243
4244 static struct attribute *skx_upi_uncore_formats_attr[] = {
4245 &format_attr_event.attr,
4246 &format_attr_umask_ext.attr,
4247 &format_attr_edge.attr,
4248 &format_attr_inv.attr,
4249 &format_attr_thresh8.attr,
4250 NULL,
4251 };
4252
4253 static const struct attribute_group skx_upi_uncore_format_group = {
4254 .name = "format",
4255 .attrs = skx_upi_uncore_formats_attr,
4256 };
4257
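/* UPI event control registers are 8-byte apart, hence the CTL_OFFS8 flag. */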
4258 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4259 {
4260 struct pci_dev *pdev = box->pci_dev;
4261
4262 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4263 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4264 }
4265
4266 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4267 .init_box = skx_upi_uncore_pci_init_box,
4268 .disable_box = snbep_uncore_pci_disable_box,
4269 .enable_box = snbep_uncore_pci_enable_box,
4270 .disable_event = snbep_uncore_pci_disable_event,
4271 .enable_event = snbep_uncore_pci_enable_event,
4272 .read_counter = snbep_uncore_pci_read_counter,
4273 };
4274
4275 static umode_t
4276 skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4277 {
4278 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
4279
4280 return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
4281 }
4282
4283 static ssize_t skx_upi_mapping_show(struct device *dev,
4284 struct device_attribute *attr, char *buf)
4285 {
4286 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4287 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4288 long die = (long)ea->var;
4289 struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4290
4291 return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4292 }
4293
4294 #define SKX_UPI_REG_DID 0x2058
4295 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0 0x0e
4296 #define SKX_UPI_REGS_ADDR_FUNCTION 0x00
4297
4298 /*
4299 * UPI Link Parameter 0
4300 * | Bit | Default | Description
4301 * | 19:16 | 0h | base_nodeid - The NodeID of the sending socket.
4302 * | 12:8 | 00h | sending_port - The processor die port number of the sending port.
4303 */
4304 #define SKX_KTILP0_OFFSET 0x94
4305
4306 /*
4307 * UPI Pcode Status. This register is used by PCode to store the link training status.
4308 * | Bit | Default | Description
4309 * | 4 | 0h | ll_status_valid — Bit indicates the valid training status
4310 * logged from PCode to the BIOS.
4311 */
4312 #define SKX_KTIPCSTS_OFFSET 0x120
4313
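/*
 * A link is reported only when KTIPCSTS.ll_status_valid is set; the peer
 * die and port then come from KTILP0's base_nodeid and sending_port fields.
 */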
4314 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4315 int pmu_idx)
4316 {
4317 int ret;
4318 u32 upi_conf;
4319 struct uncore_upi_topology *upi = tp->upi;
4320
4321 tp->pmu_idx = pmu_idx;
4322 ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4323 if (ret) {
4324 ret = pcibios_err_to_errno(ret);
4325 goto err;
4326 }
4327 upi->enabled = (upi_conf >> 4) & 1;
4328 if (upi->enabled) {
4329 ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4330 &upi_conf);
4331 if (ret) {
4332 ret = pcibios_err_to_errno(ret);
4333 goto err;
4334 }
4335 upi->die_to = (upi_conf >> 16) & 0xf;
4336 upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4337 }
4338 err:
4339 return ret;
4340 }
4341
4342 static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
4343 int die, u64 cpu_bus_msr)
4344 {
4345 int idx, ret = 0;	/* stays 0 if no UPI device is found */
4346 struct intel_uncore_topology *upi;
4347 unsigned int devfn;
4348 struct pci_dev *dev = NULL;
4349 u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
4350
4351 for (idx = 0; idx < type->num_boxes; idx++) {
4352 upi = &type->topology[die][idx];
4353 devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
4354 SKX_UPI_REGS_ADDR_FUNCTION);
4355 dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4356 if (dev) {
4357 ret = upi_fill_topology(dev, upi, idx);
4358 if (ret)
4359 break;
4360 }
4361 }
4362
4363 pci_dev_put(dev);
4364 return ret;
4365 }
4366
4367 static int skx_upi_get_topology(struct intel_uncore_type *type)
4368 {
4369 /* CPX case is not supported */
4370 if (boot_cpu_data.x86_stepping == 11)
4371 return -EPERM;
4372
4373 return skx_pmu_get_topology(type, skx_upi_topology_cb);
4374 }
4375
4376 static struct attribute_group skx_upi_mapping_group = {
4377 .is_visible = skx_upi_mapping_visible,
4378 };
4379
4380 static const struct attribute_group *skx_upi_attr_update[] = {
4381 &skx_upi_mapping_group,
4382 NULL
4383 };
4384
4385 static void
4386 pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4387 {
4388 pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4389 }
4390
4391 static void skx_upi_set_mapping(struct intel_uncore_type *type)
4392 {
4393 pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4394 }
4395
4396 static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
4397 {
4398 pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4399 }
4400
4401 static struct intel_uncore_type skx_uncore_upi = {
4402 .name = "upi",
4403 .num_counters = 4,
4404 .num_boxes = 3,
4405 .perf_ctr_bits = 48,
4406 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4407 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4408 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4409 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4410 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4411 .ops = &skx_upi_uncore_pci_ops,
4412 .format_group = &skx_upi_uncore_format_group,
4413 .attr_update = skx_upi_attr_update,
4414 .get_topology = skx_upi_get_topology,
4415 .set_mapping = skx_upi_set_mapping,
4416 .cleanup_mapping = skx_upi_cleanup_mapping,
4417 };
4418
4419 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4420 {
4421 struct pci_dev *pdev = box->pci_dev;
4422
4423 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4424 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4425 }
4426
4427 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4428 .init_box = skx_m2m_uncore_pci_init_box,
4429 .disable_box = snbep_uncore_pci_disable_box,
4430 .enable_box = snbep_uncore_pci_enable_box,
4431 .disable_event = snbep_uncore_pci_disable_event,
4432 .enable_event = snbep_uncore_pci_enable_event,
4433 .read_counter = snbep_uncore_pci_read_counter,
4434 };
4435
4436 static struct intel_uncore_type skx_uncore_m2m = {
4437 .name = "m2m",
4438 .num_counters = 4,
4439 .num_boxes = 2,
4440 .perf_ctr_bits = 48,
4441 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4442 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4443 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4444 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4445 .ops = &skx_m2m_uncore_pci_ops,
4446 .format_group = &skx_uncore_format_group,
4447 };
4448
4449 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4450 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4451 EVENT_CONSTRAINT_END
4452 };
4453
4454 static struct intel_uncore_type skx_uncore_m2pcie = {
4455 .name = "m2pcie",
4456 .num_counters = 4,
4457 .num_boxes = 4,
4458 .perf_ctr_bits = 48,
4459 .constraints = skx_uncore_m2pcie_constraints,
4460 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4461 .event_ctl = SNBEP_PCI_PMON_CTL0,
4462 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4463 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4464 .ops = &ivbep_uncore_pci_ops,
4465 .format_group = &skx_uncore_format_group,
4466 };
4467
4468 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4469 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4470 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4471 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4472 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4473 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4474 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4475 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4476 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4477 EVENT_CONSTRAINT_END
4478 };
4479
4480 static struct intel_uncore_type skx_uncore_m3upi = {
4481 .name = "m3upi",
4482 .num_counters = 3,
4483 .num_boxes = 3,
4484 .perf_ctr_bits = 48,
4485 .constraints = skx_uncore_m3upi_constraints,
4486 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4487 .event_ctl = SNBEP_PCI_PMON_CTL0,
4488 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4489 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4490 .ops = &ivbep_uncore_pci_ops,
4491 .format_group = &skx_uncore_format_group,
4492 };
4493
4494 enum {
4495 SKX_PCI_UNCORE_IMC,
4496 SKX_PCI_UNCORE_M2M,
4497 SKX_PCI_UNCORE_UPI,
4498 SKX_PCI_UNCORE_M2PCIE,
4499 SKX_PCI_UNCORE_M3UPI,
4500 };
4501
4502 static struct intel_uncore_type *skx_pci_uncores[] = {
4503 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4504 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4505 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4506 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4507 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
4508 NULL,
4509 };
4510
4511 static const struct pci_device_id skx_uncore_pci_ids[] = {
4512 { /* MC0 Channel 0 */
4513 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4514 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4515 },
4516 { /* MC0 Channel 1 */
4517 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4518 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4519 },
4520 { /* MC0 Channel 2 */
4521 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4522 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4523 },
4524 { /* MC1 Channel 0 */
4525 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4526 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4527 },
4528 { /* MC1 Channel 1 */
4529 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4530 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4531 },
4532 { /* MC1 Channel 2 */
4533 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4534 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4535 },
4536 { /* M2M0 */
4537 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4538 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4539 },
4540 { /* M2M1 */
4541 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4542 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4543 },
4544 { /* UPI0 Link 0 */
4545 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4546 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4547 },
4548 { /* UPI0 Link 1 */
4549 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4550 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4551 },
4552 { /* UPI1 Link 2 */
4553 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4554 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4555 },
4556 { /* M2PCIe 0 */
4557 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4558 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4559 },
4560 { /* M2PCIe 1 */
4561 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4562 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4563 },
4564 { /* M2PCIe 2 */
4565 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4566 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4567 },
4568 { /* M2PCIe 3 */
4569 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4570 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4571 },
4572 { /* M3UPI0 Link 0 */
4573 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4574 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4575 },
4576 { /* M3UPI0 Link 1 */
4577 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4578 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4579 },
4580 { /* M3UPI1 Link 2 */
4581 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4582 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4583 },
4584 { /* end: all zeroes */ }
4585 };
4586
4587
4588 static struct pci_driver skx_uncore_pci_driver = {
4589 .name = "skx_uncore",
4590 .id_table = skx_uncore_pci_ids,
4591 };
4592
4593 int skx_uncore_pci_init(void)
4594 {
4595 /* need to double check pci address */
4596 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4597
4598 if (ret)
4599 return ret;
4600
4601 uncore_pci_uncores = skx_pci_uncores;
4602 uncore_pci_driver = &skx_uncore_pci_driver;
4603 return 0;
4604 }
4605
4606 /* end of SKX uncore support */
4607
4608 /* SNR uncore support */
4609
4610 static struct intel_uncore_type snr_uncore_ubox = {
4611 .name = "ubox",
4612 .num_counters = 2,
4613 .num_boxes = 1,
4614 .perf_ctr_bits = 48,
4615 .fixed_ctr_bits = 48,
4616 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4617 .event_ctl = SNR_U_MSR_PMON_CTL0,
4618 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4619 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4620 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4621 .ops = &ivbep_uncore_msr_ops,
4622 .format_group = &ivbep_uncore_format_group,
4623 };
4624
4625 static struct attribute *snr_uncore_cha_formats_attr[] = {
4626 &format_attr_event.attr,
4627 &format_attr_umask_ext2.attr,
4628 &format_attr_edge.attr,
4629 &format_attr_tid_en.attr,
4630 &format_attr_inv.attr,
4631 &format_attr_thresh8.attr,
4632 &format_attr_filter_tid5.attr,
4633 NULL,
4634 };
4635 static const struct attribute_group snr_uncore_chabox_format_group = {
4636 .name = "format",
4637 .attrs = snr_uncore_cha_formats_attr,
4638 };
4639
4640 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4641 {
4642 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4643
4644 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4645 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4646 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4647 reg1->idx = 0;
4648
4649 return 0;
4650 }
4651
4652 static void snr_cha_enable_event(struct intel_uncore_box *box,
4653 struct perf_event *event)
4654 {
4655 struct hw_perf_event *hwc = &event->hw;
4656 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4657
4658 if (reg1->idx != EXTRA_REG_NONE)
4659 wrmsrq(reg1->reg, reg1->config);
4660
4661 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4662 }
4663
4664 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4665 .init_box = ivbep_uncore_msr_init_box,
4666 .disable_box = snbep_uncore_msr_disable_box,
4667 .enable_box = snbep_uncore_msr_enable_box,
4668 .disable_event = snbep_uncore_msr_disable_event,
4669 .enable_event = snr_cha_enable_event,
4670 .read_counter = uncore_msr_read_counter,
4671 .hw_config = snr_cha_hw_config,
4672 };
4673
4674 static struct intel_uncore_type snr_uncore_chabox = {
4675 .name = "cha",
4676 .num_counters = 4,
4677 .num_boxes = 6,
4678 .perf_ctr_bits = 48,
4679 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4680 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4681 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4682 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4683 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4684 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4685 .ops = &snr_uncore_chabox_ops,
4686 .format_group = &snr_uncore_chabox_format_group,
4687 };
4688
4689 static struct attribute *snr_uncore_iio_formats_attr[] = {
4690 &format_attr_event.attr,
4691 &format_attr_umask.attr,
4692 &format_attr_edge.attr,
4693 &format_attr_inv.attr,
4694 &format_attr_thresh9.attr,
4695 &format_attr_ch_mask2.attr,
4696 &format_attr_fc_mask2.attr,
4697 NULL,
4698 };
4699
4700 static const struct attribute_group snr_uncore_iio_format_group = {
4701 .name = "format",
4702 .attrs = snr_uncore_iio_formats_attr,
4703 };
4704
4705 static umode_t
4706 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4707 {
4708 /* Root bus 0x00 is valid only for pmu_idx = 1. */
4709 return pmu_iio_mapping_visible(kobj, attr, die, 1);
4710 }
4711
4712 static struct attribute_group snr_iio_mapping_group = {
4713 .is_visible = snr_iio_mapping_visible,
4714 };
4715
4716 static const struct attribute_group *snr_iio_attr_update[] = {
4717 &snr_iio_mapping_group,
4718 NULL,
4719 };
4720
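/*
 * Walk all Mesh2IIO devices and, from each SAD_CONTROL_CFG, record the
 * segment and bus of every IIO stack under its PMON stack id.
 */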
4721 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4722 {
4723 u32 sad_cfg;
4724 int die, stack_id, ret = -EPERM;
4725 struct pci_dev *dev = NULL;
4726
4727 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4728 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4729 if (ret) {
4730 ret = pcibios_err_to_errno(ret);
4731 break;
4732 }
4733
4734 die = uncore_pcibus_to_dieid(dev->bus);
4735 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4736 if (die < 0 || stack_id >= type->num_boxes) {
4737 ret = -EPERM;
4738 break;
4739 }
4740
4741 /* Convert stack id from SAD_CONTROL to PMON notation. */
4742 stack_id = sad_pmon_mapping[stack_id];
4743
4744 type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4745 type->topology[die][stack_id].pmu_idx = stack_id;
4746 type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4747 }
4748
4749 pci_dev_put(dev);
4750
4751 return ret;
4752 }
4753
4754 /*
4755 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
4756 */
4757 enum {
4758 SNR_QAT_PMON_ID,
4759 SNR_CBDMA_DMI_PMON_ID,
4760 SNR_NIS_PMON_ID,
4761 SNR_DLB_PMON_ID,
4762 SNR_PCIE_GEN3_PMON_ID
4763 };
4764
4765 static u8 snr_sad_pmon_mapping[] = {
4766 SNR_CBDMA_DMI_PMON_ID,
4767 SNR_PCIE_GEN3_PMON_ID,
4768 SNR_DLB_PMON_ID,
4769 SNR_NIS_PMON_ID,
4770 SNR_QAT_PMON_ID
4771 };
4772
4773 static int snr_iio_get_topology(struct intel_uncore_type *type)
4774 {
4775 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4776 }
4777
4778 static void snr_iio_set_mapping(struct intel_uncore_type *type)
4779 {
4780 pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4781 }
4782
4783 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4784 {
4785 pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4786 }
4787
4788 static struct event_constraint snr_uncore_iio_constraints[] = {
4789 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4790 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4791 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
4792 EVENT_CONSTRAINT_END
4793 };
4794
4795 static struct intel_uncore_type snr_uncore_iio = {
4796 .name = "iio",
4797 .num_counters = 4,
4798 .num_boxes = 5,
4799 .perf_ctr_bits = 48,
4800 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4801 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4802 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4803 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4804 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4805 .msr_offset = SNR_IIO_MSR_OFFSET,
4806 .constraints = snr_uncore_iio_constraints,
4807 .ops = &ivbep_uncore_msr_ops,
4808 .format_group = &snr_uncore_iio_format_group,
4809 .attr_update = snr_iio_attr_update,
4810 .get_topology = snr_iio_get_topology,
4811 .set_mapping = snr_iio_set_mapping,
4812 .cleanup_mapping = snr_iio_cleanup_mapping,
4813 };
4814
4815 static struct intel_uncore_type snr_uncore_irp = {
4816 .name = "irp",
4817 .num_counters = 2,
4818 .num_boxes = 5,
4819 .perf_ctr_bits = 48,
4820 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4821 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4822 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4823 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4824 .msr_offset = SNR_IRP_MSR_OFFSET,
4825 .ops = &ivbep_uncore_msr_ops,
4826 .format_group = &ivbep_uncore_format_group,
4827 };
4828
4829 static struct intel_uncore_type snr_uncore_m2pcie = {
4830 .name = "m2pcie",
4831 .num_counters = 4,
4832 .num_boxes = 5,
4833 .perf_ctr_bits = 48,
4834 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4835 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4836 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4837 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4838 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4839 .ops = &ivbep_uncore_msr_ops,
4840 .format_group = &ivbep_uncore_format_group,
4841 };
4842
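/*
 * Events 0xb-0xe use the PCU band filter; stash the matching config1 bits
 * in the shared filter register for them.
 */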
4843 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4844 {
4845 struct hw_perf_event *hwc = &event->hw;
4846 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4847 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4848
4849 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4850 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4851 reg1->idx = ev_sel - 0xb;
4852 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4853 }
4854 return 0;
4855 }
4856
4857 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4858 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4859 .hw_config = snr_pcu_hw_config,
4860 .get_constraint = snbep_pcu_get_constraint,
4861 .put_constraint = snbep_pcu_put_constraint,
4862 };
4863
4864 static struct intel_uncore_type snr_uncore_pcu = {
4865 .name = "pcu",
4866 .num_counters = 4,
4867 .num_boxes = 1,
4868 .perf_ctr_bits = 48,
4869 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4870 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4871 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4872 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4873 .num_shared_regs = 1,
4874 .ops = &snr_uncore_pcu_ops,
4875 .format_group = &skx_uncore_pcu_format_group,
4876 };
4877
4878 enum perf_uncore_snr_iio_freerunning_type_id {
4879 SNR_IIO_MSR_IOCLK,
4880 SNR_IIO_MSR_BW_IN,
4881
4882 SNR_IIO_FREERUNNING_TYPE_MAX,
4883 };
4884
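/* Fields: counter base, counter stride, box stride, #counters, width in bits. */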
4885 static struct freerunning_counters snr_iio_freerunning[] = {
4886 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4887 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4888 };
4889
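/*
 * The bw_in counters appear to tick in 32-byte units: the
 * 3.0517578125e-5 scale below (32 / 2^20) converts counts to MiB.
 */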
4890 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4891 /* Free-Running IIO CLOCKS Counter */
4892 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4893 /* Free-Running IIO BANDWIDTH IN Counters */
4894 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4895 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
4896 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4897 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4898 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
4899 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4900 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4901 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
4902 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4903 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4904 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
4905 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4906 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4907 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
4908 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4909 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4910 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
4911 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4912 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4913 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
4914 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4915 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4916 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
4917 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4918 { /* end: all zeroes */ },
4919 };
4920
4921 static struct intel_uncore_type snr_uncore_iio_free_running = {
4922 .name = "iio_free_running",
4923 .num_counters = 9,
4924 .num_boxes = 5,
4925 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4926 .freerunning = snr_iio_freerunning,
4927 .ops = &skx_uncore_iio_freerunning_ops,
4928 .event_descs = snr_uncore_iio_freerunning_events,
4929 .format_group = &skx_uncore_iio_freerunning_format_group,
4930 };
4931
4932 static struct intel_uncore_type *snr_msr_uncores[] = {
4933 &snr_uncore_ubox,
4934 &snr_uncore_chabox,
4935 &snr_uncore_iio,
4936 &snr_uncore_irp,
4937 &snr_uncore_m2pcie,
4938 &snr_uncore_pcu,
4939 &snr_uncore_iio_free_running,
4940 NULL,
4941 };
4942
4943 void snr_uncore_cpu_init(void)
4944 {
4945 uncore_msr_uncores = snr_msr_uncores;
4946 }
4947
4948 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4949 {
4950 struct pci_dev *pdev = box->pci_dev;
4951 int box_ctl = uncore_pci_box_ctl(box);
4952
4953 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4954 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4955 }
4956
4957 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4958 .init_box = snr_m2m_uncore_pci_init_box,
4959 .disable_box = snbep_uncore_pci_disable_box,
4960 .enable_box = snbep_uncore_pci_enable_box,
4961 .disable_event = snbep_uncore_pci_disable_event,
4962 .enable_event = snbep_uncore_pci_enable_event,
4963 .read_counter = snbep_uncore_pci_read_counter,
4964 };
4965
4966 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4967 &format_attr_event.attr,
4968 &format_attr_umask_ext3.attr,
4969 &format_attr_edge.attr,
4970 &format_attr_inv.attr,
4971 &format_attr_thresh8.attr,
4972 NULL,
4973 };
4974
4975 static const struct attribute_group snr_m2m_uncore_format_group = {
4976 .name = "format",
4977 .attrs = snr_m2m_uncore_formats_attr,
4978 };
4979
4980 static struct intel_uncore_type snr_uncore_m2m = {
4981 .name = "m2m",
4982 .num_counters = 4,
4983 .num_boxes = 1,
4984 .perf_ctr_bits = 48,
4985 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4986 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4987 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4988 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4989 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4990 .ops = &snr_m2m_uncore_pci_ops,
4991 .format_group = &snr_m2m_uncore_format_group,
4992 };
4993
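/*
 * The event control register here is 64 bits wide in PCI config space:
 * the low dword carries the enable bit, the high dword carries the
 * extended umask bits, hence the two 32-bit writes below.
 */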
4994 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4995 {
4996 struct pci_dev *pdev = box->pci_dev;
4997 struct hw_perf_event *hwc = &event->hw;
4998
4999 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
5000 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5001 }
5002
5003 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
5004 .init_box = snr_m2m_uncore_pci_init_box,
5005 .disable_box = snbep_uncore_pci_disable_box,
5006 .enable_box = snbep_uncore_pci_enable_box,
5007 .disable_event = snbep_uncore_pci_disable_event,
5008 .enable_event = snr_uncore_pci_enable_event,
5009 .read_counter = snbep_uncore_pci_read_counter,
5010 };
5011
5012 static struct intel_uncore_type snr_uncore_pcie3 = {
5013 .name = "pcie3",
5014 .num_counters = 4,
5015 .num_boxes = 1,
5016 .perf_ctr_bits = 48,
5017 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
5018 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
5019 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
5020 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
5021 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
5022 .ops = &snr_pcie3_uncore_pci_ops,
5023 .format_group = &skx_uncore_iio_format_group,
5024 };
5025
5026 enum {
5027 SNR_PCI_UNCORE_M2M,
5028 SNR_PCI_UNCORE_PCIE3,
5029 };
5030
5031 static struct intel_uncore_type *snr_pci_uncores[] = {
5032 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
5033 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
5034 NULL,
5035 };
5036
5037 static const struct pci_device_id snr_uncore_pci_ids[] = {
5038 { /* M2M */
5039 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5040 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
5041 },
5042 { /* end: all zeroes */ }
5043 };
5044
5045 static struct pci_driver snr_uncore_pci_driver = {
5046 .name = "snr_uncore",
5047 .id_table = snr_uncore_pci_ids,
5048 };
5049
5050 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
5051 { /* PCIe3 RP */
5052 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
5053 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
5054 },
5055 { /* end: all zeroes */ }
5056 };
5057
5058 static struct pci_driver snr_uncore_pci_sub_driver = {
5059 .name = "snr_uncore_sub",
5060 .id_table = snr_uncore_pci_sub_ids,
5061 };
5062
5063 int snr_uncore_pci_init(void)
5064 {
5065 /* SNR UBOX DID */
5066 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
5067 SKX_GIDNIDMAP, true);
5068
5069 if (ret)
5070 return ret;
5071
5072 uncore_pci_uncores = snr_pci_uncores;
5073 uncore_pci_driver = &snr_uncore_pci_driver;
5074 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
5075 return 0;
5076 }
5077
5078 #define SNR_MC_DEVICE_ID 0x3451
5079
5080 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
5081 {
5082 struct pci_dev *mc_dev = NULL;
5083 int pkg;
5084
5085 while (1) {
5086 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
5087 if (!mc_dev)
5088 break;
5089 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
5090 if (pkg == id)
5091 break;
5092 }
5093 return mc_dev;
5094 }
5095
5096 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
5097 unsigned int box_ctl, int mem_offset,
5098 unsigned int device)
5099 {
5100 struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
5101 struct intel_uncore_type *type = box->pmu->type;
5102 resource_size_t addr;
5103 u32 pci_dword;
5104
5105 if (!pdev)
5106 return -ENODEV;
5107
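/*
 * Assemble the PMON MMIO address from two registers in the MC device's
 * config space: the MMIO base (the shift by 23 implies 8MiB granularity)
 * and the MEM0 offset (the shift by 12 implies 4KiB granularity).
 */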
5108 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
5109 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
5110
5111 pci_read_config_dword(pdev, mem_offset, &pci_dword);
5112 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
5113
5114 addr += box_ctl;
5115
5116 pci_dev_put(pdev);
5117
5118 box->io_addr = ioremap(addr, type->mmio_map_size);
5119 if (!box->io_addr) {
5120 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5121 return -EINVAL;
5122 }
5123
5124 return 0;
5125 }
5126
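/*
 * snr_uncore_mmio_map() maps the region starting at the box control
 * register, so writing to box->io_addr directly hits BOX_CTL and puts
 * the box into its initial state.
 */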
5127 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
5128 unsigned int box_ctl, int mem_offset,
5129 unsigned int device)
5130 {
5131 if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
5132 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5133 }
5134
5135 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
5136 {
5137 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5138 SNR_IMC_MMIO_MEM0_OFFSET,
5139 SNR_MC_DEVICE_ID);
5140 }
5141
5142 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5143 {
5144 u32 config;
5145
5146 if (!box->io_addr)
5147 return;
5148
5149 config = readl(box->io_addr);
5150 config |= SNBEP_PMON_BOX_CTL_FRZ;
5151 writel(config, box->io_addr);
5152 }
5153
5154 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5155 {
5156 u32 config;
5157
5158 if (!box->io_addr)
5159 return;
5160
5161 config = readl(box->io_addr);
5162 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5163 writel(config, box->io_addr);
5164 }
5165
5166 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5167 struct perf_event *event)
5168 {
5169 struct hw_perf_event *hwc = &event->hw;
5170
5171 if (!box->io_addr)
5172 return;
5173
5174 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5175 return;
5176
5177 writel(hwc->config | SNBEP_PMON_CTL_EN,
5178 box->io_addr + hwc->config_base);
5179 }
5180
5181 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
5182 struct perf_event *event)
5183 {
5184 struct hw_perf_event *hwc = &event->hw;
5185
5186 if (!box->io_addr)
5187 return;
5188
5189 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5190 return;
5191
5192 writel(hwc->config, box->io_addr + hwc->config_base);
5193 }
5194
5195 static struct intel_uncore_ops snr_uncore_mmio_ops = {
5196 .init_box = snr_uncore_mmio_init_box,
5197 .exit_box = uncore_mmio_exit_box,
5198 .disable_box = snr_uncore_mmio_disable_box,
5199 .enable_box = snr_uncore_mmio_enable_box,
5200 .disable_event = snr_uncore_mmio_disable_event,
5201 .enable_event = snr_uncore_mmio_enable_event,
5202 .read_counter = uncore_mmio_read_counter,
5203 };
5204
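/*
 * cas_count_read/write count 64-byte cache lines; the 6.103515625e-5
 * scale below (64 / 2^20) converts them to MiB.
 */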
5205 static struct uncore_event_desc snr_uncore_imc_events[] = {
5206 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
5207 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
5208 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
5209 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
5210 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
5211 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
5212 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
5213 { /* end: all zeroes */ },
5214 };
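/*
 * Illustrative usage from the perf tool, assuming the PMU registers as
 * uncore_imc_0 (the tool applies the .scale/.unit attributes itself):
 *
 *   perf stat -a -e uncore_imc_0/cas_count_read/ -- sleep 1
 */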
5215
5216 static struct intel_uncore_type snr_uncore_imc = {
5217 .name = "imc",
5218 .num_counters = 4,
5219 .num_boxes = 2,
5220 .perf_ctr_bits = 48,
5221 .fixed_ctr_bits = 48,
5222 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5223 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5224 .event_descs = snr_uncore_imc_events,
5225 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5226 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5227 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5228 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5229 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5230 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5231 .ops = &snr_uncore_mmio_ops,
5232 .format_group = &skx_uncore_format_group,
5233 };
5234
5235 enum perf_uncore_snr_imc_freerunning_type_id {
5236 SNR_IMC_DCLK,
5237 SNR_IMC_DDR,
5238
5239 SNR_IMC_FREERUNNING_TYPE_MAX,
5240 };
5241
5242 static struct freerunning_counters snr_imc_freerunning[] = {
5243 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5244 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5245 };
5246
5247 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
5248 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5249
5250 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5251 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5252 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5253 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5254 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5255 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5256 { /* end: all zeroes */ },
5257 };
5258
5259 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
5260 .init_box = snr_uncore_mmio_init_box,
5261 .exit_box = uncore_mmio_exit_box,
5262 .read_counter = uncore_mmio_read_counter,
5263 .hw_config = uncore_freerunning_hw_config,
5264 };
5265
5266 static struct intel_uncore_type snr_uncore_imc_free_running = {
5267 .name = "imc_free_running",
5268 .num_counters = 3,
5269 .num_boxes = 1,
5270 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
5271 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5272 .freerunning = snr_imc_freerunning,
5273 .ops = &snr_uncore_imc_freerunning_ops,
5274 .event_descs = snr_uncore_imc_freerunning_events,
5275 .format_group = &skx_uncore_iio_freerunning_format_group,
5276 };
5277
5278 static struct intel_uncore_type *snr_mmio_uncores[] = {
5279 &snr_uncore_imc,
5280 &snr_uncore_imc_free_running,
5281 NULL,
5282 };
5283
5284 void snr_uncore_mmio_init(void)
5285 {
5286 uncore_mmio_uncores = snr_mmio_uncores;
5287 }
5288
5289 /* end of SNR uncore support */
5290
5291 /* ICX uncore support */
5292
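/*
 * Per-CHA offsets, relative to the C34 PMON base registers and indexed
 * by the CHA PMU index (see icx_cha_hw_config()).
 */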
5293 static u64 icx_cha_msr_offsets[] = {
5294 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5295 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5296 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5297 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
5298 0x1c, 0x2a, 0x38, 0x46,
5299 };
5300
5301 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5302 {
5303 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5304 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5305
5306 if (tie_en) {
5307 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5308 icx_cha_msr_offsets[box->pmu->pmu_idx];
5309 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5310 reg1->idx = 0;
5311 }
5312
5313 return 0;
5314 }
5315
5316 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5317 .init_box = ivbep_uncore_msr_init_box,
5318 .disable_box = snbep_uncore_msr_disable_box,
5319 .enable_box = snbep_uncore_msr_enable_box,
5320 .disable_event = snbep_uncore_msr_disable_event,
5321 .enable_event = snr_cha_enable_event,
5322 .read_counter = uncore_msr_read_counter,
5323 .hw_config = icx_cha_hw_config,
5324 };
5325
5326 static struct intel_uncore_type icx_uncore_chabox = {
5327 .name = "cha",
5328 .num_counters = 4,
5329 .perf_ctr_bits = 48,
5330 .event_ctl = ICX_C34_MSR_PMON_CTL0,
5331 .perf_ctr = ICX_C34_MSR_PMON_CTR0,
5332 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
5333 .msr_offsets = icx_cha_msr_offsets,
5334 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5335 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
5336 .constraints = skx_uncore_chabox_constraints,
5337 .ops = &icx_uncore_chabox_ops,
5338 .format_group = &snr_uncore_chabox_format_group,
5339 };
5340
5341 static u64 icx_msr_offsets[] = {
5342 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5343 };
5344
5345 static struct event_constraint icx_uncore_iio_constraints[] = {
5346 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5347 UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5348 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5349 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
5350 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5351 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5352 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
5353 EVENT_CONSTRAINT_END
5354 };
5355
5356 static umode_t
5357 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5358 {
5359 /* Root bus 0x00 is valid only for pmu_idx = 5. */
5360 return pmu_iio_mapping_visible(kobj, attr, die, 5);
5361 }
5362
5363 static struct attribute_group icx_iio_mapping_group = {
5364 .is_visible = icx_iio_mapping_visible,
5365 };
5366
5367 static const struct attribute_group *icx_iio_attr_update[] = {
5368 &icx_iio_mapping_group,
5369 NULL,
5370 };
5371
5372 /*
5373 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5374 */
5375 enum {
5376 ICX_PCIE1_PMON_ID,
5377 ICX_PCIE2_PMON_ID,
5378 ICX_PCIE3_PMON_ID,
5379 ICX_PCIE4_PMON_ID,
5380 ICX_PCIE5_PMON_ID,
5381 ICX_CBDMA_DMI_PMON_ID
5382 };
5383
5384 static u8 icx_sad_pmon_mapping[] = {
5385 ICX_CBDMA_DMI_PMON_ID,
5386 ICX_PCIE1_PMON_ID,
5387 ICX_PCIE2_PMON_ID,
5388 ICX_PCIE3_PMON_ID,
5389 ICX_PCIE4_PMON_ID,
5390 ICX_PCIE5_PMON_ID,
5391 };
5392
5393 static int icx_iio_get_topology(struct intel_uncore_type *type)
5394 {
5395 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5396 }
5397
5398 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5399 {
5400 /* Detect an ICX-D system. This case is not supported. */
5401 if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
5402 pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5403 return;
5404 }
5405 pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5406 }
5407
5408 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5409 {
5410 pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5411 }
5412
5413 static struct intel_uncore_type icx_uncore_iio = {
5414 .name = "iio",
5415 .num_counters = 4,
5416 .num_boxes = 6,
5417 .perf_ctr_bits = 48,
5418 .event_ctl = ICX_IIO_MSR_PMON_CTL0,
5419 .perf_ctr = ICX_IIO_MSR_PMON_CTR0,
5420 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5421 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5422 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
5423 .msr_offsets = icx_msr_offsets,
5424 .constraints = icx_uncore_iio_constraints,
5425 .ops = &skx_uncore_iio_ops,
5426 .format_group = &snr_uncore_iio_format_group,
5427 .attr_update = icx_iio_attr_update,
5428 .get_topology = icx_iio_get_topology,
5429 .set_mapping = icx_iio_set_mapping,
5430 .cleanup_mapping = icx_iio_cleanup_mapping,
5431 };
5432
5433 static struct intel_uncore_type icx_uncore_irp = {
5434 .name = "irp",
5435 .num_counters = 2,
5436 .num_boxes = 6,
5437 .perf_ctr_bits = 48,
5438 .event_ctl = ICX_IRP0_MSR_PMON_CTL0,
5439 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
5440 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5441 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
5442 .msr_offsets = icx_msr_offsets,
5443 .ops = &ivbep_uncore_msr_ops,
5444 .format_group = &ivbep_uncore_format_group,
5445 };
5446
5447 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5448 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5449 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5450 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5451 EVENT_CONSTRAINT_END
5452 };
5453
5454 static struct intel_uncore_type icx_uncore_m2pcie = {
5455 .name = "m2pcie",
5456 .num_counters = 4,
5457 .num_boxes = 6,
5458 .perf_ctr_bits = 48,
5459 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
5460 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
5461 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
5462 .msr_offsets = icx_msr_offsets,
5463 .constraints = icx_uncore_m2pcie_constraints,
5464 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5465 .ops = &ivbep_uncore_msr_ops,
5466 .format_group = &ivbep_uncore_format_group,
5467 };
5468
5469 enum perf_uncore_icx_iio_freerunning_type_id {
5470 ICX_IIO_MSR_IOCLK,
5471 ICX_IIO_MSR_BW_IN,
5472
5473 ICX_IIO_FREERUNNING_TYPE_MAX,
5474 };
5475
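/*
 * The ICX IIO free-running counter blocks are not uniformly spaced, so
 * per-box offset tables are used instead of a single box stride.
 */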
5476 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5477 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5478 };
5479
5480 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5481 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5482 };
5483
5484 static struct freerunning_counters icx_iio_freerunning[] = {
5485 [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5486 [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5487 };
5488
5489 static struct intel_uncore_type icx_uncore_iio_free_running = {
5490 .name = "iio_free_running",
5491 .num_counters = 9,
5492 .num_boxes = 6,
5493 .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
5494 .freerunning = icx_iio_freerunning,
5495 .ops = &skx_uncore_iio_freerunning_ops,
5496 .event_descs = snr_uncore_iio_freerunning_events,
5497 .format_group = &skx_uncore_iio_freerunning_format_group,
5498 };
5499
5500 static struct intel_uncore_type *icx_msr_uncores[] = {
5501 &skx_uncore_ubox,
5502 &icx_uncore_chabox,
5503 &icx_uncore_iio,
5504 &icx_uncore_irp,
5505 &icx_uncore_m2pcie,
5506 &skx_uncore_pcu,
5507 &icx_uncore_iio_free_running,
5508 NULL,
5509 };
5510
5511 /*
5512 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5513 * registers, which are located at Device 30, Function 3.
5514 */
5515 #define ICX_CAPID6 0x9c
5516 #define ICX_CAPID7 0xa0
5517
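/*
 * CAPID6 supplies the low 32 bits and CAPID7 the high 32 bits of the
 * capability mask; each set bit corresponds to an available CHA.
 */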
5518 static u64 icx_count_chabox(void)
5519 {
5520 struct pci_dev *dev = NULL;
5521 u64 caps = 0;
5522
5523 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5524 if (!dev)
5525 goto out;
5526
5527 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5528 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5529 out:
5530 pci_dev_put(dev);
5531 return hweight64(caps);
5532 }
5533
5534 void icx_uncore_cpu_init(void)
5535 {
5536 u64 num_boxes = icx_count_chabox();
5537
5538 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5539 return;
5540 icx_uncore_chabox.num_boxes = num_boxes;
5541 uncore_msr_uncores = icx_msr_uncores;
5542 }
5543
5544 static struct intel_uncore_type icx_uncore_m2m = {
5545 .name = "m2m",
5546 .num_counters = 4,
5547 .num_boxes = 4,
5548 .perf_ctr_bits = 48,
5549 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
5550 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
5551 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5552 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
5553 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
5554 .ops = &snr_m2m_uncore_pci_ops,
5555 .format_group = &snr_m2m_uncore_format_group,
5556 };
5557
5558 static struct attribute *icx_upi_uncore_formats_attr[] = {
5559 &format_attr_event.attr,
5560 &format_attr_umask_ext4.attr,
5561 &format_attr_edge.attr,
5562 &format_attr_inv.attr,
5563 &format_attr_thresh8.attr,
5564 NULL,
5565 };
5566
5567 static const struct attribute_group icx_upi_uncore_format_group = {
5568 .name = "format",
5569 .attrs = icx_upi_uncore_formats_attr,
5570 };
5571
5572 #define ICX_UPI_REGS_ADDR_DEVICE_LINK0 0x02
5573 #define ICX_UPI_REGS_ADDR_FUNCTION 0x01
5574
5575 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5576 {
5577 struct pci_dev *ubox = NULL;
5578 struct pci_dev *dev = NULL;
5579 u32 nid, gid;
5580 int idx, lgc_pkg, ret = -EPERM;
5581 struct intel_uncore_topology *upi;
5582 unsigned int devfn;
5583
5584 /* The GIDNIDMAP method supports machines with at most 8 sockets. */
5585 if (uncore_max_dies() > 8)
5586 goto err;
5587
5588 while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5589 ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5590 if (ret) {
5591 ret = pcibios_err_to_errno(ret);
5592 break;
5593 }
5594
5595 lgc_pkg = topology_gidnid_map(nid, gid);
5596 if (lgc_pkg < 0) {
5597 ret = -EPERM;
5598 goto err;
5599 }
5600 for (idx = 0; idx < type->num_boxes; idx++) {
5601 upi = &type->topology[lgc_pkg][idx];
5602 devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5603 dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5604 ubox->bus->number,
5605 devfn);
5606 if (dev) {
5607 ret = upi_fill_topology(dev, upi, idx);
5608 if (ret)
5609 goto err;
5610 }
5611 }
5612 }
5613 err:
5614 pci_dev_put(ubox);
5615 pci_dev_put(dev);
5616 return ret;
5617 }
5618
5619 static int icx_upi_get_topology(struct intel_uncore_type *type)
5620 {
5621 return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5622 }
5623
5624 static struct attribute_group icx_upi_mapping_group = {
5625 .is_visible = skx_upi_mapping_visible,
5626 };
5627
5628 static const struct attribute_group *icx_upi_attr_update[] = {
5629 &icx_upi_mapping_group,
5630 NULL
5631 };
5632
5633 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5634 {
5635 pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5636 }
5637
5638 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5639 {
5640 pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5641 }
5642
5643 static struct intel_uncore_type icx_uncore_upi = {
5644 .name = "upi",
5645 .num_counters = 4,
5646 .num_boxes = 3,
5647 .perf_ctr_bits = 48,
5648 .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
5649 .event_ctl = ICX_UPI_PCI_PMON_CTL0,
5650 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5651 .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5652 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
5653 .ops = &skx_upi_uncore_pci_ops,
5654 .format_group = &icx_upi_uncore_format_group,
5655 .attr_update = icx_upi_attr_update,
5656 .get_topology = icx_upi_get_topology,
5657 .set_mapping = icx_upi_set_mapping,
5658 .cleanup_mapping = icx_upi_cleanup_mapping,
5659 };
5660
5661 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5662 UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5663 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5664 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5665 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5666 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5667 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5668 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5669 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5670 EVENT_CONSTRAINT_END
5671 };
5672
5673 static struct intel_uncore_type icx_uncore_m3upi = {
5674 .name = "m3upi",
5675 .num_counters = 4,
5676 .num_boxes = 3,
5677 .perf_ctr_bits = 48,
5678 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
5679 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
5680 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5681 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
5682 .constraints = icx_uncore_m3upi_constraints,
5683 .ops = &ivbep_uncore_pci_ops,
5684 .format_group = &skx_uncore_format_group,
5685 };
5686
5687 enum {
5688 ICX_PCI_UNCORE_M2M,
5689 ICX_PCI_UNCORE_UPI,
5690 ICX_PCI_UNCORE_M3UPI,
5691 };
5692
5693 static struct intel_uncore_type *icx_pci_uncores[] = {
5694 [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
5695 [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
5696 [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
5697 NULL,
5698 };
5699
5700 static const struct pci_device_id icx_uncore_pci_ids[] = {
5701 { /* M2M 0 */
5702 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5703 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5704 },
5705 { /* M2M 1 */
5706 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5707 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5708 },
5709 { /* M2M 2 */
5710 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5711 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5712 },
5713 { /* M2M 3 */
5714 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5715 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5716 },
5717 { /* UPI Link 0 */
5718 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5719 .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5720 },
5721 { /* UPI Link 1 */
5722 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5723 .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5724 },
5725 { /* UPI Link 2 */
5726 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5727 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5728 },
5729 { /* M3UPI Link 0 */
5730 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5731 .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5732 },
5733 { /* M3UPI Link 1 */
5734 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5735 .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5736 },
5737 { /* M3UPI Link 2 */
5738 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5739 .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5740 },
5741 { /* end: all zeroes */ }
5742 };
5743
5744 static struct pci_driver icx_uncore_pci_driver = {
5745 .name = "icx_uncore",
5746 .id_table = icx_uncore_pci_ids,
5747 };
5748
5749 int icx_uncore_pci_init(void)
5750 {
5751 /* ICX UBOX DID */
5752 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5753 SKX_GIDNIDMAP, true);
5754
5755 if (ret)
5756 return ret;
5757
5758 uncore_pci_uncores = icx_pci_uncores;
5759 uncore_pci_driver = &icx_uncore_pci_driver;
5760 return 0;
5761 }
5762
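/*
 * pmu_idx enumerates IMC channels across memory controllers:
 * (pmu_idx % ICX_NUMBER_IMC_CHN) is the channel within a controller and
 * (pmu_idx / ICX_NUMBER_IMC_CHN) selects the controller, whose MEM
 * offset registers are ICX_IMC_MEM_STRIDE apart.
 */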
5763 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5764 {
5765 unsigned int box_ctl = box->pmu->type->box_ctl +
5766 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5767 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5768 SNR_IMC_MMIO_MEM0_OFFSET;
5769
5770 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5771 SNR_MC_DEVICE_ID);
5772 }
5773
5774 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5775 .init_box = icx_uncore_imc_init_box,
5776 .exit_box = uncore_mmio_exit_box,
5777 .disable_box = snr_uncore_mmio_disable_box,
5778 .enable_box = snr_uncore_mmio_enable_box,
5779 .disable_event = snr_uncore_mmio_disable_event,
5780 .enable_event = snr_uncore_mmio_enable_event,
5781 .read_counter = uncore_mmio_read_counter,
5782 };
5783
5784 static struct intel_uncore_type icx_uncore_imc = {
5785 .name = "imc",
5786 .num_counters = 4,
5787 .num_boxes = 12,
5788 .perf_ctr_bits = 48,
5789 .fixed_ctr_bits = 48,
5790 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5791 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5792 .event_descs = snr_uncore_imc_events,
5793 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5794 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5795 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5796 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5797 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5798 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5799 .ops = &icx_uncore_mmio_ops,
5800 .format_group = &skx_uncore_format_group,
5801 };
5802
5803 enum perf_uncore_icx_imc_freerunning_type_id {
5804 ICX_IMC_DCLK,
5805 ICX_IMC_DDR,
5806 ICX_IMC_DDRT,
5807
5808 ICX_IMC_FREERUNNING_TYPE_MAX,
5809 };
5810
5811 static struct freerunning_counters icx_imc_freerunning[] = {
5812 [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5813 [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5814 [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5815 };
5816
5817 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5818 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5819
5820 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5821 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5822 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5823 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5824 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5825 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5826
5827 INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5828 INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5829 INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5830 INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5831 INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5832 INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5833 { /* end: all zeroes */ },
5834 };
5835
5836 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5837 {
5838 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5839 SNR_IMC_MMIO_MEM0_OFFSET;
5840
5841 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5842 mem_offset, SNR_MC_DEVICE_ID);
5843 }
5844
5845 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5846 .init_box = icx_uncore_imc_freerunning_init_box,
5847 .exit_box = uncore_mmio_exit_box,
5848 .read_counter = uncore_mmio_read_counter,
5849 .hw_config = uncore_freerunning_hw_config,
5850 };
5851
5852 static struct intel_uncore_type icx_uncore_imc_free_running = {
5853 .name = "imc_free_running",
5854 .num_counters = 5,
5855 .num_boxes = 4,
5856 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5857 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5858 .freerunning = icx_imc_freerunning,
5859 .ops = &icx_uncore_imc_freerunning_ops,
5860 .event_descs = icx_uncore_imc_freerunning_events,
5861 .format_group = &skx_uncore_iio_freerunning_format_group,
5862 };
5863
5864 static struct intel_uncore_type *icx_mmio_uncores[] = {
5865 &icx_uncore_imc,
5866 &icx_uncore_imc_free_running,
5867 NULL,
5868 };
5869
5870 void icx_uncore_mmio_init(void)
5871 {
5872 uncore_mmio_uncores = icx_mmio_uncores;
5873 }
5874
5875 /* end of ICX uncore support */
5876
5877 /* SPR uncore support */
5878
5879 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5880 struct perf_event *event)
5881 {
5882 struct hw_perf_event *hwc = &event->hw;
5883 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5884
5885 if (reg1->idx != EXTRA_REG_NONE)
5886 wrmsrq(reg1->reg, reg1->config);
5887
5888 wrmsrq(hwc->config_base, hwc->config);
5889 }
5890
5891 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5892 struct perf_event *event)
5893 {
5894 struct hw_perf_event *hwc = &event->hw;
5895 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5896
5897 if (reg1->idx != EXTRA_REG_NONE)
5898 wrmsrq(reg1->reg, 0);
5899
5900 wrmsrq(hwc->config_base, 0);
5901 }
5902
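/*
 * Unlike ICX, which indexes a static offset table by pmu_idx, SPR looks
 * the unit ID up in the discovery tree and spaces the filter MSRs by
 * HSWEP_CBO_MSR_OFFSET.
 */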
5903 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5904 {
5905 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5906 bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5907 struct intel_uncore_type *type = box->pmu->type;
5908 int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx);
5909
5910 if (tie_en) {
5911 reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5912 HSWEP_CBO_MSR_OFFSET * id;
5913 reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5914 reg1->idx = 0;
5915 }
5916
5917 return 0;
5918 }
5919
5920 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5921 .init_box = intel_generic_uncore_msr_init_box,
5922 .disable_box = intel_generic_uncore_msr_disable_box,
5923 .enable_box = intel_generic_uncore_msr_enable_box,
5924 .disable_event = spr_uncore_msr_disable_event,
5925 .enable_event = spr_uncore_msr_enable_event,
5926 .read_counter = uncore_msr_read_counter,
5927 .hw_config = spr_cha_hw_config,
5928 .get_constraint = uncore_get_constraint,
5929 .put_constraint = uncore_put_constraint,
5930 };
5931
5932 static struct attribute *spr_uncore_cha_formats_attr[] = {
5933 &format_attr_event.attr,
5934 &format_attr_umask_ext5.attr,
5935 &format_attr_tid_en2.attr,
5936 &format_attr_edge.attr,
5937 &format_attr_inv.attr,
5938 &format_attr_thresh8.attr,
5939 &format_attr_filter_tid5.attr,
5940 NULL,
5941 };
5942 static const struct attribute_group spr_uncore_chabox_format_group = {
5943 .name = "format",
5944 .attrs = spr_uncore_cha_formats_attr,
5945 };
5946
5947 static ssize_t alias_show(struct device *dev,
5948 struct device_attribute *attr,
5949 char *buf)
5950 {
5951 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5952 char pmu_name[UNCORE_PMU_NAME_LEN];
5953
5954 uncore_get_alias_name(pmu_name, pmu);
5955 return sysfs_emit(buf, "%s\n", pmu_name);
5956 }
5957
5958 static DEVICE_ATTR_RO(alias);
5959
5960 static struct attribute *uncore_alias_attrs[] = {
5961 &dev_attr_alias.attr,
5962 NULL
5963 };
5964
5965 ATTRIBUTE_GROUPS(uncore_alias);
5966
5967 static struct intel_uncore_type spr_uncore_chabox = {
5968 .name = "cha",
5969 .event_mask = SPR_CHA_PMON_EVENT_MASK,
5970 .event_mask_ext = SPR_CHA_EVENT_MASK_EXT,
5971 .num_shared_regs = 1,
5972 .constraints = skx_uncore_chabox_constraints,
5973 .ops = &spr_uncore_chabox_ops,
5974 .format_group = &spr_uncore_chabox_format_group,
5975 .attr_update = uncore_alias_groups,
5976 };
5977
5978 static struct intel_uncore_type spr_uncore_iio = {
5979 .name = "iio",
5980 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5981 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5982 .format_group = &snr_uncore_iio_format_group,
5983 .attr_update = uncore_alias_groups,
5984 .constraints = icx_uncore_iio_constraints,
5985 };
5986
5987 static struct attribute *spr_uncore_raw_formats_attr[] = {
5988 &format_attr_event.attr,
5989 &format_attr_umask_ext4.attr,
5990 &format_attr_edge.attr,
5991 &format_attr_inv.attr,
5992 &format_attr_thresh8.attr,
5993 NULL,
5994 };
5995
5996 static const struct attribute_group spr_uncore_raw_format_group = {
5997 .name = "format",
5998 .attrs = spr_uncore_raw_formats_attr,
5999 };
6000
6001 #define SPR_UNCORE_COMMON_FORMAT() \
6002 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
6003 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, \
6004 .format_group = &spr_uncore_raw_format_group, \
6005 .attr_update = uncore_alias_groups
6006
6007 static struct intel_uncore_type spr_uncore_irp = {
6008 SPR_UNCORE_COMMON_FORMAT(),
6009 .name = "irp",
6011 };
6012
6013 static struct event_constraint spr_uncore_m2pcie_constraints[] = {
6014 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
6015 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
6016 EVENT_CONSTRAINT_END
6017 };
6018
6019 static struct intel_uncore_type spr_uncore_m2pcie = {
6020 SPR_UNCORE_COMMON_FORMAT(),
6021 .name = "m2pcie",
6022 .constraints = spr_uncore_m2pcie_constraints,
6023 };
6024
6025 static struct intel_uncore_type spr_uncore_pcu = {
6026 .name = "pcu",
6027 .attr_update = uncore_alias_groups,
6028 };
6029
6030 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
6031 struct perf_event *event)
6032 {
6033 struct hw_perf_event *hwc = &event->hw;
6034
6035 if (!box->io_addr)
6036 return;
6037
6038 if (uncore_pmc_fixed(hwc->idx))
6039 writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
6040 else
6041 writel(hwc->config, box->io_addr + hwc->config_base);
6042 }
6043
6044 static struct intel_uncore_ops spr_uncore_mmio_ops = {
6045 .init_box = intel_generic_uncore_mmio_init_box,
6046 .exit_box = uncore_mmio_exit_box,
6047 .disable_box = intel_generic_uncore_mmio_disable_box,
6048 .enable_box = intel_generic_uncore_mmio_enable_box,
6049 .disable_event = intel_generic_uncore_mmio_disable_event,
6050 .enable_event = spr_uncore_mmio_enable_event,
6051 .read_counter = uncore_mmio_read_counter,
6052 };
6053
6054 static struct uncore_event_desc spr_uncore_imc_events[] = {
6055 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
6056 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x05,umask=0xcf"),
6057 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
6058 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
6059 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
6060 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
6061 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
6062 { /* end: all zeroes */ },
6063 };
6064
6065 #define SPR_UNCORE_MMIO_COMMON_FORMAT() \
6066 SPR_UNCORE_COMMON_FORMAT(), \
6067 .ops = &spr_uncore_mmio_ops
6068
6069 static struct intel_uncore_type spr_uncore_imc = {
6070 SPR_UNCORE_MMIO_COMMON_FORMAT(),
6071 .name = "imc",
6072 .fixed_ctr_bits = 48,
6073 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
6074 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
6075 .event_descs = spr_uncore_imc_events,
6076 };
6077
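/*
 * The 64-bit control value spans two 32-bit PCI config registers; the
 * high dword (the extended umask) is written first and the low dword,
 * which holds the enable bit, last.
 */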
6078 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
6079 struct perf_event *event)
6080 {
6081 struct pci_dev *pdev = box->pci_dev;
6082 struct hw_perf_event *hwc = &event->hw;
6083
6084 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
6085 pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6086 }
6087
6088 static struct intel_uncore_ops spr_uncore_pci_ops = {
6089 .init_box = intel_generic_uncore_pci_init_box,
6090 .disable_box = intel_generic_uncore_pci_disable_box,
6091 .enable_box = intel_generic_uncore_pci_enable_box,
6092 .disable_event = intel_generic_uncore_pci_disable_event,
6093 .enable_event = spr_uncore_pci_enable_event,
6094 .read_counter = intel_generic_uncore_pci_read_counter,
6095 };
6096
6097 #define SPR_UNCORE_PCI_COMMON_FORMAT() \
6098 SPR_UNCORE_COMMON_FORMAT(), \
6099 .ops = &spr_uncore_pci_ops
6100
6101 static struct intel_uncore_type spr_uncore_m2m = {
6102 SPR_UNCORE_PCI_COMMON_FORMAT(),
6103 .name = "m2m",
6104 };
6105
6106 static struct attribute_group spr_upi_mapping_group = {
6107 .is_visible = skx_upi_mapping_visible,
6108 };
6109
6110 static const struct attribute_group *spr_upi_attr_update[] = {
6111 &uncore_alias_group,
6112 &spr_upi_mapping_group,
6113 NULL
6114 };
6115
6116 #define SPR_UPI_REGS_ADDR_DEVICE_LINK0 0x01
6117
6118 static void spr_upi_set_mapping(struct intel_uncore_type *type)
6119 {
6120 pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6121 }
6122
6123 static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
6124 {
6125 pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6126 }
6127
6128 static int spr_upi_get_topology(struct intel_uncore_type *type)
6129 {
6130 return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
6131 }
6132
6133 static struct intel_uncore_type spr_uncore_mdf = {
6134 SPR_UNCORE_COMMON_FORMAT(),
6135 .name = "mdf",
6136 };
6137
6138 static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box)
6139 {
6140 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
6141 intel_generic_uncore_mmio_init_box(box);
6142 }
6143
6144 static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = {
6145 .init_box = spr_uncore_mmio_offs8_init_box,
6146 .exit_box = uncore_mmio_exit_box,
6147 .disable_box = intel_generic_uncore_mmio_disable_box,
6148 .enable_box = intel_generic_uncore_mmio_enable_box,
6149 .disable_event = intel_generic_uncore_mmio_disable_event,
6150 .enable_event = spr_uncore_mmio_enable_event,
6151 .read_counter = uncore_mmio_read_counter,
6152 };
6153
6154 #define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \
6155 SPR_UNCORE_COMMON_FORMAT(), \
6156 .ops = &spr_uncore_mmio_offs8_ops
6157
6158 static struct event_constraint spr_uncore_cxlcm_constraints[] = {
6159 UNCORE_EVENT_CONSTRAINT(0x02, 0x0f),
6160 UNCORE_EVENT_CONSTRAINT(0x05, 0x0f),
6161 UNCORE_EVENT_CONSTRAINT(0x40, 0xf0),
6162 UNCORE_EVENT_CONSTRAINT(0x41, 0xf0),
6163 UNCORE_EVENT_CONSTRAINT(0x42, 0xf0),
6164 UNCORE_EVENT_CONSTRAINT(0x43, 0xf0),
6165 UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0),
6166 UNCORE_EVENT_CONSTRAINT(0x52, 0xf0),
6167 EVENT_CONSTRAINT_END
6168 };
6169
6170 static struct intel_uncore_type spr_uncore_cxlcm = {
6171 SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
6172 .name = "cxlcm",
6173 .constraints = spr_uncore_cxlcm_constraints,
6174 };
6175
6176 static struct intel_uncore_type spr_uncore_cxldp = {
6177 SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
6178 .name = "cxldp",
6179 };
6180
6181 static struct intel_uncore_type spr_uncore_hbm = {
6182 SPR_UNCORE_COMMON_FORMAT(),
6183 .name = "hbm",
6184 };
6185
6186 #define UNCORE_SPR_NUM_UNCORE_TYPES 15
6187 #define UNCORE_SPR_CHA 0
6188 #define UNCORE_SPR_IIO 1
6189 #define UNCORE_SPR_IMC 6
6190 #define UNCORE_SPR_UPI 8
6191 #define UNCORE_SPR_M3UPI 9
6192
6193 /*
6194 * The uncore units that are supported by the discovery table
6195 * are defined here.
6196 */
6197 static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
6198 &spr_uncore_chabox,
6199 &spr_uncore_iio,
6200 &spr_uncore_irp,
6201 &spr_uncore_m2pcie,
6202 &spr_uncore_pcu,
6203 NULL,
6204 &spr_uncore_imc,
6205 &spr_uncore_m2m,
6206 NULL,
6207 NULL,
6208 NULL,
6209 &spr_uncore_mdf,
6210 &spr_uncore_cxlcm,
6211 &spr_uncore_cxldp,
6212 &spr_uncore_hbm,
6213 };
6214
6215 /*
6216 * The uncore units that are not supported by the discovery table
6217 * are implemented here.
6218 */
6219 #define SPR_UNCORE_UPI_NUM_BOXES 4
6220
6221 static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
6222 0, 0x8000, 0x10000, 0x18000
6223 };
6224
6225 static void spr_extra_boxes_cleanup(struct intel_uncore_type *type)
6226 {
6227 struct intel_uncore_discovery_unit *pos;
6228 struct rb_node *node;
6229
6230 if (!type->boxes)
6231 return;
6232
6233 while (!RB_EMPTY_ROOT(type->boxes)) {
6234 node = rb_first(type->boxes);
6235 pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
6236 rb_erase(node, type->boxes);
6237 kfree(pos);
6238 }
6239 kfree(type->boxes);
6240 type->boxes = NULL;
6241 }
6242
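/*
 * The counter/control offsets below are expressed relative to BOX_CTL
 * because the unit address built in spr_update_device_location() already
 * includes type->box_ctl.
 */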
6243 static struct intel_uncore_type spr_uncore_upi = {
6244 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
6245 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
6246 .format_group = &spr_uncore_raw_format_group,
6247 .ops = &spr_uncore_pci_ops,
6248 .name = "upi",
6249 .attr_update = spr_upi_attr_update,
6250 .get_topology = spr_upi_get_topology,
6251 .set_mapping = spr_upi_set_mapping,
6252 .cleanup_mapping = spr_upi_cleanup_mapping,
6253 .type_id = UNCORE_SPR_UPI,
6254 .num_counters = 4,
6255 .num_boxes = SPR_UNCORE_UPI_NUM_BOXES,
6256 .perf_ctr_bits = 48,
6257 .perf_ctr = ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL,
6258 .event_ctl = ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL,
6259 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
6260 .pci_offsets = spr_upi_pci_offsets,
6261 .cleanup_extra_boxes = spr_extra_boxes_cleanup,
6262 };
6263
6264 static struct intel_uncore_type spr_uncore_m3upi = {
6265 SPR_UNCORE_PCI_COMMON_FORMAT(),
6266 .name = "m3upi",
6267 .type_id = UNCORE_SPR_M3UPI,
6268 .num_counters = 4,
6269 .num_boxes = SPR_UNCORE_UPI_NUM_BOXES,
6270 .perf_ctr_bits = 48,
6271 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
6272 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
6273 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
6274 .pci_offsets = spr_upi_pci_offsets,
6275 .constraints = icx_uncore_m3upi_constraints,
6276 .cleanup_extra_boxes = spr_extra_boxes_cleanup,
6277 };
6278
6279 enum perf_uncore_spr_iio_freerunning_type_id {
6280 SPR_IIO_MSR_IOCLK,
6281 SPR_IIO_MSR_BW_IN,
6282 SPR_IIO_MSR_BW_OUT,
6283
6284 SPR_IIO_FREERUNNING_TYPE_MAX,
6285 };
6286
6287 static struct freerunning_counters spr_iio_freerunning[] = {
6288 [SPR_IIO_MSR_IOCLK] = { 0x340e, 0x1, 0x10, 1, 48 },
6289 [SPR_IIO_MSR_BW_IN] = { 0x3800, 0x1, 0x10, 8, 48 },
6290 [SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
6291 };
6292
6293 static struct intel_uncore_type spr_uncore_iio_free_running = {
6294 .name = "iio_free_running",
6295 .num_counters = 17,
6296 .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
6297 .freerunning = spr_iio_freerunning,
6298 .ops = &skx_uncore_iio_freerunning_ops,
6299 .event_descs = snr_uncore_iio_freerunning_events,
6300 .format_group = &skx_uncore_iio_freerunning_format_group,
6301 };
6302
6303 enum perf_uncore_spr_imc_freerunning_type_id {
6304 SPR_IMC_DCLK,
6305 SPR_IMC_PQ_CYCLES,
6306
6307 SPR_IMC_FREERUNNING_TYPE_MAX,
6308 };
6309
6310 static struct freerunning_counters spr_imc_freerunning[] = {
6311 [SPR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
6312 [SPR_IMC_PQ_CYCLES] = { 0x2318, 0x8, 0, 2, 48 },
6313 };
6314
6315 static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
6316 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
6317
6318 INTEL_UNCORE_EVENT_DESC(rpq_cycles, "event=0xff,umask=0x20"),
6319 INTEL_UNCORE_EVENT_DESC(wpq_cycles, "event=0xff,umask=0x21"),
6320 { /* end: all zeroes */ },
6321 };
6322
6323 #define SPR_MC_DEVICE_ID 0x3251
6324
6325 static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
6326 {
6327 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
6328
6329 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
6330 mem_offset, SPR_MC_DEVICE_ID);
6331 }
6332
6333 static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
6334 .init_box = spr_uncore_imc_freerunning_init_box,
6335 .exit_box = uncore_mmio_exit_box,
6336 .read_counter = uncore_mmio_read_counter,
6337 .hw_config = uncore_freerunning_hw_config,
6338 };
6339
6340 static struct intel_uncore_type spr_uncore_imc_free_running = {
6341 .name = "imc_free_running",
6342 .num_counters = 3,
6343 .mmio_map_size = SNR_IMC_MMIO_SIZE,
6344 .num_freerunning_types = SPR_IMC_FREERUNNING_TYPE_MAX,
6345 .freerunning = spr_imc_freerunning,
6346 .ops = &spr_uncore_imc_freerunning_ops,
6347 .event_descs = spr_uncore_imc_freerunning_events,
6348 .format_group = &skx_uncore_iio_freerunning_format_group,
6349 };
6350
6351 #define UNCORE_SPR_MSR_EXTRA_UNCORES 1
6352 #define UNCORE_SPR_MMIO_EXTRA_UNCORES 1
6353 #define UNCORE_SPR_PCI_EXTRA_UNCORES 2
6354
6355 static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
6356 &spr_uncore_iio_free_running,
6357 };
6358
6359 static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
6360 &spr_uncore_imc_free_running,
6361 };
6362
6363 static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
6364 &spr_uncore_upi,
6365 &spr_uncore_m3upi
6366 };
6367
6368 int spr_uncore_units_ignore[] = {
6369 UNCORE_SPR_UPI,
6370 UNCORE_SPR_M3UPI,
6371 UNCORE_IGNORE_END
6372 };
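/*
 * UPI and M3UPI are ignored during discovery because their discovery
 * table entries are unreliable on some SPR variants; the pre-defined
 * types above are used instead (see spr_uncore_pci_init()).
 */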
6373
6374 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6375 struct intel_uncore_type *from_type)
6376 {
6377 if (!to_type || !from_type)
6378 return;
6379
6380 if (from_type->name)
6381 to_type->name = from_type->name;
6382 if (from_type->fixed_ctr_bits)
6383 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6384 if (from_type->event_mask)
6385 to_type->event_mask = from_type->event_mask;
6386 if (from_type->event_mask_ext)
6387 to_type->event_mask_ext = from_type->event_mask_ext;
6388 if (from_type->fixed_ctr)
6389 to_type->fixed_ctr = from_type->fixed_ctr;
6390 if (from_type->fixed_ctl)
6391 to_type->fixed_ctl = from_type->fixed_ctl;
6394 if (from_type->num_shared_regs)
6395 to_type->num_shared_regs = from_type->num_shared_regs;
6396 if (from_type->constraints)
6397 to_type->constraints = from_type->constraints;
6398 if (from_type->ops)
6399 to_type->ops = from_type->ops;
6400 if (from_type->event_descs)
6401 to_type->event_descs = from_type->event_descs;
6402 if (from_type->format_group)
6403 to_type->format_group = from_type->format_group;
6404 if (from_type->attr_update)
6405 to_type->attr_update = from_type->attr_update;
6406 if (from_type->set_mapping)
6407 to_type->set_mapping = from_type->set_mapping;
6408 if (from_type->get_topology)
6409 to_type->get_topology = from_type->get_topology;
6410 if (from_type->cleanup_mapping)
6411 to_type->cleanup_mapping = from_type->cleanup_mapping;
6412 }
6413
6414 static struct intel_uncore_type **
6415 uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
6416 struct intel_uncore_type **extra, int max_num_types,
6417 struct intel_uncore_type **uncores)
6418 {
6419 struct intel_uncore_type **types, **start_types;
6420 int i;
6421
6422 start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
6423
6424 /* Only copy the customized features */
6425 for (; *types; types++) {
6426 if ((*types)->type_id >= max_num_types)
6427 continue;
6428 uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
6429 }
6430
6431 for (i = 0; i < num_extra; i++, types++)
6432 *types = extra[i];
6433
6434 return start_types;
6435 }
6436
6437 static struct intel_uncore_type *
6438 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6439 {
6440 for (; *types; types++) {
6441 if (type_id == (*types)->type_id)
6442 return *types;
6443 }
6444
6445 return NULL;
6446 }
6447
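/*
 * Unit IDs in the discovery tree may be sparse, so derive the number of
 * boxes from the highest unit ID rather than by counting nodes.
 */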
6448 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6449 int type_id)
6450 {
6451 struct intel_uncore_discovery_unit *unit;
6452 struct intel_uncore_type *type;
6453 struct rb_node *node;
6454 int max = 0;
6455
6456 type = uncore_find_type_by_id(types, type_id);
6457 if (!type)
6458 return 0;
6459
6460 for (node = rb_first(type->boxes); node; node = rb_next(node)) {
6461 unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
6462
6463 if (unit->id > max)
6464 max = unit->id;
6465 }
6466 return max + 1;
6467 }
6468
6469 #define SPR_MSR_UNC_CBO_CONFIG 0x2FFE
6470
6471 void spr_uncore_cpu_init(void)
6472 {
6473 struct intel_uncore_type *type;
6474 u64 num_cbo;
6475
6476 uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6477 UNCORE_SPR_MSR_EXTRA_UNCORES,
6478 spr_msr_uncores,
6479 UNCORE_SPR_NUM_UNCORE_TYPES,
6480 spr_uncores);
6481
6482 type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
6483 if (type) {
6484 /*
6485 * The value from the discovery table (stored in the type->num_boxes
6486 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
6487 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG instead.
6488 */
6489 rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
6490 /*
6491 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
6492 * the EMR XCC. Don't let the value from the MSR replace the existing value.
6493 */
6494 if (num_cbo)
6495 type->num_boxes = num_cbo;
6496 }
6497 spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6498 }
6499
#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

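/*
 * The UPI/M3UPI entries in some SPR discovery tables are broken. Rebuild
 * the unit list by scanning the PCI bus for the known device IDs and
 * deriving each unit id from the device slot relative to unit 0.
 */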
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	struct rb_root *root;
	u32 device, devfn;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	root = kzalloc(sizeof(struct rb_root), GFP_KERNEL);
	if (!root) {
		type->num_boxes = 0;
		return;
	}
	*root = RB_ROOT;

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {

		die = uncore_device_to_die(dev);
		if (die < 0)
			continue;

		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (!unit)
			continue;
		unit->die = die;
		unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn);
		unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			     dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			     devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			     type->box_ctl;

		unit->pmu_idx = unit->id;

		uncore_find_add_unit(unit, root, NULL);
	}

	type->boxes = root;
}

int spr_uncore_pci_init(void)
{
	/*
	 * The UPI discovery table on some SPR variants is broken,
	 * which impacts the detection of both the UPI and M3UPI uncore PMON.
	 * Use the pre-defined UPI and M3UPI tables instead.
	 *
	 * The accurate location, e.g., the domain and bus number,
	 * can only be retrieved at load time.
	 * Update the location of the UPI and M3UPI units.
	 */
	spr_update_device_location(UNCORE_SPR_UPI);
	spr_update_device_location(UNCORE_SPR_M3UPI);
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
						UNCORE_SPR_PCI_EXTRA_UNCORES,
						spr_pci_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);
	return 0;
}

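/*
 * The SPR MMIO uncores need the PCI bus to socket mapping. If it cannot
 * be established, fall back to the discovered types without the extra
 * free-running IMC counters.
 */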
void spr_uncore_mmio_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

	if (ret) {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);
	} else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
}

/* end of SPR uncore support */

/* GNR uncore support */

#define UNCORE_GNR_NUM_UNCORE_TYPES	23

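/* No discovered GNR units need to be suppressed; only the terminator is listed. */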
int gnr_uncore_units_ignore[] = {
	UNCORE_IGNORE_END
};

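/*
 * GNR-specific type customizations. Entries that only set .name and
 * .attr_update keep the generic discovery programming; the rest reuse
 * the SPR format/event layouts.
 */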
static struct intel_uncore_type gnr_uncore_ubox = {
	.name		= "ubox",
	.attr_update	= uncore_alias_groups,
};

static struct intel_uncore_type gnr_uncore_pciex8 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name		= "pciex8",
};

static struct intel_uncore_type gnr_uncore_pciex16 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name		= "pciex16",
};

static struct intel_uncore_type gnr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name		= "upi",
};

static struct intel_uncore_type gnr_uncore_b2upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name		= "b2upi",
};

static struct intel_uncore_type gnr_uncore_b2hot = {
	.name		= "b2hot",
	.attr_update	= uncore_alias_groups,
};

static struct intel_uncore_type gnr_uncore_b2cmi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name		= "b2cmi",
};

static struct intel_uncore_type gnr_uncore_b2cxl = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name		= "b2cxl",
};

static struct intel_uncore_type gnr_uncore_mdf_sbo = {
	.name		= "mdf_sbo",
	.attr_update	= uncore_alias_groups,
};

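/*
 * Indexed by the discovery-table type id; NULL slots take the generic
 * setup unmodified (see uncore_type_customized_copy()).
 */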
static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	NULL,
	&spr_uncore_pcu,
	&gnr_uncore_ubox,
	&spr_uncore_imc,
	NULL,
	&gnr_uncore_upi,
	NULL,
	NULL,
	NULL,
	&spr_uncore_cxlcm,
	&spr_uncore_cxldp,
	NULL,
	&gnr_uncore_b2hot,
	&gnr_uncore_b2cmi,
	&gnr_uncore_b2cxl,
	&gnr_uncore_b2upi,
	NULL,
	&gnr_uncore_mdf_sbo,
	&gnr_uncore_pciex16,
	&gnr_uncore_pciex8,
};

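/* { counter base, counter offset, box offset, #counters, counter width } */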
static struct freerunning_counters gnr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x290e, 0x01, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
};

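/*
 * GNR reuses the SPR free-running IIO events; only the counter MSR
 * addresses (gnr_iio_freerunning) and the box count differ.
 */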
void gnr_uncore_cpu_init(void)
{
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
	spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
}

int gnr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	return 0;
}

void gnr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
						 UNCORE_GNR_NUM_UNCORE_TYPES,
						 gnr_uncores);
}

/* end of GNR uncore support */