1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include <asm/cpu_device_id.h>
4 #include <asm/msr.h>
5 #include "uncore.h"
6 #include "uncore_discovery.h"
7
8 /* SNB-EP pci bus to socket mapping */
9 #define SNBEP_CPUNODEID 0x40
10 #define SNBEP_GIDNIDMAP 0x54
11
12 /* SNB-EP Box level control */
13 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
14 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
15 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
16 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
17 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
18 SNBEP_PMON_BOX_CTL_RST_CTRS | \
19 SNBEP_PMON_BOX_CTL_FRZ_EN)
20 /* SNB-EP event control */
21 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
22 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
23 #define SNBEP_PMON_CTL_RST (1 << 17)
24 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
25 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
26 #define SNBEP_PMON_CTL_EN (1 << 22)
27 #define SNBEP_PMON_CTL_INVERT (1 << 23)
28 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
29 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
30 SNBEP_PMON_CTL_UMASK_MASK | \
31 SNBEP_PMON_CTL_EDGE_DET | \
32 SNBEP_PMON_CTL_INVERT | \
33 SNBEP_PMON_CTL_TRESH_MASK)
34
35 /* SNB-EP Ubox event control */
36 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
37 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
38 (SNBEP_PMON_CTL_EV_SEL_MASK | \
39 SNBEP_PMON_CTL_UMASK_MASK | \
40 SNBEP_PMON_CTL_EDGE_DET | \
41 SNBEP_PMON_CTL_INVERT | \
42 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
43
44 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
45 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
46 SNBEP_CBO_PMON_CTL_TID_EN)
47
48 /* SNB-EP PCU event control */
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
50 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
51 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
52 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
53 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
54 (SNBEP_PMON_CTL_EV_SEL_MASK | \
55 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
56 SNBEP_PMON_CTL_EDGE_DET | \
57 SNBEP_PMON_CTL_INVERT | \
58 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
59 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
60 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
61
62 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
63 (SNBEP_PMON_RAW_EVENT_MASK | \
64 SNBEP_PMON_CTL_EV_SEL_EXT)
65
66 /* SNB-EP pci control register */
67 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
68 #define SNBEP_PCI_PMON_CTL0 0xd8
69 /* SNB-EP pci counter register */
70 #define SNBEP_PCI_PMON_CTR0 0xa0
71
72 /* SNB-EP home agent register */
73 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
74 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
75 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
76 /* SNB-EP memory controller register */
77 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
78 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
79 /* SNB-EP QPI register */
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
81 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
82 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
83 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
84
85 /* SNB-EP Ubox register */
86 #define SNBEP_U_MSR_PMON_CTR0 0xc16
87 #define SNBEP_U_MSR_PMON_CTL0 0xc10
88
89 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
90 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
91
92 /* SNB-EP Cbo register */
93 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
94 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
95 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
96 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
97 #define SNBEP_CBO_MSR_OFFSET 0x20
98
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
100 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
101 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
102 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
103
104 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
105 .event = (e), \
106 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
107 .config_mask = (m), \
108 .idx = (i) \
109 }
110
111 /* SNB-EP PCU register */
112 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
113 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
114 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
115 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
116 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
117 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
118 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
119
120 /* IVBEP event control */
121 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
122 SNBEP_PMON_BOX_CTL_RST_CTRS)
123 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
124 SNBEP_PMON_CTL_UMASK_MASK | \
125 SNBEP_PMON_CTL_EDGE_DET | \
126 SNBEP_PMON_CTL_TRESH_MASK)
127 /* IVBEP Ubox */
128 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
129 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
130 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
131
132 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
133 (SNBEP_PMON_CTL_EV_SEL_MASK | \
134 SNBEP_PMON_CTL_UMASK_MASK | \
135 SNBEP_PMON_CTL_EDGE_DET | \
136 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
137 /* IVBEP Cbo */
138 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
139 SNBEP_CBO_PMON_CTL_TID_EN)
140
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
146 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
147 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
148 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
149
150 /* IVBEP home agent */
151 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
152 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
153 (IVBEP_PMON_RAW_EVENT_MASK | \
154 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
155 /* IVBEP PCU */
156 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
157 (SNBEP_PMON_CTL_EV_SEL_MASK | \
158 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
159 SNBEP_PMON_CTL_EDGE_DET | \
160 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
161 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
162 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
163 /* IVBEP QPI */
164 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
165 (IVBEP_PMON_RAW_EVENT_MASK | \
166 SNBEP_PMON_CTL_EV_SEL_EXT)
167
168 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
169 ((1ULL << (n)) - 1)))
170
171 /* Haswell-EP Ubox */
172 #define HSWEP_U_MSR_PMON_CTR0 0x709
173 #define HSWEP_U_MSR_PMON_CTL0 0x705
174 #define HSWEP_U_MSR_PMON_FILTER 0x707
175
176 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
177 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
178
179 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
180 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
181 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
182 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
183 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
184
185 /* Haswell-EP CBo */
186 #define HSWEP_C0_MSR_PMON_CTR0 0xe08
187 #define HSWEP_C0_MSR_PMON_CTL0 0xe01
188 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
189 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
190 #define HSWEP_CBO_MSR_OFFSET 0x10
191
192
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
198 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
199 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
200 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
201
202
203 /* Haswell-EP Sbox */
204 #define HSWEP_S0_MSR_PMON_CTR0 0x726
205 #define HSWEP_S0_MSR_PMON_CTL0 0x721
206 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
207 #define HSWEP_SBOX_MSR_OFFSET 0xa
208 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
209 SNBEP_CBO_PMON_CTL_TID_EN)
210
211 /* Haswell-EP PCU */
212 #define HSWEP_PCU_MSR_PMON_CTR0 0x717
213 #define HSWEP_PCU_MSR_PMON_CTL0 0x711
214 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
215 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
216
217 /* KNL Ubox */
218 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
219 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
220 SNBEP_CBO_PMON_CTL_TID_EN)
221 /* KNL CHA */
222 #define KNL_CHA_MSR_OFFSET 0xc
223 #define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
224 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
225 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
226 KNL_CHA_MSR_PMON_CTL_QOR)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
230 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
231 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
232 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)
233
234 /* KNL EDC/MC UCLK */
235 #define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
236 #define KNL_UCLK_MSR_PMON_CTL0 0x420
237 #define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
238 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
239 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
240 #define KNL_PMON_FIXED_CTL_EN 0x1
241
242 /* KNL EDC */
243 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
244 #define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
245 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
246 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
247 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44
248
249 /* KNL MC */
250 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
251 #define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
252 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
253 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
254 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44
255
256 /* KNL IRP */
257 #define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
258 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
259 KNL_CHA_MSR_PMON_CTL_QOR)
260 /* KNL PCU */
261 #define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
262 #define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
263 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
264 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
265 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
266 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
267 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
268 SNBEP_PMON_CTL_EDGE_DET | \
269 SNBEP_CBO_PMON_CTL_TID_EN | \
270 SNBEP_PMON_CTL_INVERT | \
271 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
272 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
273 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
274
275 /* SKX pci bus to socket mapping */
276 #define SKX_CPUNODEID 0xc0
277 #define SKX_GIDNIDMAP 0xd4
278
279 /*
280 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
281 * that BIOS programmed. MSR has package scope.
282 * | Bit | Default | Description
283 * | [63] | 00h | VALID - When set, indicates the CPU bus
284 * numbers have been initialized. (RO)
285 * |[62:48]| --- | Reserved
286 * |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned
287 * CPUBUSNO(5). (RO)
288 * |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned
289 * CPUBUSNO(4). (RO)
290 * |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned
291 * CPUBUSNO(3). (RO)
292 * |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned
293 * CPUBUSNO(2). (RO)
294 * |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned
295 * CPUBUSNO(1). (RO)
296 * | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned
297 * CPUBUSNO(0). (RO)
298 */
299 #define SKX_MSR_CPU_BUS_NUMBER 0x300
300 #define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
301 #define BUS_NUM_STRIDE 8
302
303 /* SKX CHA */
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
314 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
315 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
316 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
317
318 /* SKX IIO */
319 #define SKX_IIO0_MSR_PMON_CTL0 0xa48
320 #define SKX_IIO0_MSR_PMON_CTR0 0xa41
321 #define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
322 #define SKX_IIO_MSR_OFFSET 0x20
323
324 #define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
325 #define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
326 #define SKX_PMON_CTL_CH_MASK (0xff << 4)
327 #define SKX_PMON_CTL_FC_MASK (0x7 << 12)
328 #define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
329 SNBEP_PMON_CTL_UMASK_MASK | \
330 SNBEP_PMON_CTL_EDGE_DET | \
331 SNBEP_PMON_CTL_INVERT | \
332 SKX_PMON_CTL_TRESH_MASK)
333 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
334 SKX_PMON_CTL_CH_MASK | \
335 SKX_PMON_CTL_FC_MASK)
336
337 /* SKX IRP */
338 #define SKX_IRP0_MSR_PMON_CTL0 0xa5b
339 #define SKX_IRP0_MSR_PMON_CTR0 0xa59
340 #define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
341 #define SKX_IRP_MSR_OFFSET 0x20
342
343 /* SKX UPI */
344 #define SKX_UPI_PCI_PMON_CTL0 0x350
345 #define SKX_UPI_PCI_PMON_CTR0 0x318
346 #define SKX_UPI_PCI_PMON_BOX_CTL 0x378
347 #define SKX_UPI_CTL_UMASK_EXT 0xffefff
348
349 /* SKX M2M */
350 #define SKX_M2M_PCI_PMON_CTL0 0x228
351 #define SKX_M2M_PCI_PMON_CTR0 0x200
352 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
353
354 /* Memory Map registers device ID */
355 #define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
356 #define SNR_ICX_SAD_CONTROL_CFG 0x3f4
357
358 /* Getting I/O stack id in SAD_COTROL_CFG notation */
359 #define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
360
361 /* SNR Ubox */
362 #define SNR_U_MSR_PMON_CTR0 0x1f98
363 #define SNR_U_MSR_PMON_CTL0 0x1f91
364 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
365 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
366
367 /* SNR CHA */
368 #define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
369 #define SNR_CHA_MSR_PMON_CTL0 0x1c01
370 #define SNR_CHA_MSR_PMON_CTR0 0x1c08
371 #define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
372 #define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
373
374
375 /* SNR IIO */
376 #define SNR_IIO_MSR_PMON_CTL0 0x1e08
377 #define SNR_IIO_MSR_PMON_CTR0 0x1e01
378 #define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
379 #define SNR_IIO_MSR_OFFSET 0x10
380 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
381
382 /* SNR IRP */
383 #define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
384 #define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
385 #define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
386 #define SNR_IRP_MSR_OFFSET 0x10
387
388 /* SNR M2PCIE */
389 #define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
390 #define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
391 #define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
392 #define SNR_M2PCIE_MSR_OFFSET 0x10
393
394 /* SNR PCU */
395 #define SNR_PCU_MSR_PMON_CTL0 0x1ef1
396 #define SNR_PCU_MSR_PMON_CTR0 0x1ef8
397 #define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
398 #define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
399
400 /* SNR M2M */
401 #define SNR_M2M_PCI_PMON_CTL0 0x468
402 #define SNR_M2M_PCI_PMON_CTR0 0x440
403 #define SNR_M2M_PCI_PMON_BOX_CTL 0x438
404 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
405
406 /* SNR PCIE3 */
407 #define SNR_PCIE3_PCI_PMON_CTL0 0x508
408 #define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
409 #define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
410
411 /* SNR IMC */
412 #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
413 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
414 #define SNR_IMC_MMIO_PMON_CTL0 0x40
415 #define SNR_IMC_MMIO_PMON_CTR0 0x8
416 #define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
417 #define SNR_IMC_MMIO_OFFSET 0x4000
418 #define SNR_IMC_MMIO_SIZE 0x4000
419 #define SNR_IMC_MMIO_BASE_OFFSET 0xd0
420 #define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
421 #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
422 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF
423
424 /* ICX CHA */
425 #define ICX_C34_MSR_PMON_CTR0 0xb68
426 #define ICX_C34_MSR_PMON_CTL0 0xb61
427 #define ICX_C34_MSR_PMON_BOX_CTL 0xb60
428 #define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
429
430 /* ICX IIO */
431 #define ICX_IIO_MSR_PMON_CTL0 0xa58
432 #define ICX_IIO_MSR_PMON_CTR0 0xa51
433 #define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
434
435 /* ICX IRP */
436 #define ICX_IRP0_MSR_PMON_CTL0 0xa4d
437 #define ICX_IRP0_MSR_PMON_CTR0 0xa4b
438 #define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
439
440 /* ICX M2PCIE */
441 #define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
442 #define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
443 #define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
444
445 /* ICX UPI */
446 #define ICX_UPI_PCI_PMON_CTL0 0x350
447 #define ICX_UPI_PCI_PMON_CTR0 0x320
448 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318
449 #define ICX_UPI_CTL_UMASK_EXT 0xffffff
450 #define ICX_UBOX_DID 0x3450
451
452 /* ICX M3UPI*/
453 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8
454 #define ICX_M3UPI_PCI_PMON_CTR0 0xa8
455 #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
456
457 /* ICX IMC */
458 #define ICX_NUMBER_IMC_CHN 3
459 #define ICX_IMC_MEM_STRIDE 0x4
460
461 /* SPR */
462 #define SPR_RAW_EVENT_MASK_EXT 0xffffff
463 #define SPR_UBOX_DID 0x3250
464
465 /* SPR CHA */
466 #define SPR_CHA_EVENT_MASK_EXT 0xffffffff
467 #define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
468 #define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
469 SPR_CHA_PMON_CTL_TID_EN)
470 #define SPR_CHA_PMON_BOX_FILTER_TID 0x3ff
471
472 #define SPR_C0_MSR_PMON_BOX_FILTER0 0x200e
473
474 /* DMR */
475 #define DMR_IMH1_HIOP_MMIO_BASE 0x1ffff6ae7000
476 #define DMR_HIOP_MMIO_SIZE 0x8000
477 #define DMR_CXLCM_EVENT_MASK_EXT 0xf
478 #define DMR_HAMVF_EVENT_MASK_EXT 0xffffffff
479 #define DMR_PCIE4_EVENT_MASK_EXT 0xffffff
480
481 #define UNCORE_DMR_ITC 0x30
482
483 #define DMR_IMC_PMON_FIXED_CTR 0x18
484 #define DMR_IMC_PMON_FIXED_CTL 0x10
485
486 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
487 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
488 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
489 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
490 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
491 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
492 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
493 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
494 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
495 DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
496 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
497 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
498 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
499 DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
500 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
501 DEFINE_UNCORE_FORMAT_ATTR(inv2, inv, "config:21");
502 DEFINE_UNCORE_FORMAT_ATTR(thresh_ext, thresh_ext, "config:32-35");
503 DEFINE_UNCORE_FORMAT_ATTR(thresh10, thresh, "config:23-32");
504 DEFINE_UNCORE_FORMAT_ATTR(thresh9_2, thresh, "config:23-31");
505 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
506 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
507 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
508 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
509 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
510 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
511 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
512 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
513 DEFINE_UNCORE_FORMAT_ATTR(port_en, port_en, "config:32-35");
514 DEFINE_UNCORE_FORMAT_ATTR(rs3_sel, rs3_sel, "config:36");
515 DEFINE_UNCORE_FORMAT_ATTR(rx_sel, rx_sel, "config:37");
516 DEFINE_UNCORE_FORMAT_ATTR(tx_sel, tx_sel, "config:38");
517 DEFINE_UNCORE_FORMAT_ATTR(iep_sel, iep_sel, "config:39");
518 DEFINE_UNCORE_FORMAT_ATTR(vc_sel, vc_sel, "config:40-47");
519 DEFINE_UNCORE_FORMAT_ATTR(port_sel, port_sel, "config:48-55");
520 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
521 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
522 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
523 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
524 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
525 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
526 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
527 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
528 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
529 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
530 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
531 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
532 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
533 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
534 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
535 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
536 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
537 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
538 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
539 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
540 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
541 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
542 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
543 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
544 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
545 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
546 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
547 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
548 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
549 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
550 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
551 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
552 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
553 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
554 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
555 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
556 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
557 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
558 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
559 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
560 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
561 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
562 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
563 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
564 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
565 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
566 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
567 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
568 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
569 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
570 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
571 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
572 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
573 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
574 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
575 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
576 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
577
snbep_uncore_pci_disable_box(struct intel_uncore_box * box)578 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
579 {
580 struct pci_dev *pdev = box->pci_dev;
581 int box_ctl = uncore_pci_box_ctl(box);
582 u32 config = 0;
583
584 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
585 config |= SNBEP_PMON_BOX_CTL_FRZ;
586 pci_write_config_dword(pdev, box_ctl, config);
587 }
588 }
589
snbep_uncore_pci_enable_box(struct intel_uncore_box * box)590 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
591 {
592 struct pci_dev *pdev = box->pci_dev;
593 int box_ctl = uncore_pci_box_ctl(box);
594 u32 config = 0;
595
596 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
597 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
598 pci_write_config_dword(pdev, box_ctl, config);
599 }
600 }
601
snbep_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)602 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
603 {
604 struct pci_dev *pdev = box->pci_dev;
605 struct hw_perf_event *hwc = &event->hw;
606
607 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
608 }
609
snbep_uncore_pci_disable_event(struct intel_uncore_box * box,struct perf_event * event)610 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
611 {
612 struct pci_dev *pdev = box->pci_dev;
613 struct hw_perf_event *hwc = &event->hw;
614
615 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
616 }
617
snbep_uncore_pci_read_counter(struct intel_uncore_box * box,struct perf_event * event)618 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
619 {
620 struct pci_dev *pdev = box->pci_dev;
621 struct hw_perf_event *hwc = &event->hw;
622 u64 count = 0;
623
624 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
625 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
626
627 return count;
628 }
629
snbep_uncore_pci_init_box(struct intel_uncore_box * box)630 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
631 {
632 struct pci_dev *pdev = box->pci_dev;
633 int box_ctl = uncore_pci_box_ctl(box);
634
635 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
636 }
637
snbep_uncore_msr_disable_box(struct intel_uncore_box * box)638 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
639 {
640 u64 config;
641 unsigned msr;
642
643 msr = uncore_msr_box_ctl(box);
644 if (msr) {
645 rdmsrq(msr, config);
646 config |= SNBEP_PMON_BOX_CTL_FRZ;
647 wrmsrq(msr, config);
648 }
649 }
650
snbep_uncore_msr_enable_box(struct intel_uncore_box * box)651 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
652 {
653 u64 config;
654 unsigned msr;
655
656 msr = uncore_msr_box_ctl(box);
657 if (msr) {
658 rdmsrq(msr, config);
659 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
660 wrmsrq(msr, config);
661 }
662 }
663
snbep_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)664 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
665 {
666 struct hw_perf_event *hwc = &event->hw;
667 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
668
669 if (reg1->idx != EXTRA_REG_NONE)
670 wrmsrq(reg1->reg, uncore_shared_reg_config(box, 0));
671
672 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
673 }
674
snbep_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)675 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
676 struct perf_event *event)
677 {
678 struct hw_perf_event *hwc = &event->hw;
679
680 wrmsrq(hwc->config_base, hwc->config);
681 }
682
snbep_uncore_msr_init_box(struct intel_uncore_box * box)683 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
684 {
685 unsigned msr = uncore_msr_box_ctl(box);
686
687 if (msr)
688 wrmsrq(msr, SNBEP_PMON_BOX_CTL_INT);
689 }
690
/* Base SNB-EP PMON event format shared by most boxes. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,	/* config:0-7   event select */
	&format_attr_umask.attr,	/* config:8-15  unit mask */
	&format_attr_edge.attr,		/* config:18    edge detect */
	&format_attr_inv.attr,		/* config:23    invert */
	&format_attr_thresh8.attr,	/* config:24-31 threshold */
	NULL,
};
699
/* Ubox event format: same as the base format but only a 5-bit threshold. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,	/* config:0-7   event select */
	&format_attr_umask.attr,	/* config:8-15  unit mask */
	&format_attr_edge.attr,		/* config:18    edge detect */
	&format_attr_inv.attr,		/* config:23    invert */
	&format_attr_thresh5.attr,	/* config:24-28 threshold */
	NULL,
};
708
/* C-Box event format: base format plus tid_en and the filter-MSR fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,	/* config:0-7    event select */
	&format_attr_umask.attr,	/* config:8-15   unit mask */
	&format_attr_edge.attr,		/* config:18     edge detect */
	&format_attr_tid_en.attr,	/* config:19     TID filter enable */
	&format_attr_inv.attr,		/* config:23     invert */
	&format_attr_thresh8.attr,	/* config:24-31  threshold */
	&format_attr_filter_tid.attr,	/* config1:0-4   filter: thread id */
	&format_attr_filter_nid.attr,	/* config1:10-17 filter: node id */
	&format_attr_filter_state.attr,	/* config1:18-22 filter: cache state */
	&format_attr_filter_opc.attr,	/* config1:23-31 filter: opcode */
	NULL,
};
722
/* PCU event format: occupancy-select/invert/edge plus four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,	/* config:0-7    event select */
	&format_attr_occ_sel.attr,	/* config:14-15  occupancy select */
	&format_attr_edge.attr,		/* config:18     edge detect */
	&format_attr_inv.attr,		/* config:23     invert */
	&format_attr_thresh5.attr,	/* config:24-28  threshold */
	&format_attr_occ_invert.attr,	/* config:30     occupancy invert */
	&format_attr_occ_edge.attr,	/* occupancy edge detect */
	&format_attr_filter_band0.attr,	/* config1:0-7   frequency band 0 */
	&format_attr_filter_band1.attr,	/* config1:8-15  frequency band 1 */
	&format_attr_filter_band2.attr,	/* config1:16-23 frequency band 2 */
	&format_attr_filter_band3.attr,	/* config1:24-31 frequency band 3 */
	NULL,
};
737
/*
 * QPI event format: extended event select (config:0-7,21) plus packet
 * match fields in config1 and the corresponding mask fields in config2.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
764
/*
 * Named IMC events exposed via sysfs. The CAS count scale of
 * 6.103515625e-5 equals 64/2^20 — presumably converting 64-byte
 * transfers into the MiB unit declared below (TODO: confirm against the
 * uncore performance monitoring guide).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
775
/* Named QPI events exposed via sysfs. */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
783
/*
 * sysfs "format" attribute groups, one per box type, exposing the
 * attribute arrays defined above.
 */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
808
/*
 * Common MSR-based PMON callbacks. The double-underscore variant omits
 * init_box so it can be combined with a different init_box callback.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

/* Default MSR uncore ops for SNB-EP. */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
823
/*
 * Common callbacks for SNB-EP PCI-based boxes; .enable_event is left out
 * so box types with extra setup (e.g. QPI filters) can override it.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
830
831 static struct intel_uncore_ops snbep_uncore_pci_ops = {
832 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
833 .enable_event = snbep_uncore_pci_enable_event, \
834 };
835
/* Per-event counter restrictions: (event code or range, usable-counter mask) */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x04, 0x5, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x12, 0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1b, 0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x31, 0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x37, 0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x24, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x32, 0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x20, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};
872
/* Ubox: 2 general 44-bit counters plus a 48-bit fixed UCLK counter */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
887
/*
 * (event+umask, match mask, filter-field bits) table: maps event encodings
 * to the C-Box filter fields they need; the idx bits feed
 * snbep_cbox_filter_mask() via snbep_cbox_hw_config().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
916
snbep_cbox_put_constraint(struct intel_uncore_box * box,struct perf_event * event)917 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
918 {
919 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
920 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
921 int i;
922
923 if (uncore_box_is_fake(box))
924 return;
925
926 for (i = 0; i < 5; i++) {
927 if (reg1->alloc & (0x1 << i))
928 atomic_sub(1 << (i * 6), &er->ref);
929 }
930 reg1->alloc = 0;
931 }
932
/*
 * Try to reserve the shared C-Box filter register fields this event needs.
 *
 * reg1->idx is a bitmask of up to five requested filter fields; each field
 * owns a 6-bit reference counter packed into er->ref.  A field may be shared
 * when the current filter value agrees with this event's value under that
 * field's mask (obtained from @cbox_filter_mask).
 *
 * Returns NULL on success, or &uncore_constraint_empty when some field is
 * already held with a conflicting value (all partially-taken references are
 * dropped again before returning).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* on a real box, fields already allocated earlier stay held */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* free (refcount 0) or already programmed with the same value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* record what we hold so put_constraint can release it later */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* undo the references taken in this call before giving up */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
979
snbep_cbox_filter_mask(int fields)980 static u64 snbep_cbox_filter_mask(int fields)
981 {
982 u64 mask = 0;
983
984 if (fields & 0x1)
985 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
986 if (fields & 0x2)
987 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
988 if (fields & 0x4)
989 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
990 if (fields & 0x8)
991 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
992
993 return mask;
994 }
995
/* SNB-EP front end for the shared filter-reservation helper above */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
1001
snbep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)1002 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1003 {
1004 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1005 struct extra_reg *er;
1006 int idx = 0;
1007
1008 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1009 if (er->event != (event->hw.config & er->config_mask))
1010 continue;
1011 idx |= er->idx;
1012 }
1013
1014 if (idx) {
1015 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1016 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1017 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1018 reg1->idx = idx;
1019 }
1020 return 0;
1021 }
1022
/* C-Box ops: common MSR callbacks plus filter-register management */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * C-Box: one box per core (capped at runtime in snbep_uncore_cpu_init),
 * with one shared register slot for the box filter.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1045
snbep_pcu_alter_er(struct perf_event * event,int new_idx,bool modify)1046 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1047 {
1048 struct hw_perf_event *hwc = &event->hw;
1049 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1050 u64 config = reg1->config;
1051
1052 if (new_idx > reg1->idx)
1053 config <<= 8 * (new_idx - reg1->idx);
1054 else
1055 config >>= 8 * (reg1->idx - new_idx);
1056
1057 if (modify) {
1058 hwc->config += new_idx - reg1->idx;
1059 reg1->config = config;
1060 reg1->idx = new_idx;
1061 }
1062 return config;
1063 }
1064
/*
 * Reserve one of the four 8-bit bands in the shared PCU filter register.
 *
 * Each band has an 8-bit reference counter packed into er->ref.  If the
 * event's preferred band is taken with a different value, try the other
 * three bands in turn (relocating the filter value with snbep_pcu_alter_er)
 * before giving up.  Returns NULL on success, the empty constraint when all
 * bands conflict.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* no filter needed, or this (real) event already holds its band */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* band is free, or already programmed with an identical value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* rotate through the remaining bands before failing */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the relocation and remember the reservation */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1106
snbep_pcu_put_constraint(struct intel_uncore_box * box,struct perf_event * event)1107 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1108 {
1109 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1110 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1111
1112 if (uncore_box_is_fake(box) || !reg1->alloc)
1113 return;
1114
1115 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1116 reg1->alloc = 0;
1117 }
1118
/*
 * PCU setup: event selects 0xb..0xe use the shared band filter register;
 * band index is ev_sel - 0xb, and only that band's byte of config1 is kept.
 */
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
1132
/* PCU ops: common MSR callbacks plus band-filter management */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* PCU: one box, four counters, one shared slot for the band filter */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* All SNB-EP MSR-based box types; NULL-terminated */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1160
snbep_uncore_cpu_init(void)1161 void snbep_uncore_cpu_init(void)
1162 {
1163 if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1164 snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1165 uncore_msr_uncores = snbep_msr_uncores;
1166 }
1167
/* Indices of the extra (non-counting) filter PCI devices, per QPI port */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1173
snbep_qpi_hw_config(struct intel_uncore_box * box,struct perf_event * event)1174 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1175 {
1176 struct hw_perf_event *hwc = &event->hw;
1177 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1178 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1179
1180 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1181 reg1->idx = 0;
1182 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1183 reg1->config = event->attr.config1;
1184 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1185 reg2->config = event->attr.config2;
1186 }
1187 return 0;
1188 }
1189
/*
 * Enable a QPI event.  The packet match/mask registers live in a separate
 * companion "filter" PCI device per port; program them first (when this
 * event uses them), then enable the counter in the counting device.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* select the filter device for this port on this die */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* filter device may be absent; skip programming if so */
		if (filter_pdev) {
			/* each 64-bit match/mask value is written as two dwords */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1216
/* QPI ops: override enable_event to also program the filter device */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all SNB-EP PCI-based box types */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1232
/* Home Agent */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Memory controller: 4 channels, each with a fixed DCLK counter */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI: 2 ports; uses its own ops/event mask for packet match/mask support */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface: 2 links */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1286
/* Indices into snbep_pci_uncores, referenced by the PCI id table below */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* All SNB-EP PCI-based box types; NULL-terminated */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1303
/*
 * PCI ids of the SNB-EP uncore devices; driver_data packs the box type
 * index (or UNCORE_EXTRA_PCI_DEV for non-counting filter devices) and
 * the box instance index.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1362
/* Low three bits of the value read at nodeid_loc hold the local node id */
#define NODE_ID_MASK	0x7

/* Each three bits from 0 to 23 of the GIDNIDMAP register correspond to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
1367
upi_nodeid_groupid(struct pci_dev * ubox_dev,int nodeid_loc,int idmap_loc,int * nodeid,int * groupid)1368 static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
1369 int *nodeid, int *groupid)
1370 {
1371 int ret;
1372
1373 /* get the Node ID of the local register */
1374 ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
1375 if (ret)
1376 goto err;
1377
1378 *nodeid = *nodeid & NODE_ID_MASK;
1379 /* get the Node ID mapping */
1380 ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
1381 if (ret)
1382 goto err;
1383 err:
1384 return ret;
1385 }
1386
/*
 * Translate a (node id, GIDNIDMAP register) pair into a die id.
 * Returns the die id, -ENODEV when the logical package lookup fails,
 * or -1 when the node id is not present in the mapping register.
 */
static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i;

	/*
	 * every three bits in the Node ID mapping register maps
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		int die_id;

		if (nodeid != GIDNIDMAP(gidnid, i))
			continue;

		if (topology_max_dies_per_package() > 1)
			die_id = i;
		else
			die_id = topology_phys_to_logical_pkg(i);

		return die_id < 0 ? -ENODEV : die_id;
	}

	return -1;
}
1409
1410 /*
1411 * build pci bus to socket mapping
1412 */
snbep_pci2phy_map_init(int devid,int nodeid_loc,int idmap_loc,bool reverse)1413 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1414 {
1415 struct pci_dev *ubox_dev = NULL;
1416 int i, bus, nodeid, segment, die_id;
1417 struct pci2phy_map *map;
1418 int err = 0;
1419 u32 config = 0;
1420
1421 while (1) {
1422 /* find the UBOX device */
1423 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1424 if (!ubox_dev)
1425 break;
1426 bus = ubox_dev->bus->number;
1427 /*
1428 * The nodeid and idmap registers only contain enough
1429 * information to handle 8 nodes. On systems with more
1430 * than 8 nodes, we need to rely on NUMA information,
1431 * filled in from BIOS supplied information, to determine
1432 * the topology.
1433 */
1434 if (nr_node_ids <= 8) {
1435 err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
1436 &nodeid, &config);
1437 if (err)
1438 break;
1439
1440 segment = pci_domain_nr(ubox_dev->bus);
1441 raw_spin_lock(&pci2phy_map_lock);
1442 map = __find_pci2phy_map(segment);
1443 if (!map) {
1444 raw_spin_unlock(&pci2phy_map_lock);
1445 err = -ENOMEM;
1446 break;
1447 }
1448
1449 map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
1450 raw_spin_unlock(&pci2phy_map_lock);
1451 } else {
1452 segment = pci_domain_nr(ubox_dev->bus);
1453 raw_spin_lock(&pci2phy_map_lock);
1454 map = __find_pci2phy_map(segment);
1455 if (!map) {
1456 raw_spin_unlock(&pci2phy_map_lock);
1457 err = -ENOMEM;
1458 break;
1459 }
1460
1461 map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);
1462
1463 raw_spin_unlock(&pci2phy_map_lock);
1464
1465 if (WARN_ON_ONCE(die_id == -1)) {
1466 err = -EINVAL;
1467 break;
1468 }
1469 }
1470 }
1471
1472 if (!err) {
1473 /*
1474 * For PCI bus with no UBOX device, find the next bus
1475 * that has UBOX device and use its mapping.
1476 */
1477 raw_spin_lock(&pci2phy_map_lock);
1478 list_for_each_entry(map, &pci2phy_map_head, list) {
1479 i = -1;
1480 if (reverse) {
1481 for (bus = 255; bus >= 0; bus--) {
1482 if (map->pbus_to_dieid[bus] != -1)
1483 i = map->pbus_to_dieid[bus];
1484 else
1485 map->pbus_to_dieid[bus] = i;
1486 }
1487 } else {
1488 for (bus = 0; bus <= 255; bus++) {
1489 if (map->pbus_to_dieid[bus] != -1)
1490 i = map->pbus_to_dieid[bus];
1491 else
1492 map->pbus_to_dieid[bus] = i;
1493 }
1494 }
1495 }
1496 raw_spin_unlock(&pci2phy_map_lock);
1497 }
1498
1499 pci_dev_put(ubox_dev);
1500
1501 return pcibios_err_to_errno(err);
1502 }
1503
snbep_uncore_pci_init(void)1504 int snbep_uncore_pci_init(void)
1505 {
1506 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1507 if (ret)
1508 return ret;
1509 uncore_pci_uncores = snbep_pci_uncores;
1510 uncore_pci_driver = &snbep_uncore_pci_driver;
1511 return 0;
1512 }
1513 /* end of Sandy Bridge-EP uncore support */
1514
1515 /* IvyTown uncore support */
ivbep_uncore_msr_init_box(struct intel_uncore_box * box)1516 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1517 {
1518 unsigned msr = uncore_msr_box_ctl(box);
1519 if (msr)
1520 wrmsrq(msr, IVBEP_PMON_BOX_CTL_INT);
1521 }
1522
ivbep_uncore_pci_init_box(struct intel_uncore_box * box)1523 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1524 {
1525 struct pci_dev *pdev = box->pci_dev;
1526
1527 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1528 }
1529
/* IVT MSR ops: SNB-EP callbacks with the IVT-specific box initializer */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IVT PCI ops: SNB-EP callbacks with the IVT-specific box initializer */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1550
/* Register layout shared by all IVT PCI-based box types */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1558
/* Default IVT raw-event format fields */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox: 5-bit threshold instead of 8-bit */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-Box: adds tid_en plus the IVT filter fields (link/state/nid/opc/...) */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU: occupancy select/invert/edge plus the four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI: extended event select plus packet match/mask fields */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1633
/* sysfs "format" groups for the IVT box types */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1658
/* IVT Ubox: same register layout as SNB-EP, IVT event mask and ops */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1673
/*
 * IVT (event+umask, match mask, filter-field bits) table; the idx bits
 * feed ivbep_cbox_filter_mask() via ivbep_cbox_hw_config().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1714
ivbep_cbox_filter_mask(int fields)1715 static u64 ivbep_cbox_filter_mask(int fields)
1716 {
1717 u64 mask = 0;
1718
1719 if (fields & 0x1)
1720 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1721 if (fields & 0x2)
1722 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1723 if (fields & 0x4)
1724 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1725 if (fields & 0x8)
1726 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1727 if (fields & 0x10) {
1728 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1729 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1730 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1731 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1732 }
1733
1734 return mask;
1735 }
1736
/* IVT front end for the shared filter-reservation helper */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1742
ivbep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)1743 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1744 {
1745 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1746 struct extra_reg *er;
1747 int idx = 0;
1748
1749 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1750 if (er->event != (event->hw.config & er->config_mask))
1751 continue;
1752 idx |= er->idx;
1753 }
1754
1755 if (idx) {
1756 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1757 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1758 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1759 reg1->idx = idx;
1760 }
1761 return 0;
1762 }
1763
/*
 * Enable a Cbox event.  If the event uses the shared filter register,
 * program it first so the filter is in effect before counting starts.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* The 64-bit filter value spans two MSRs, 6 addresses apart. */
		wrmsrq(reg1->reg, filter & 0xffffffff);
		wrmsrq(reg1->reg + 6, filter >> 32);
	}

	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1777
/* Cbox ops: IVBEP-specific enable (filter programming), SNB-EP rest. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVBEP Cbox PMU: num_boxes is clamped to the core count at cpu_init. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1805
/* PCU ops: common MSR ops plus the SNB-EP PCU occupancy handling. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all IVBEP MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1833
/*
 * Register the IVBEP MSR-based uncore PMUs, clamping the Cbox count to
 * the number of cores actually present in the package.
 */
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = ivbep_msr_uncores;
}
1840
/* Home Agent PMU (PCI based). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Memory controller PMU, with the fixed DCLK counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1864
/* Enable an IRP event via its per-counter config-space control register. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	/* IRP control registers are irregularly spaced; use the offset table. */
	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1873
/* Disable an IRP event by rewriting its control register without EN set. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1881
ivbep_uncore_irp_read_counter(struct intel_uncore_box * box,struct perf_event * event)1882 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1883 {
1884 struct pci_dev *pdev = box->pci_dev;
1885 struct hw_perf_event *hwc = &event->hw;
1886 u64 count = 0;
1887
1888 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1889 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1890
1891 return count;
1892 }
1893
/* IRP ops: event enable/disable/read use the irregular offset tables. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IRP PMU type; no perf_ctr/event_ctl bases since offsets are tabled. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1913
/* QPI ops: reuses SNB-EP QPI match/mask handling on top of PCI ops. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1957
/* Indices into ivbep_pci_uncores[], matched by the PCI ID driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1976
/*
 * PCI device IDs of the IvyTown uncore PMON units.  driver_data encodes
 * (PMU type index, box index); the QPI port filter devices are extra
 * devices used by snbep_qpi_hw_config rather than PMUs themselves.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2062
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

/*
 * Register the IVBEP PCI-based uncore PMUs.  First build the PCI-bus to
 * physical-package mapping from the Ubox device (0x0e1e); returns 0 on
 * success or the mapping error.
 */
int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
2077 /* end of IvyTown uncore support */
2078
2079 /* KNL uncore support */
/* sysfs format attributes for the KNL Ubox (5-bit threshold field). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL Ubox: reuses the HSW-EP register layout with KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2109
/* sysfs format attributes for the KNL CHA, including filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to specific CHA counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Event encodings that require the CHA filter register (idx bitmap). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2150
knl_cha_filter_mask(int fields)2151 static u64 knl_cha_filter_mask(int fields)
2152 {
2153 u64 mask = 0;
2154
2155 if (fields & 0x1)
2156 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2157 if (fields & 0x2)
2158 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2159 if (fields & 0x4)
2160 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2161 return mask;
2162 }
2163
/* Constraint lookup for KNL CHA via the common SNB-EP Cbox helper. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2169
knl_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)2170 static int knl_cha_hw_config(struct intel_uncore_box *box,
2171 struct perf_event *event)
2172 {
2173 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2174 struct extra_reg *er;
2175 int idx = 0;
2176
2177 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2178 if (er->event != (event->hw.config & er->config_mask))
2179 continue;
2180 idx |= er->idx;
2181 }
2182
2183 if (idx) {
2184 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2185 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2186 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2187
2188 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2189 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2190 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2191 reg1->idx = idx;
2192 }
2193 return 0;
2194 }
2195
/* Defined later in the Haswell-EP section; the KNL CHA reuses it. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL CHA PMU: 38 boxes, HSW-EP register layout with KNL offsets. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2226
/* sysfs format attributes for the KNL PCU (occupancy-capable). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

/* NULL-terminated list of all KNL MSR-based uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2264
/* Register the KNL MSR-based uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2269
/*
 * Enable (unfreeze) a KNL IMC box by clearing its box control register,
 * rather than setting an explicit unfreeze bit as on other uncores.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2277
knl_uncore_imc_enable_event(struct intel_uncore_box * box,struct perf_event * event)2278 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2279 struct perf_event *event)
2280 {
2281 struct pci_dev *pdev = box->pci_dev;
2282 struct hw_perf_event *hwc = &event->hw;
2283
2284 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2285 == UNCORE_FIXED_EVENT)
2286 pci_write_config_dword(pdev, hwc->config_base,
2287 hwc->config | KNL_PMON_FIXED_CTL_EN);
2288 else
2289 pci_write_config_dword(pdev, hwc->config_base,
2290 hwc->config | SNBEP_PMON_CTL_EN);
2291 }
2292
/* Shared ops for all KNL IMC/EDC PMUs (enable_box clears box ctl). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/* Memory controller UCLK PMU (one per MC). */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* Memory controller DCLK (per-channel) PMU. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) UCLK PMU, one per EDC. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) ECLK PMU, one per EDC. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2365
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMU, standard SNB-EP PCI register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* sysfs format attributes for the KNL IRP. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2407
/* Indices into knl_pci_uncores[], matched by the PCI ID driver_data. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2426
2427 /*
2428 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2429 * device type. prior to KNL, each instance of a PMU device type had a unique
2430 * device ID.
2431 *
2432 * PCI Device ID Uncore PMU Devices
2433 * ----------------------------------
2434 * 0x7841 MC0 UClk, MC1 UClk
2435 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2436 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2437 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2438 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2439 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2440 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2441 * 0x7817 M2PCIe
2442 * 0x7814 IRP
2443 */
2444
/*
 * KNL PCI IDs are shared between instances of a PMU type, so the
 * driver_data additionally carries (devfn dev, devfn func) to
 * disambiguate which box a given PCI function belongs to.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2552
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

/*
 * Register the KNL PCI-based uncore PMUs.  Builds the bus-to-package
 * mapping from both the IRP and M2PCIe devices, since IRP sits on a
 * different PCI bus from the rest of the PMON units.
 */
int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}
2573
2574 /* end of KNL uncore support */
2575
2576 /* Haswell-EP uncore support */
/* sysfs format attributes for the HSW-EP Ubox (with TID/CID filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2592
/*
 * Route the Ubox event's config1 bits to the single shared Ubox filter
 * register; idx 0 because there is only one such shared register.
 */
static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}
2601
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSW-EP Ubox: 2 generic 44-bit counters plus the 48-bit UCLK fixed one. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2624
/* sysfs format attributes for the HSW-EP Cbox, including filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Events restricted to specific HSW-EP Cbox counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2657
/*
 * HSW-EP Cbox event encodings that need the filter register; the third
 * field is the filter-field bitmap consumed by hswep_cbox_filter_mask().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2699
hswep_cbox_filter_mask(int fields)2700 static u64 hswep_cbox_filter_mask(int fields)
2701 {
2702 u64 mask = 0;
2703 if (fields & 0x1)
2704 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2705 if (fields & 0x2)
2706 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2707 if (fields & 0x4)
2708 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2709 if (fields & 0x8)
2710 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2711 if (fields & 0x10) {
2712 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2713 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2714 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2715 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2716 }
2717 return mask;
2718 }
2719
/* Delegate to the common C-box constraint logic with the HSWEP filter map. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2725
hswep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)2726 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2727 {
2728 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2729 struct extra_reg *er;
2730 int idx = 0;
2731
2732 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2733 if (er->event != (event->hw.config & er->config_mask))
2734 continue;
2735 idx |= er->idx;
2736 }
2737
2738 if (idx) {
2739 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2740 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2741 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2742 reg1->idx = idx;
2743 }
2744 return 0;
2745 }
2746
hswep_cbox_enable_event(struct intel_uncore_box * box,struct perf_event * event)2747 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2748 struct perf_event *event)
2749 {
2750 struct hw_perf_event *hwc = &event->hw;
2751 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2752
2753 if (reg1->idx != EXTRA_REG_NONE) {
2754 u64 filter = uncore_shared_reg_config(box, 0);
2755 wrmsrq(reg1->reg, filter & 0xffffffff);
2756 wrmsrq(reg1->reg + 1, filter >> 32);
2757 }
2758
2759 wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2760 }
2761
/* MSR accessors for the Haswell-EP C-box PMON unit. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * Haswell-EP C-box (LLC coherence engine) PMON description.  num_boxes is
 * the maximum; hswep_uncore_cpu_init() clamps it to the core count.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2789
2790 /*
2791 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2792 */
hswep_uncore_sbox_msr_init_box(struct intel_uncore_box * box)2793 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2794 {
2795 unsigned msr = uncore_msr_box_ctl(box);
2796
2797 if (msr) {
2798 u64 init = SNBEP_PMON_BOX_CTL_INT;
2799 u64 flags = 0;
2800 int i;
2801
2802 for_each_set_bit(i, (unsigned long *)&init, 64) {
2803 flags |= (1ULL << i);
2804 wrmsrq(msr, flags);
2805 }
2806 }
2807 }
2808
/* Common SNBEP MSR ops, but with the bit-by-bit SBOX init workaround. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

/* sysfs "format" attributes accepted for Haswell-EP S-box events. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/*
 * Haswell-EP S-box (ring-to-QPI interface) PMON description.  num_boxes
 * may be reduced to 2 by hswep_uncore_cpu_init() on 6-8 core parts.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2842
hswep_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)2843 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2844 {
2845 struct hw_perf_event *hwc = &event->hw;
2846 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2847 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2848
2849 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2850 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2851 reg1->idx = ev_sel - 0xb;
2852 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2853 }
2854 return 0;
2855 }
2856
/* MSR accessors for the Haswell-EP PCU (power control unit) PMON. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Haswell-EP PCU PMON description; reuses the SNBEP PCU format group. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* All MSR-based Haswell-EP uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2885
2886 #define HSWEP_PCU_DID 0x2fc0
2887 #define HSWEP_PCU_CAPID4_OFFET 0x94
2888 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
2889
hswep_has_limit_sbox(unsigned int device)2890 static bool hswep_has_limit_sbox(unsigned int device)
2891 {
2892 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2893 u32 capid4;
2894
2895 if (!dev)
2896 return false;
2897
2898 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2899 pci_dev_put(dev);
2900 if (!hswep_get_chop(capid4))
2901 return true;
2902
2903 return false;
2904 }
2905
hswep_uncore_cpu_init(void)2906 void hswep_uncore_cpu_init(void)
2907 {
2908 if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
2909 hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();
2910
2911 /* Detect 6-8 core systems with only two SBOXes */
2912 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2913 hswep_uncore_sbox.num_boxes = 2;
2914
2915 uncore_msr_uncores = hswep_msr_uncores;
2916 }
2917
/* Haswell-EP home agent PMON (PCI-based, common SNBEP register layout). */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Named IMC events; CAS count scale 6.103515625e-5 converts 64B lines to MiB. */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* Haswell-EP memory controller channel PMON (PCI-based). */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2948
2949 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2950
hswep_uncore_irp_read_counter(struct intel_uncore_box * box,struct perf_event * event)2951 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2952 {
2953 struct pci_dev *pdev = box->pci_dev;
2954 struct hw_perf_event *hwc = &event->hw;
2955 u64 count = 0;
2956
2957 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2958 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2959
2960 return count;
2961 }
2962
/* PCI accessors for the IRP box; counters use the split-dword read above. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

/* Haswell-EP IRP (IIO ring port) PMON description. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* Haswell-EP QPI link PMON; shares the SNBEP QPI ops and match/mask regs. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2996
/* Counter-scheduling constraints for the Haswell-EP R2PCIe box. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x23, 0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2b, 0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x32, 0x35, 0x3),
	EVENT_CONSTRAINT_END
};

/* Haswell-EP ring-to-PCIe interface PMON (PCI-based). */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Counter-scheduling constraints for the Haswell-EP R3QPI box. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x7, 0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x14, 0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1f, 0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x25, 0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x31, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* Haswell-EP ring-to-QPI interface PMON (PCI-based). */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into hswep_pci_uncores[]; encoded into pci_device_id driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	  = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	  = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	  = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	  = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]  = &hswep_uncore_r3qpi,
	NULL,
};
3062
/*
 * Haswell-EP uncore PCI device table; driver_data packs the uncore type
 * index and the box (instance) index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3153
hswep_uncore_pci_init(void)3154 int hswep_uncore_pci_init(void)
3155 {
3156 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3157 if (ret)
3158 return ret;
3159 uncore_pci_uncores = hswep_pci_uncores;
3160 uncore_pci_driver = &hswep_uncore_pci_driver;
3161 return 0;
3162 }
3163 /* end of Haswell-EP uncore support */
3164
3165 /* BDX uncore support */
3166
/* BDX ubox PMON; reuses the Haswell-EP register layout and IVB-EP ops. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

/* Events restricted to specific BDX C-box counters. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/* BDX C-box PMON; num_boxes is clamped to the core count at init time. */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/* BDX S-box PMON; removed entirely at init on SBOX-less systems. */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

/* Index of the sbox entry in bdx_msr_uncores[] below. */
#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};

/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3236
3237 #define BDX_PCU_DID 0x6fc0
3238
bdx_uncore_cpu_init(void)3239 void bdx_uncore_cpu_init(void)
3240 {
3241 if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
3242 bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
3243 uncore_msr_uncores = bdx_msr_uncores;
3244
3245 /* Detect systems with no SBOXes */
3246 if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
3247 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3248
3249 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3250 }
3251
/* BDX home agent PMON (PCI-based, common SNBEP register layout). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX memory controller channel PMON; reuses the HSWEP IMC event descs. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX IRP PMON; reuses the HSWEP IRP ops (split-dword counter reads). */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* BDX QPI link PMON; shares the SNBEP QPI ops and match/mask regs. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};

/* Counter-scheduling constraints for the BDX R2PCIe box. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX ring-to-PCIe interface PMON (PCI-based). */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Counter-scheduling constraints for the BDX R3QPI box. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x07, 0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x10, 0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x14, 0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1f, 0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x28, 0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x2c, 0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x33, 0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x36, 0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX ring-to-QPI interface PMON (PCI-based). */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into bdx_pci_uncores[]; encoded into pci_device_id driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3362
/*
 * Broadwell-X uncore PCI device table; driver_data packs the uncore type
 * index and the box (instance) index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3458
bdx_uncore_pci_init(void)3459 int bdx_uncore_pci_init(void)
3460 {
3461 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3462
3463 if (ret)
3464 return ret;
3465 uncore_pci_uncores = bdx_pci_uncores;
3466 uncore_pci_driver = &bdx_uncore_pci_driver;
3467 return 0;
3468 }
3469
3470 /* end of BDX uncore support */
3471
3472 /* SKX uncore support */
3473
/* SKX ubox PMON; reuses the Haswell-EP register layout and IVB-EP ops. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

/* sysfs "format" attributes accepted for Skylake-X CHA events. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

/* Events restricted to specific CHA counters. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Filter-register requirements per event encoding: (event/umask match
 * value, config mask, filter-field index consumed by skx_cha_filter_mask()).
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3533
skx_cha_filter_mask(int fields)3534 static u64 skx_cha_filter_mask(int fields)
3535 {
3536 u64 mask = 0;
3537
3538 if (fields & 0x1)
3539 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3540 if (fields & 0x2)
3541 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3542 if (fields & 0x4)
3543 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3544 if (fields & 0x8) {
3545 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3546 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3547 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3548 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3549 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3550 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3551 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3552 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3553 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3554 }
3555 return mask;
3556 }
3557
/* Delegate to the common C-box constraint logic with the SKX filter map. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3563
skx_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)3564 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3565 {
3566 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3567 struct extra_reg *er;
3568 int idx = 0;
3569 /* Any of the CHA events may be filtered by Thread/Core-ID.*/
3570 if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3571 idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3572
3573 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3574 if (er->event != (event->hw.config & er->config_mask))
3575 continue;
3576 idx |= er->idx;
3577 }
3578
3579 if (idx) {
3580 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3581 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3582 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3583 reg1->idx = idx;
3584 }
3585 return 0;
3586 }
3587
/* MSR-based callbacks for the SKX CHA PMON boxes. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3600
/*
 * SKX CHA uncore PMU type.  Reuses the HSW-EP C-box register layout;
 * num_boxes is filled in at init time by skx_count_chabox().
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3615
/* sysfs "format" attributes exposed for the SKX IIO PMU. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3626
/* Groups the IIO format attributes under sysfs directory "format". */
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3631
/* Counter-placement constraints for specific SKX IIO event codes. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT_RANGE(0xd4, 0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
3641
/* Enable an IIO event: write its control MSR with the enable bit set. */
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
3649
/* MSR-based callbacks for the SKX IIO PMON boxes (also reused by IRP). */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3658
pmu_topology(struct intel_uncore_pmu * pmu,int die)3659 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3660 {
3661 int idx;
3662
3663 for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3664 if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3665 return &pmu->type->topology[die][idx];
3666 }
3667
3668 return NULL;
3669 }
3670
3671 static umode_t
pmu_iio_mapping_visible(struct kobject * kobj,struct attribute * attr,int die,int zero_bus_pmu)3672 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3673 int die, int zero_bus_pmu)
3674 {
3675 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3676 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3677
3678 return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3679 }
3680
/* SKX wrapper: on SKX, root bus 0x00 is valid only for pmu_idx 0. */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 0. */
	return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
3687
skx_iio_mapping_show(struct device * dev,struct device_attribute * attr,char * buf)3688 static ssize_t skx_iio_mapping_show(struct device *dev,
3689 struct device_attribute *attr, char *buf)
3690 {
3691 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3692 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3693 long die = (long)ea->var;
3694 struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3695
3696 return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3697 pmut ? pmut->iio->pci_bus_no : 0);
3698 }
3699
/*
 * Read the SKX_MSR_CPU_BUS_NUMBER MSR on @cpu into *@topology.
 * Returns -ENXIO when the MSR read fails or the valid bit is clear.
 */
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
	u64 val;

	if (rdmsrq_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &val))
		return -ENXIO;

	if (!(val & SKX_MSR_CPU_BUS_VALID_BIT))
		return -ENXIO;

	*topology = val;

	return 0;
}
3712
die_to_cpu(int die)3713 static int die_to_cpu(int die)
3714 {
3715 int res = 0, cpu, current_die;
3716 /*
3717 * Using cpus_read_lock() to ensure cpu is not going down between
3718 * looking at cpu_online_mask.
3719 */
3720 cpus_read_lock();
3721 for_each_online_cpu(cpu) {
3722 current_die = topology_logical_die_id(cpu);
3723 if (current_die == die) {
3724 res = cpu;
3725 break;
3726 }
3727 }
3728 cpus_read_unlock();
3729 return res;
3730 }
3731
/* Which kind of per-box topology payload a PMU type records. */
enum {
	IIO_TOPOLOGY_TYPE,
	UPI_TOPOLOGY_TYPE,
	TOPOLOGY_MAX
};
3737
/* Allocation size of the typed payload for each topology type above. */
static const size_t topology_size[TOPOLOGY_MAX] = {
	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
};
3742
pmu_alloc_topology(struct intel_uncore_type * type,int topology_type)3743 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3744 {
3745 int die, idx;
3746 struct intel_uncore_topology **topology;
3747
3748 if (!type->num_boxes)
3749 return -EPERM;
3750
3751 topology = kzalloc_objs(*topology, uncore_max_dies());
3752 if (!topology)
3753 goto err;
3754
3755 for (die = 0; die < uncore_max_dies(); die++) {
3756 topology[die] = kzalloc_objs(**topology, type->num_boxes);
3757 if (!topology[die])
3758 goto clear;
3759 for (idx = 0; idx < type->num_boxes; idx++) {
3760 topology[die][idx].untyped = kcalloc(type->num_boxes,
3761 topology_size[topology_type],
3762 GFP_KERNEL);
3763 if (!topology[die][idx].untyped)
3764 goto clear;
3765 }
3766 }
3767
3768 type->topology = topology;
3769
3770 return 0;
3771 clear:
3772 for (; die >= 0; die--) {
3773 for (idx = 0; idx < type->num_boxes; idx++)
3774 kfree(topology[die][idx].untyped);
3775 kfree(topology[die]);
3776 }
3777 kfree(topology);
3778 err:
3779 return -ENOMEM;
3780 }
3781
pmu_free_topology(struct intel_uncore_type * type)3782 static void pmu_free_topology(struct intel_uncore_type *type)
3783 {
3784 int die, idx;
3785
3786 if (type->topology) {
3787 for (die = 0; die < uncore_max_dies(); die++) {
3788 for (idx = 0; idx < type->num_boxes; idx++)
3789 kfree(type->topology[die][idx].untyped);
3790 kfree(type->topology[die]);
3791 }
3792 kfree(type->topology);
3793 type->topology = NULL;
3794 }
3795 }
3796
/*
 * Walk every die: read its CPU-bus MSR, resolve its PCI segment and hand
 * both to @topology_cb.  Stops at the first error; returns 0 when all
 * dies were processed, a negative errno otherwise (-EPERM if there are
 * no dies at all).
 */
static int skx_pmu_get_topology(struct intel_uncore_type *type,
				int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
{
	int die, segment, ret = -EPERM;
	u64 cpu_bus_msr;

	for (die = 0; die < uncore_max_dies(); die++) {
		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
		if (ret)
			break;

		segment = uncore_die_to_segment(die);
		if (segment < 0) {
			ret = segment;
			break;
		}

		ret = topology_cb(type, segment, die, cpu_bus_msr);
		if (ret)
			break;
	}

	return ret;
}
3819
/*
 * Record the IIO topology for one die: each box's PCI bus number is an
 * 8-bit field of the CPU-bus MSR, at stride BUS_NUM_STRIDE per box.
 * Always succeeds.
 */
static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	struct intel_uncore_topology *tp = type->topology[die];
	int i;

	for (i = 0; i < type->num_boxes; i++) {
		tp[i].pmu_idx = i;
		tp[i].iio->segment = segment;
		tp[i].iio->pci_bus_no = (cpu_bus_msr >> (i * BUS_NUM_STRIDE)) & 0xff;
	}

	return 0;
}
3835
/* Discover IIO topology for every die via the common SKX walker. */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
	return skx_pmu_get_topology(type, skx_iio_topology_cb);
}
3840
/* Attribute group holding per-die IIO mapping files; .attrs is filled lazily. */
static struct attribute_group skx_iio_mapping_group = {
	.is_visible	= skx_iio_mapping_visible,
};
3844
/* attr_update list for the SKX IIO PMU type. */
static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};
3849
pmu_clear_mapping_attr(const struct attribute_group ** groups,struct attribute_group * ag)3850 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3851 struct attribute_group *ag)
3852 {
3853 int i;
3854
3855 for (i = 0; groups[i]; i++) {
3856 if (groups[i] == ag) {
3857 for (i++; groups[i]; i++)
3858 groups[i - 1] = groups[i];
3859 groups[i - 1] = NULL;
3860 break;
3861 }
3862 }
3863 }
3864
/*
 * Populate @ag with one read-only "dieN" attribute per die, backed by
 * @show, after allocating and discovering the PMU topology.
 *
 * On any failure every partial allocation is released and @ag is removed
 * from type->attr_update so the group is never exposed.  Note the
 * ownership layout: @eas is a single array whose first element's ext
 * attribute is recoverable from ag->attrs[0] (see pmu_cleanup_mapping()).
 */
static void
pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
		ssize_t (*show)(struct device*, struct device_attribute*, char*),
		int topology_type)
{
	char buf[64];
	int ret;
	long die = -1;
	struct attribute **attrs = NULL;
	struct dev_ext_attribute *eas = NULL;

	ret = pmu_alloc_topology(type, topology_type);
	if (ret < 0)
		goto clear_attr_update;

	ret = type->get_topology(type);
	if (ret < 0)
		goto clear_topology;

	/* One more for NULL. */
	attrs = kzalloc_objs(*attrs, (uncore_max_dies() + 1));
	if (!attrs)
		goto clear_topology;

	eas = kzalloc_objs(*eas, uncore_max_dies());
	if (!eas)
		goto clear_attrs;

	for (die = 0; die < uncore_max_dies(); die++) {
		snprintf(buf, sizeof(buf), "die%ld", die);
		sysfs_attr_init(&eas[die].attr.attr);
		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
		if (!eas[die].attr.attr.name)
			goto err;
		eas[die].attr.attr.mode = 0444;
		eas[die].attr.show = show;
		eas[die].attr.store = NULL;
		/* The die number is smuggled to show() through ea->var. */
		eas[die].var = (void *)die;
		attrs[die] = &eas[die].attr.attr;
	}
	ag->attrs = attrs;

	return;
err:
	/* kfree(NULL) is fine for the entry whose kstrdup() just failed. */
	for (; die >= 0; die--)
		kfree(eas[die].attr.attr.name);
	kfree(eas);
clear_attrs:
	kfree(attrs);
clear_topology:
	pmu_free_topology(type);
clear_attr_update:
	pmu_clear_mapping_attr(type->attr_update, ag);
}
3919
/*
 * Undo pmu_set_mapping(): free the attribute names, the dev_ext_attribute
 * array, the attrs pointer array and the topology.  No-op when the group
 * was never populated.
 */
static void
pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	struct attribute **attr = ag->attrs;

	if (!attr)
		return;

	for (; *attr; attr++)
		kfree((*attr)->name);
	/*
	 * The first attribute is embedded in the first element of the eas
	 * array allocated by pmu_set_mapping(); freeing that element frees
	 * the whole array.
	 */
	kfree(attr_to_ext_attr(*ag->attrs));
	kfree(ag->attrs);
	ag->attrs = NULL;
	pmu_free_topology(type);
}
3935
/* IIO flavor of pmu_set_mapping(): "segment:bus" per die. */
static void
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
}
3941
/* Bind the SKX IIO mapping group to this PMU type. */
static void skx_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}
3946
/* Tear down the SKX IIO mapping group. */
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
}
3951
/* SKX IIO uncore PMU type, with per-die bus-mapping sysfs support. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
	.attr_update		= skx_iio_attr_update,
	.get_topology		= skx_iio_get_topology,
	.set_mapping		= skx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
3971
/* Free-running counter groups of the SKX IIO PMU. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3979
3980
/* { first counter MSR, box offset, counter offset, num counters, bits } */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3986
/*
 * Declare a free-running bandwidth event together with its .scale and
 * .unit (MiB) companion attributes in one go.
 */
#define INTEL_UNCORE_FR_EVENT_DESC(name, umask, scl)			\
INTEL_UNCORE_EVENT_DESC(name,						\
			"event=0xff,umask=" __stringify(umask)),	\
INTEL_UNCORE_EVENT_DESC(name.scale, __stringify(scl)),			\
INTEL_UNCORE_EVENT_DESC(name.unit, "MiB")
3992
/* Named events for the SKX IIO free-running counters. */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0,		0x20, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1,		0x21, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2,		0x22, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3,		0x23, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port0,	0x24, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port1,	0x25, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port2,	0x26, 3.814697266e-6),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port3,	0x27, 3.814697266e-6),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
4016
/* Free-running counters need only read + config-check callbacks. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};
4021
/* Only event/umask are meaningful for free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};
4027
/* sysfs "format" group for the IIO free-running PMU. */
static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
4032
/* SKX IIO free-running counter PMU type. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4043
/* Generic SKX format attributes (8-bit threshold), shared by several boxes. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4052
/* sysfs "format" group wrapping skx_uncore_formats_attr. */
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
4057
/* SKX IRP uncore PMU type; shares the IIO MSR callbacks. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
4071
/* PCU format attributes: generic fields plus occupancy and band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
4086
/* sysfs "format" group for the SKX PCU PMU. */
static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
4091
/* PCU callbacks: common IVB-EP MSR ops plus HSW-EP/SNB-EP PCU helpers. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4098
/* SKX PCU uncore PMU type (HSW-EP register layout). */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4112
/* All MSR-based SKX uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4122
4123 /*
4124 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4125 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4126 */
4127 #define SKX_CAPID6 0x9c
4128 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4129
skx_count_chabox(void)4130 static int skx_count_chabox(void)
4131 {
4132 struct pci_dev *dev = NULL;
4133 u32 val = 0;
4134
4135 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4136 if (!dev)
4137 goto out;
4138
4139 pci_read_config_dword(dev, SKX_CAPID6, &val);
4140 val &= SKX_CHA_BIT_MASK;
4141 out:
4142 pci_dev_put(dev);
4143 return hweight32(val);
4144 }
4145
/* Register the SKX MSR uncores; CHA count is probed from CAPID6. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4151
/* SKX IMC (memory controller channel) PCI uncore PMU type. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4168
/* UPI format attributes; umask uses the extended-width variant. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4177
/* sysfs "format" group for the SKX UPI PMU. */
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4182
/*
 * Initialize a UPI PCI box: mark its per-event control registers as
 * 8-byte spaced and reset the box via its BOX_CTL register.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
4190
/* PCI config-space callbacks for the SKX UPI PMON boxes. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4199
/* Show the UPI mapping attribute only when this link was trained/enabled. */
static umode_t
skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
}
4207
/*
 * sysfs show() for the per-die UPI mapping attribute: prints the peer
 * link as "upi_<port>,die_<die>" from the recorded topology.
 */
static ssize_t skx_upi_mapping_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
	long die = (long)ea->var;
	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;

	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
}
4218
4219 #define SKX_UPI_REG_DID 0x2058
4220 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0 0x0e
4221 #define SKX_UPI_REGS_ADDR_FUNCTION 0x00
4222
4223 /*
4224 * UPI Link Parameter 0
4225 * | Bit | Default | Description
4226 * | 19:16 | 0h | base_nodeid - The NodeID of the sending socket.
4227 * | 12:8 | 00h | sending_port - The processor die port number of the sending port.
4228 */
4229 #define SKX_KTILP0_OFFSET 0x94
4230
4231 /*
4232 * UPI Pcode Status. This register is used by PCode to store the link training status.
4233 * | Bit | Default | Description
4234 * | 4 | 0h | ll_status_valid — Bit indicates the valid training status
4235 * logged from PCode to the BIOS.
4236 */
4237 #define SKX_KTIPCSTS_OFFSET 0x120
4238
upi_fill_topology(struct pci_dev * dev,struct intel_uncore_topology * tp,int pmu_idx)4239 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4240 int pmu_idx)
4241 {
4242 int ret;
4243 u32 upi_conf;
4244 struct uncore_upi_topology *upi = tp->upi;
4245
4246 tp->pmu_idx = pmu_idx;
4247 ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4248 if (ret) {
4249 ret = pcibios_err_to_errno(ret);
4250 goto err;
4251 }
4252 upi->enabled = (upi_conf >> 4) & 1;
4253 if (upi->enabled) {
4254 ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4255 &upi_conf);
4256 if (ret) {
4257 ret = pcibios_err_to_errno(ret);
4258 goto err;
4259 }
4260 upi->die_to = (upi_conf >> 16) & 0xf;
4261 upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4262 }
4263 err:
4264 return ret;
4265 }
4266
/*
 * Record the UPI topology for one die by probing each link's PCI config
 * device on the die's UPI bus (taken from field 3 of the CPU-bus MSR).
 *
 * Missing devices are skipped: their topology entries stay zeroed, so
 * upi->enabled remains false and the mapping attribute is hidden.
 * Returns 0 on success or the first error from upi_fill_topology().
 */
static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	int idx, ret = 0;	/* was uninitialized when no device was found */
	struct intel_uncore_topology *upi;
	unsigned int devfn;
	struct pci_dev *dev;
	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);

	for (idx = 0; idx < type->num_boxes; idx++) {
		upi = &type->topology[die][idx];
		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
				  SKX_UPI_REGS_ADDR_FUNCTION);
		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
		if (!dev)
			continue;
		ret = upi_fill_topology(dev, upi, idx);
		/*
		 * Drop the reference taken by pci_get_domain_bus_and_slot()
		 * for every device, not just the last one probed.
		 */
		pci_dev_put(dev);
		if (ret)
			break;
	}

	return ret;
}
4291
/* Discover UPI topology for every die; refuse on Cooper Lake steppings. */
static int skx_upi_get_topology(struct intel_uncore_type *type)
{
	/* CPX case is not supported */
	if (boot_cpu_data.x86_stepping == 11)
		return -EPERM;

	return skx_pmu_get_topology(type, skx_upi_topology_cb);
}
4300
/* Attribute group for per-die UPI mapping files; .attrs is filled lazily. */
static struct attribute_group skx_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};
4304
/* attr_update list for the SKX UPI PMU type. */
static const struct attribute_group *skx_upi_attr_update[] = {
	&skx_upi_mapping_group,
	NULL
};
4309
/* UPI flavor of pmu_set_mapping(): "upi_N,die_N" per die. */
static void
pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
}
4315
/* Bind the SKX UPI mapping group to this PMU type. */
static void skx_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
}
4320
/* Tear down the SKX UPI mapping group. */
static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
}
4325
/* SKX UPI PCI uncore PMU type, with per-die link-mapping sysfs support. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
	.attr_update	= skx_upi_attr_update,
	.get_topology	= skx_upi_get_topology,
	.set_mapping	= skx_upi_set_mapping,
	.cleanup_mapping = skx_upi_cleanup_mapping,
};
4343
/*
 * Initialize an M2M PCI box: mark its per-event control registers as
 * 8-byte spaced and reset the box via its BOX_CTL register.
 */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
4351
/* PCI config-space callbacks for the SKX M2M PMON boxes. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4360
/* SKX M2M PCI uncore PMU type. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4373
/* Counter-placement constraints for SKX M2PCIe events. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
4378
/* SKX M2PCIe PCI uncore PMU type. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4392
/* Counter-placement constraints for SKX M3UPI events. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1d, 0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x4e, 0x52, 0x7),
	EVENT_CONSTRAINT_END
};
4399
/* SKX M3UPI PCI uncore PMU type. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4413
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
4421
/* All PCI-based SKX uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
4430
/*
 * PCI IDs of the SKX uncore devices.  driver_data encodes the device's
 * (dev, func) location, its entry in skx_pci_uncores[] and the box index.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4506
4507
/* PCI driver shell used by the uncore core to match the devices above. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4512
/*
 * Register the SKX PCI uncores after building the PCI-bus-to-socket map.
 * Returns 0 on success or the error from snbep_pci2phy_map_init().
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
4525
4526 /* end of SKX uncore support */
4527
4528 /* SNR uncore support */
4529
/* SNR Ubox uncore PMU type, including the fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4544
/* sysfs "format" attributes exposed for the SNR CHA PMU. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
4555 static const struct attribute_group snr_uncore_chabox_format_group = {
4556 .name = "format",
4557 .attrs = snr_uncore_cha_formats_attr,
4558 };
4559
/*
 * Program the per-CHA TID filter register for an event.  The filter MSR is
 * located at FILTER0 plus the per-box MSR stride times the box index; only
 * the TID bits from config1 are honoured.  Always succeeds.
 */
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	reg1->idx = 0;

	return 0;
}
4571
/*
 * Enable a CHA event: write the filter MSR first (if one was configured by
 * hw_config), then the event-control MSR with the enable bit set.  Write
 * order matters so the filter is in place before counting starts.
 */
static void snr_cha_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrq(reg1->reg, reg1->config);

	wrmsrq(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
4583
/* SNR CHA ops: IVB-EP style box init, SNB-EP freeze/unfreeze, CHA filter. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA: 6 boxes x 4 counters; extended umask bits live in event_mask_ext. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4608
/* sysfs "format" attributes for the SNR IIO PMON (channel/FC mask fields). */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4624
/*
 * sysfs visibility callback for the IIO die-to-PMU mapping attributes.
 * On SNR, root bus 0x00 is only a valid mapping for pmu_idx == 1.
 */
static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 1. */
	return pmu_iio_mapping_visible(kobj, attr, die, 1);
}

/* Attribute group holding the generated dieX mapping files (attrs set later). */
static struct attribute_group snr_iio_mapping_group = {
	.is_visible	= snr_iio_mapping_visible,
};

static const struct attribute_group *snr_iio_attr_update[] = {
	&snr_iio_mapping_group,
	NULL,
};
4640
/*
 * Build the IIO stack topology by walking all Mesh2IIO mmap PCI devices.
 * For each device, read SAD_CONTROL_CFG to get the hardware stack id,
 * translate it to PMON notation via @sad_pmon_mapping, and record the PCI
 * segment/bus of the stack for the device's die.
 *
 * Returns 0 on success; -EPERM if no device was found, a die could not be
 * resolved, or a stack id is out of range; or a pcibios errno on config-read
 * failure.  pci_get_device() drops the previous device's reference on each
 * iteration, so only the device held at loop exit needs the final put.
 */
static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
{
	u32 sad_cfg;
	int die, stack_id, ret = -EPERM;
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
		if (ret) {
			ret = pcibios_err_to_errno(ret);
			break;
		}

		die = uncore_pcibus_to_dieid(dev->bus);
		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
		if (die < 0 || stack_id >= type->num_boxes) {
			ret = -EPERM;
			break;
		}

		/* Convert stack id from SAD_CONTROL to PMON notation. */
		stack_id = sad_pmon_mapping[stack_id];

		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
		type->topology[die][stack_id].pmu_idx = stack_id;
		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
	}

	pci_dev_put(dev);

	return ret;
}
4673
/*
 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	SNR_QAT_PMON_ID,
	SNR_CBDMA_DMI_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID
};

/* Indexed by SAD_CONTROL_CFG stack id; value is the PMON box id above. */
static u8 snr_sad_pmon_mapping[] = {
	SNR_CBDMA_DMI_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_QAT_PMON_ID
};
4692
/* Discover SNR IIO topology via SAD_CONTROL_CFG with the SNR id mapping. */
static int snr_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
}

/* Expose the discovered topology as sysfs mapping attributes. */
static void snr_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}

/* Free the generated mapping attributes on teardown. */
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
}
4707
/* Events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint snr_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

/* SNR IIO: 5 stacks x 4 counters, with sysfs die-to-PMU mapping support. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.constraints		= snr_uncore_iio_constraints,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= snr_iio_cleanup_mapping,
};
4734
/* SNR IRP (IIO ring port): 5 boxes x 2 counters. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR M2PCIe (mesh-to-PCIe): 5 boxes x 4 counters. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4762
snr_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)4763 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4764 {
4765 struct hw_perf_event *hwc = &event->hw;
4766 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4767 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4768
4769 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4770 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4771 reg1->idx = ev_sel - 0xb;
4772 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4773 }
4774 return 0;
4775 }
4776
/* PCU ops: common IVB-EP MSR ops plus filter config and shared-reg arbitration. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR PCU: one box, 4 counters, one shared filter register. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4797
/* Free-running counter groups exposed by the SNR IIO stacks. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, num counters, bits } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0,		0x20, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1,		0x21, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2,		0x22, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3,		0x23, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port4,		0x24, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port5,		0x25, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port6,		0x26, 3.0517578125e-5),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port7,		0x27, 3.0517578125e-5),
	{ /* end: all zeroes */ },
};

/* Pseudo-type wrapping the free-running counters (1 ioclk + 8 bw_in). */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4835
/* All MSR-based SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Publish the SNR MSR uncore types to the generic uncore core. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4851
/*
 * Init an SNR M2M PCI box: flag the 8-byte control-register stride layout,
 * then reset/enable the box via its control register.
 */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4869
/* sysfs "format" attributes for the SNR M2M PMON. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

/* SNR M2M (mesh-to-memory): one PCI box, 4 counters, extended umask bits. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4897
/*
 * Enable a PCI uncore event whose 64-bit config (including extended umask
 * bits above bit 31) spans two dwords: low half with the enable bit first,
 * then the high half.
 */
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}
4906
/* PCIe3 ops: M2M-style box init, split 64-bit event-control write. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR PCIe gen3 root port PMON: one box, reuses the SKX IIO event format. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4929
/* Indices into snr_pci_uncores[], encoded into each id table's driver_data. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

/* driver_data encodes (device, function, uncore type index, box index). */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

/* Devices claimed by other drivers; matched via the "sub" driver instead. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4966
/*
 * Register the SNR PCI uncore types.  Builds the bus -> die mapping from the
 * SNR Ubox device, then publishes the type array and both PCI drivers.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}
4981
4982 #define SNR_MC_DEVICE_ID 0x3451
4983
snr_uncore_get_mc_dev(unsigned int device,int id)4984 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
4985 {
4986 struct pci_dev *mc_dev = NULL;
4987 int pkg;
4988
4989 while (1) {
4990 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
4991 if (!mc_dev)
4992 break;
4993 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4994 if (pkg == id)
4995 break;
4996 }
4997 return mc_dev;
4998 }
4999
/*
 * Map the box's PMON MMIO region.  The physical base is assembled from two
 * config-space fields of the die's memory-controller device: the MMIO base
 * (shifted left 23) plus the MEM0 offset (shifted left 12), plus @box_ctl.
 * Returns 0 and sets box->io_addr on success, -ENODEV if the MC device is
 * missing, or -EINVAL if ioremap() fails.
 */
static int snr_uncore_mmio_map(struct intel_uncore_box *box,
			       unsigned int box_ctl, int mem_offset,
			       unsigned int device)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	/* The MC device was only needed for the config reads above. */
	pci_dev_put(pdev);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return -EINVAL;
	}

	return 0;
}
5030
/* Map the box's MMIO region and, if that worked, reset/enable the box. */
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset,
				       unsigned int device)
{
	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

/* SNR IMC box init using the MEM0 offset of the SNR MC device. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET,
				   SNR_MC_DEVICE_ID);
}
5045
snr_uncore_mmio_disable_box(struct intel_uncore_box * box)5046 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5047 {
5048 u32 config;
5049
5050 if (!box->io_addr)
5051 return;
5052
5053 config = readl(box->io_addr);
5054 config |= SNBEP_PMON_BOX_CTL_FRZ;
5055 writel(config, box->io_addr);
5056 }
5057
snr_uncore_mmio_enable_box(struct intel_uncore_box * box)5058 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5059 {
5060 u32 config;
5061
5062 if (!box->io_addr)
5063 return;
5064
5065 config = readl(box->io_addr);
5066 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5067 writel(config, box->io_addr);
5068 }
5069
/*
 * Start an event: write its control register with the enable bit set.
 * Bails out silently if the box is unmapped or the register offset falls
 * outside the mapped window.
 */
static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}

/* Stop an event: rewrite its control register without the enable bit. */
static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}
5098
/* MMIO-based box ops shared by the SNR IMC PMON. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* IMC event aliases; CAS count scale converts 64-byte lines to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
5119
/* SNR IMC: 2 channels x 4 GP counters plus a fixed DCLK counter, via MMIO. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5138
/* Free-running counter groups exposed by the SNR IMC. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter offset, box offset, counter offset, num counters, bits } */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

/* read/write scale converts 64-byte transactions to MiB. */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_FR_EVENT_DESC(read,	0x20, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(write,	0x21, 6.103515625e-5),
	{ /* end: all zeroes */ },
};

/* Free-running counters need no enable/disable, only map/read/config. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5177
/* All MMIO-based SNR uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Publish the SNR MMIO uncore types to the generic uncore core. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
5188
5189 /* end of SNR uncore support */
5190
5191 /* ICX uncore support */
5192
/* Per-CHA MSR offsets, indexed by PMU index (non-uniform, hence a table). */
static u64 icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
	0x1c, 0x2a, 0x38, 0x46,
};
5200
icx_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5201 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5202 {
5203 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5204 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5205
5206 if (tie_en) {
5207 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5208 icx_cha_msr_offsets[box->pmu->pmu_idx];
5209 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5210 reg1->idx = 0;
5211 }
5212
5213 return 0;
5214 }
5215
/* ICX CHA ops: reuses SNR's filtered enable path with ICX filter config. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

/* ICX CHA; num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
5240
/* Per-box MSR offsets shared by ICX IIO, IRP and M2PCIe, by PMU index. */
static u64 icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
5255
/*
 * sysfs visibility callback for the ICX IIO die-to-PMU mapping attributes.
 * On ICX, root bus 0x00 is only a valid mapping for pmu_idx == 5.
 */
static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

/* Attribute group holding the generated dieX mapping files (attrs set later). */
static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};
5271
/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};

/* Indexed by SAD_CONTROL_CFG stack id; value is the PMON box id above. */
static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};
5292
/* Discover ICX IIO topology via SAD_CONTROL_CFG with the ICX id mapping. */
static int icx_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}

/* Expose the topology in sysfs; skipped entirely on unsupported ICX-D. */
static void icx_iio_set_mapping(struct intel_uncore_type *type)
{
	/* Detect ICX-D system. This case is not supported */
	if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
		return;
	}
	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}

/* Free the generated mapping attributes on teardown. */
static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
}
5312
/* ICX IIO: 6 stacks x 4 counters, per-box offsets from icx_msr_offsets. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= icx_iio_cleanup_mapping,
};

/* ICX IRP: 6 boxes x 2 counters. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
5346
/* Events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* ICX M2PCIe: 6 boxes x 4 counters. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
5368
/* Free-running counter groups exposed by the ICX IIO stacks. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets for the ioclk free-running counter, by PMU index. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Per-box MSR offsets for the bw_in free-running counters, by PMU index. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

/* { base MSR, box offset, ctr offset, num ctrs, bits, per-box offset table } */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

/* Reuses the SNR event descriptions (same events/scales). */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5399
/* All MSR-based ICX uncore PMU types (Ubox/PCU shared with SKX). */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5410
/*
 * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
 * registers which located at Device 30, Function 3
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

/*
 * Count the enabled CHAs from the CAPID6/7 capability bitmap.  The two
 * dwords are read into the low/high halves of a u64 (x86 is little-endian,
 * so the pointer arithmetic is safe); returns 0 if the device is absent.
 */
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}
5433
/*
 * Publish the ICX MSR uncore types; the CHA box count is probed from the
 * CAPID registers and must fit the static per-CHA offset table.
 */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5443
/* ICX M2M: 4 PCI boxes x 4 counters; register layout shared with SNR. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5457
/* sysfs "format" attributes for the ICX UPI PMON. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

/* PCI device/function of UPI link 0's config registers on ICX. */
#define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
#define ICX_UPI_REGS_ADDR_FUNCTION	0x01
5474
discover_upi_topology(struct intel_uncore_type * type,int ubox_did,int dev_link0)5475 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5476 {
5477 struct pci_dev *ubox = NULL;
5478 struct pci_dev *dev = NULL;
5479 u32 nid, gid;
5480 int idx, lgc_pkg, ret = -EPERM;
5481 struct intel_uncore_topology *upi;
5482 unsigned int devfn;
5483
5484 /* GIDNIDMAP method supports machines which have less than 8 sockets. */
5485 if (uncore_max_dies() > 8)
5486 goto err;
5487
5488 while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5489 ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5490 if (ret) {
5491 ret = pcibios_err_to_errno(ret);
5492 break;
5493 }
5494
5495 lgc_pkg = topology_gidnid_map(nid, gid);
5496 if (lgc_pkg < 0) {
5497 ret = -EPERM;
5498 goto err;
5499 }
5500 for (idx = 0; idx < type->num_boxes; idx++) {
5501 upi = &type->topology[lgc_pkg][idx];
5502 devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5503 dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5504 ubox->bus->number,
5505 devfn);
5506 if (dev) {
5507 ret = upi_fill_topology(dev, upi, idx);
5508 if (ret)
5509 goto err;
5510 }
5511 }
5512 }
5513 err:
5514 pci_dev_put(ubox);
5515 pci_dev_put(dev);
5516 return ret;
5517 }
5518
icx_upi_get_topology(struct intel_uncore_type * type)5519 static int icx_upi_get_topology(struct intel_uncore_type *type)
5520 {
5521 return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5522 }
5523
/* sysfs group for UPI link mapping attributes; .attrs filled at set_mapping time. */
static struct attribute_group icx_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *icx_upi_attr_update[] = {
	&icx_upi_mapping_group,
	NULL
};

/* Publish the discovered UPI topology via the mapping attribute group. */
static void icx_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
}

/* Free the mapping attributes created by icx_upi_set_mapping(). */
static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
}
5542
/* ICX UPI (inter-socket link) PCI PMON, with sysfs topology mapping hooks. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
	.attr_update	= icx_upi_attr_update,
	.get_topology	= icx_upi_get_topology,
	.set_mapping	= icx_upi_set_mapping,
	.cleanup_mapping = icx_upi_cleanup_mapping,
};
5560
/* Counter constraints for M3UPI events (event code -> allowed counter mask). */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1c, 0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x4e, 0x50, 0x7),
	EVENT_CONSTRAINT_END
};

/* ICX M3UPI (UPI mesh interface) PCI PMON. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5581
/* Indices into icx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
5594
/*
 * PCI IDs of ICX uncore PMON devices.  driver_data encodes
 * (slot, function, uncore type index, box instance index).
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5638
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

/*
 * Register the ICX PCI-based uncore PMON types.  First builds the PCI
 * bus -> physical package mapping via the UBOX device (DID 0x3450);
 * returns a negative error code if that mapping cannot be established.
 */
int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}
5657
icx_uncore_imc_init_box(struct intel_uncore_box * box)5658 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5659 {
5660 unsigned int box_ctl = box->pmu->type->box_ctl +
5661 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5662 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5663 SNR_IMC_MMIO_MEM0_OFFSET;
5664
5665 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5666 SNR_MC_DEVICE_ID);
5667 }
5668
/* MMIO PMON ops for ICX IMC: ICX-specific mapping, SNR box/event control. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* ICX IMC (integrated memory controller) MMIO PMON; 12 channels total. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5697
/* ICX IMC free-running counter groups. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/* { counter base, counter offset, box offset, num counters, bits } per group */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

/* Event descriptors; scale 6.103515625e-5 converts 64-byte lines to MiB. */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_FR_EVENT_DESC(read,		0x20, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(write,		0x21, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(ddrt_read,		0x30, 6.103515625e-5),
	INTEL_UNCORE_FR_EVENT_DESC(ddrt_write,		0x31, 6.103515625e-5),
	{ /* end: all zeroes */ },
};
5721
icx_uncore_imc_freerunning_init_box(struct intel_uncore_box * box)5722 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5723 {
5724 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5725 SNR_IMC_MMIO_MEM0_OFFSET;
5726
5727 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5728 mem_offset, SNR_MC_DEVICE_ID);
5729 }
5730
/* Free-running counters: read-only, no enable/disable control. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* One free-running box per memory controller (4 MCs on ICX). */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};
5755
icx_uncore_mmio_init(void)5756 void icx_uncore_mmio_init(void)
5757 {
5758 uncore_mmio_uncores = icx_mmio_uncores;
5759 }
5760
5761 /* end of ICX uncore support */
5762
5763 /* SPR uncore support */
5764
spr_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)5765 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5766 struct perf_event *event)
5767 {
5768 struct hw_perf_event *hwc = &event->hw;
5769 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5770
5771 if (reg1->idx != EXTRA_REG_NONE)
5772 wrmsrq(reg1->reg, reg1->config);
5773
5774 wrmsrq(hwc->config_base, hwc->config);
5775 }
5776
spr_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)5777 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5778 struct perf_event *event)
5779 {
5780 struct hw_perf_event *hwc = &event->hw;
5781 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5782
5783 if (reg1->idx != EXTRA_REG_NONE)
5784 wrmsrq(reg1->reg, 0);
5785
5786 wrmsrq(hwc->config_base, 0);
5787 }
5788
spr_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5789 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5790 {
5791 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5792 bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5793 struct intel_uncore_type *type = box->pmu->type;
5794 int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx);
5795
5796 if (tie_en) {
5797 reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5798 HSWEP_CBO_MSR_OFFSET * id;
5799 reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5800 reg1->idx = 0;
5801 }
5802
5803 return 0;
5804 }
5805
/* SPR CHA ops: generic discovery-based box control + CHA filter handling. */
static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* sysfs "format" attributes for the SPR CHA PMON. */
static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext5.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};
5832
alias_show(struct device * dev,struct device_attribute * attr,char * buf)5833 static ssize_t alias_show(struct device *dev,
5834 struct device_attribute *attr,
5835 char *buf)
5836 {
5837 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5838 char pmu_name[UNCORE_PMU_NAME_LEN];
5839
5840 uncore_get_alias_name(pmu_name, pmu);
5841 return sysfs_emit(buf, "%s\n", pmu_name);
5842 }
5843
5844 static DEVICE_ATTR_RO(alias);
5845
5846 static struct attribute *uncore_alias_attrs[] = {
5847 &dev_attr_alias.attr,
5848 NULL
5849 };
5850
5851 ATTRIBUTE_GROUPS(uncore_alias);
5852
/*
 * SPR CHA type: register layout/counts come from the discovery table;
 * only the customized fields below are set here (see
 * uncore_type_customized_copy()).
 */
static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_CHA_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};

/* SPR IIO type: discovery-based, with ICX IIO counter constraints. */
static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};
5872
/* Default "raw" format attributes shared by most SPR uncore types. */
static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name			= "format",
	.attrs			= spr_uncore_raw_formats_attr,
};

/* Common customized fields for SPR types using the raw event format. */
#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",

};

/* Counter constraints for SPR M2PCIe events. */
static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

/* SPR PCU: everything but the name/alias comes from the discovery table. */
static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};
5915
spr_uncore_mmio_enable_event(struct intel_uncore_box * box,struct perf_event * event)5916 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5917 struct perf_event *event)
5918 {
5919 struct hw_perf_event *hwc = &event->hw;
5920
5921 if (!box->io_addr)
5922 return;
5923
5924 if (uncore_pmc_fixed(hwc->idx))
5925 writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
5926 else
5927 writel(hwc->config, box->io_addr + hwc->config_base);
5928 }
5929
/* Generic discovery-based MMIO ops with SPR's fixed-counter-aware enable. */
static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box		= intel_generic_uncore_mmio_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= spr_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};

/* IMC event aliases; scale 6.103515625e-5 converts 64-byte CAS lines to MiB. */
static struct uncore_event_desc spr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

#define SPR_UNCORE_MMIO_COMMON_FORMAT()				\
	SPR_UNCORE_COMMON_FORMAT(),				\
	.ops			= &spr_uncore_mmio_ops

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_MMIO_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs		= spr_uncore_imc_events,
};
5963
spr_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)5964 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
5965 struct perf_event *event)
5966 {
5967 struct pci_dev *pdev = box->pci_dev;
5968 struct hw_perf_event *hwc = &event->hw;
5969
5970 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5971 pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
5972 }
5973
/* Generic discovery-based PCI ops with SPR's 64-bit split-write enable. */
static struct intel_uncore_ops spr_uncore_pci_ops = {
	.init_box		= intel_generic_uncore_pci_init_box,
	.disable_box		= intel_generic_uncore_pci_disable_box,
	.enable_box		= intel_generic_uncore_pci_enable_box,
	.disable_event		= intel_generic_uncore_pci_disable_event,
	.enable_event		= spr_uncore_pci_enable_event,
	.read_counter		= intel_generic_uncore_pci_read_counter,
};

#define SPR_UNCORE_PCI_COMMON_FORMAT()				\
	SPR_UNCORE_COMMON_FORMAT(),				\
	.ops			= &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

/* UPI link mapping attribute group; attrs populated at set_mapping time. */
static struct attribute_group spr_upi_mapping_group = {
	.is_visible		= skx_upi_mapping_visible,
};

static const struct attribute_group *spr_upi_attr_update[] = {
	&uncore_alias_group,
	&spr_upi_mapping_group,
	NULL
};
6001
/* SPR UPI link 0 lives at PCI slot 0x01; link N is at slot 0x01 + N. */
#define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01

/* Publish the discovered UPI topology via the mapping attribute group. */
static void spr_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
}

/* Free the mapping attributes created by spr_upi_set_mapping(). */
static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
}

/* Discover UPI topology using the SPR UBOX device ID and link-0 slot. */
static int spr_upi_get_topology(struct intel_uncore_type *type)
{
	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
}

static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};
6023
spr_uncore_mmio_offs8_init_box(struct intel_uncore_box * box)6024 static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box)
6025 {
6026 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
6027 intel_generic_uncore_mmio_init_box(box);
6028 }
6029
6030 static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = {
6031 .init_box = spr_uncore_mmio_offs8_init_box,
6032 .exit_box = uncore_mmio_exit_box,
6033 .disable_box = intel_generic_uncore_mmio_disable_box,
6034 .enable_box = intel_generic_uncore_mmio_enable_box,
6035 .disable_event = intel_generic_uncore_mmio_disable_event,
6036 .enable_event = spr_uncore_mmio_enable_event,
6037 .read_counter = uncore_mmio_read_counter,
6038 };
6039
6040 #define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \
6041 SPR_UNCORE_COMMON_FORMAT(), \
6042 .ops = &spr_uncore_mmio_offs8_ops
6043
/* Counter constraints for SPR CXL cache-memory (CXLCM) events. */
static struct event_constraint spr_uncore_cxlcm_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x0f),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x0f),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x40, 0x43, 0xf0),
	UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0),
	UNCORE_EVENT_CONSTRAINT(0x52, 0xf0),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_cxlcm = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "cxlcm",
	.constraints		= spr_uncore_cxlcm_constraints,
};

static struct intel_uncore_type spr_uncore_cxldp = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "cxldp",
};

static struct intel_uncore_type spr_uncore_hbm = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "hbm",
};
6068
/* Discovery-table type IDs used on SPR; indices into spr_uncores[]. */
#define UNCORE_SPR_NUM_UNCORE_TYPES		15
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6
#define UNCORE_SPR_UPI				8
#define UNCORE_SPR_M3UPI			9

/*
 * The uncore units, which are supported by the discovery table,
 * are defined here.
 */
static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	NULL,
	NULL,
	NULL,
	&spr_uncore_mdf,
	&spr_uncore_cxlcm,
	&spr_uncore_cxldp,
	&spr_uncore_hbm,
};

/*
 * The uncore units, which are not supported by the discovery table,
 * are implemented from here.
 */
#define SPR_UNCORE_UPI_NUM_BOXES	4

/* Per-box PCI config-space offset of each UPI/M3UPI PMON block. */
static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
	0, 0x8000, 0x10000, 0x18000
};
6107
spr_extra_boxes_cleanup(struct intel_uncore_type * type)6108 static void spr_extra_boxes_cleanup(struct intel_uncore_type *type)
6109 {
6110 struct intel_uncore_discovery_unit *pos;
6111 struct rb_node *node;
6112
6113 if (!type->boxes)
6114 return;
6115
6116 while (!RB_EMPTY_ROOT(type->boxes)) {
6117 node = rb_first(type->boxes);
6118 pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
6119 rb_erase(node, type->boxes);
6120 kfree(pos);
6121 }
6122 kfree(type->boxes);
6123 type->boxes = NULL;
6124 }
6125
/*
 * SPR UPI PMON, fully hand-defined because the discovery table for UPI is
 * broken on some SPR variants.  Register offsets are relative to box_ctl
 * (hence the CTRL0 - BOX_CTL arithmetic); per-box placement comes from
 * spr_upi_pci_offsets and spr_update_device_location().
 */
static struct intel_uncore_type spr_uncore_upi = {
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.format_group		= &spr_uncore_raw_format_group,
	.ops			= &spr_uncore_pci_ops,
	.name			= "upi",
	.attr_update		= spr_upi_attr_update,
	.get_topology		= spr_upi_get_topology,
	.set_mapping		= spr_upi_set_mapping,
	.cleanup_mapping	= spr_upi_cleanup_mapping,
	.type_id		= UNCORE_SPR_UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};

/* SPR M3UPI PMON, likewise hand-defined (same broken discovery table). */
static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.type_id		= UNCORE_SPR_M3UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.constraints		= icx_uncore_m3upi_constraints,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};
6161
/* SPR IIO free-running counter groups (MSR-based). */
enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

/* { counter base, counter offset, box offset, num counters, bits } per group */
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};

/* num_boxes is filled in at runtime from the discovered IIO count. */
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* SPR IMC free-running counter groups (MMIO-based). */
enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};
6205
/* PCI device ID of the SPR memory controller (for BAR lookup). */
#define SPR_MC_DEVICE_ID			0x3251

/* Map the free-running counter MMIO region of one SPR memory controller. */
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

/* Free-running counters: read-only, no enable/disable control. */
static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* num_boxes is derived at runtime from the discovered IMC channel count. */
static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
6233
/* Extra (non-discovery) uncore types appended per access method. */
#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
#define UNCORE_SPR_PCI_EXTRA_UNCORES		2

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
	&spr_uncore_upi,
	&spr_uncore_m3upi
};

/* Discovery-table entries to skip on SPR (replaced by hand-defined types). */
int spr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_SPR_M3UPI,
	UNCORE_IGNORE_END
};
6256
uncore_type_customized_copy(struct intel_uncore_type * to_type,struct intel_uncore_type * from_type)6257 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6258 struct intel_uncore_type *from_type)
6259 {
6260 if (!to_type || !from_type)
6261 return;
6262
6263 if (from_type->name)
6264 to_type->name = from_type->name;
6265 if (from_type->fixed_ctr_bits)
6266 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6267 if (from_type->event_mask)
6268 to_type->event_mask = from_type->event_mask;
6269 if (from_type->event_mask_ext)
6270 to_type->event_mask_ext = from_type->event_mask_ext;
6271 if (from_type->fixed_ctr)
6272 to_type->fixed_ctr = from_type->fixed_ctr;
6273 if (from_type->fixed_ctl)
6274 to_type->fixed_ctl = from_type->fixed_ctl;
6275 if (from_type->fixed_ctr_bits)
6276 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6277 if (from_type->num_shared_regs)
6278 to_type->num_shared_regs = from_type->num_shared_regs;
6279 if (from_type->constraints)
6280 to_type->constraints = from_type->constraints;
6281 if (from_type->ops)
6282 to_type->ops = from_type->ops;
6283 if (from_type->event_descs)
6284 to_type->event_descs = from_type->event_descs;
6285 if (from_type->format_group)
6286 to_type->format_group = from_type->format_group;
6287 if (from_type->attr_update)
6288 to_type->attr_update = from_type->attr_update;
6289 if (from_type->set_mapping)
6290 to_type->set_mapping = from_type->set_mapping;
6291 if (from_type->get_topology)
6292 to_type->get_topology = from_type->get_topology;
6293 if (from_type->cleanup_mapping)
6294 to_type->cleanup_mapping = from_type->cleanup_mapping;
6295 if (from_type->mmio_map_size)
6296 to_type->mmio_map_size = from_type->mmio_map_size;
6297 }
6298
/*
 * Build the uncore type list for one access method from the discovery
 * table, overlay the platform's customized fields on each discovered
 * type, and append @num_extra hand-defined types at the end.  Returns
 * the NULL-terminated list (extra slots were reserved by
 * intel_uncore_generic_init_uncores()).
 */
struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra, int max_num_types,
		   struct intel_uncore_type **uncores)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		/* type IDs beyond the platform's table have no customization */
		if ((*types)->type_id >= max_num_types)
			continue;
		uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}
6321
6322 static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type ** types,int type_id)6323 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6324 {
6325 for (; *types; types++) {
6326 if (type_id == (*types)->type_id)
6327 return *types;
6328 }
6329
6330 return NULL;
6331 }
6332
uncore_type_max_boxes(struct intel_uncore_type ** types,int type_id)6333 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6334 int type_id)
6335 {
6336 struct intel_uncore_discovery_unit *unit;
6337 struct intel_uncore_type *type;
6338 struct rb_node *node;
6339 int max = 0;
6340
6341 type = uncore_find_type_by_id(types, type_id);
6342 if (!type)
6343 return 0;
6344
6345 for (node = rb_first(type->boxes); node; node = rb_next(node)) {
6346 unit = rb_entry(node, struct intel_uncore_discovery_unit, node);
6347
6348 /*
6349 * on DMR IMH2, the unit id starts from 0x8000,
6350 * and we don't need to count it.
6351 */
6352 if ((unit->id > max) && (unit->id < 0x8000))
6353 max = unit->id;
6354 }
6355 return max + 1;
6356 }
6357
/* MSR reporting the actual number of CHA boxes (works around firmware bug). */
#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

/*
 * Register the SPR MSR-based uncore PMON types.  Corrects the CHA box
 * count from SPR_MSR_UNC_CBO_CONFIG and sizes the free-running IIO type
 * from the discovered IIO count.
 */
void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in the type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
		 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
		 */
		rdmsrq(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}
6388
/* PCI IDs and base devfns of the SPR UPI/M3UPI PMON devices. */
#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

/*
 * Build the per-unit location (discovery rb-tree) of the UPI or M3UPI
 * type at load time by scanning the PCI bus, since the discovery table
 * for these units is broken on some SPR variants.  On allocation
 * failure the type is disabled (num_boxes = 0).  Units that cannot be
 * mapped to a die are silently skipped.
 */
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	struct rb_root *root;
	u32 device, devfn;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	root = kzalloc_obj(struct rb_root);
	if (!root) {
		type->num_boxes = 0;
		return;
	}
	*root = RB_ROOT;

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {

		die = uncore_device_to_die(dev);
		if (die < 0)
			continue;

		unit = kzalloc_obj(*unit);
		if (!unit)
			continue;
		unit->die = die;
		/* unit index = slot distance from the type's first device */
		unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn);
		unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			     dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			     devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			     type->box_ctl;

		unit->pmu_idx = unit->id;

		uncore_find_add_unit(unit, root, NULL);
	}

	type->boxes = root;
}
6444
spr_uncore_pci_init(void)6445 int spr_uncore_pci_init(void)
6446 {
6447 /*
6448 * The discovery table of UPI on some SPR variant is broken,
6449 * which impacts the detection of both UPI and M3UPI uncore PMON.
6450 * Use the pre-defined UPI and M3UPI table to replace.
6451 *
6452 * The accurate location, e.g., domain and BUS number,
6453 * can only be retrieved at load time.
6454 * Update the location of UPI and M3UPI.
6455 */
6456 spr_update_device_location(UNCORE_SPR_UPI);
6457 spr_update_device_location(UNCORE_SPR_M3UPI);
6458 uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
6459 UNCORE_SPR_PCI_EXTRA_UNCORES,
6460 spr_pci_uncores,
6461 UNCORE_SPR_NUM_UNCORE_TYPES,
6462 spr_uncores);
6463 return 0;
6464 }
6465
spr_uncore_mmio_init(void)6466 void spr_uncore_mmio_init(void)
6467 {
6468 int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6469
6470 if (ret) {
6471 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
6472 UNCORE_SPR_NUM_UNCORE_TYPES,
6473 spr_uncores);
6474 } else {
6475 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6476 UNCORE_SPR_MMIO_EXTRA_UNCORES,
6477 spr_mmio_uncores,
6478 UNCORE_SPR_NUM_UNCORE_TYPES,
6479 spr_uncores);
6480
6481 spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6482 }
6483 }
6484
6485 /* end of SPR uncore support */
6486
6487 /* GNR uncore support */
6488
/* Number of uncore unit types (discovery-table type ids) on GNR. */
#define UNCORE_GNR_NUM_UNCORE_TYPES	23

/* No discovery units need to be ignored on GNR. */
int gnr_uncore_units_ignore[] = {
	UNCORE_IGNORE_END
};

/* GNR Ubox PMON; only the alias attribute group is added on top. */
static struct intel_uncore_type gnr_uncore_ubox = {
	.name			= "ubox",
	.attr_update		= uncore_alias_groups,
};
6499
/*
 * IMC CAS-count event aliases, split per memory sub-channel.
 * Scale 6.103515625e-5 is 64 / 2^20: each CAS presumably transfers a
 * 64-byte line, so the scaled result reads out in MiB.
 */
static struct uncore_event_desc gnr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,			"event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0,		"event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1,		"event=0x06,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0,		"event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1,		"event=0x06,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* GNR IMC PMON: SPR MMIO register format plus a 48-bit fixed counter. */
static struct intel_uncore_type gnr_uncore_imc = {
	SPR_UNCORE_MMIO_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs		= gnr_uncore_imc_events,
};
6525
/* GNR-only PMON types reusing the SPR PCI register format. */
static struct intel_uncore_type gnr_uncore_pciex8 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "pciex8",
};

static struct intel_uncore_type gnr_uncore_pciex16 = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "pciex16",
};

static struct intel_uncore_type gnr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "upi",
};

static struct intel_uncore_type gnr_uncore_b2upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "b2upi",
};

/* No explicit format/ops: presumably filled in from the discovery table. */
static struct intel_uncore_type gnr_uncore_b2hot = {
	.name			= "b2hot",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type gnr_uncore_b2cmi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "b2cmi",
};

/* b2cxl uses the MMIO format with an 8-byte register stride (OFFS8). */
static struct intel_uncore_type gnr_uncore_b2cxl = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "b2cxl",
};

static struct intel_uncore_type gnr_uncore_mdf_sbo = {
	.name			= "mdf_sbo",
	.attr_update		= uncore_alias_groups,
};
6565
/*
 * GNR PMON types, indexed by discovery-table type id.  NULL slots are
 * type ids with no dedicated support here — presumably handled
 * generically or unsupported; verify against the discovery enum.
 */
static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,	/*  0 */
	&spr_uncore_iio,
	&spr_uncore_irp,
	NULL,
	&spr_uncore_pcu,
	&gnr_uncore_ubox,	/*  5 */
	&gnr_uncore_imc,
	NULL,
	&gnr_uncore_upi,
	NULL,
	NULL,			/* 10 */
	NULL,
	&spr_uncore_cxlcm,
	&spr_uncore_cxldp,
	NULL,
	&gnr_uncore_b2hot,	/* 15 */
	&gnr_uncore_b2cmi,
	&gnr_uncore_b2cxl,
	&gnr_uncore_b2upi,
	NULL,
	&gnr_uncore_mdf_sbo,	/* 20 */
	&gnr_uncore_pciex16,
	&gnr_uncore_pciex8,
};

/*
 * Per-type free-running counter layout — looks like
 * { counter base, counter stride, box stride, #counters, width };
 * confirm field order against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters gnr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x290e, 0x01, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
};
6597
gnr_uncore_cpu_init(void)6598 void gnr_uncore_cpu_init(void)
6599 {
6600 uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6601 UNCORE_SPR_MSR_EXTRA_UNCORES,
6602 spr_msr_uncores,
6603 UNCORE_GNR_NUM_UNCORE_TYPES,
6604 gnr_uncores);
6605 spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6606 spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
6607 }
6608
gnr_uncore_pci_init(void)6609 int gnr_uncore_pci_init(void)
6610 {
6611 uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
6612 UNCORE_GNR_NUM_UNCORE_TYPES,
6613 gnr_uncores);
6614 return 0;
6615 }
6616
gnr_uncore_mmio_init(void)6617 void gnr_uncore_mmio_init(void)
6618 {
6619 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
6620 UNCORE_GNR_NUM_UNCORE_TYPES,
6621 gnr_uncores);
6622 }
6623
6624 /* end of GNR uncore support */
6625
6626 /* DMR uncore support */
/* Number of uncore unit types (discovery-table type ids) on DMR. */
#define UNCORE_DMR_NUM_UNCORE_TYPES	52

/* DMR IMC counter format: 10-bit threshold, otherwise standard fields. */
static struct attribute *dmr_imc_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh10.attr,
	NULL,
};

static const struct attribute_group dmr_imc_uncore_format_group = {
	.name = "format",
	.attrs = dmr_imc_uncore_formats_attr,
};

/* DMR IMC PMON: MMIO access ops plus a 48-bit fixed counter. */
static struct intel_uncore_type dmr_uncore_imc = {
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= DMR_IMC_PMON_FIXED_CTR,
	.fixed_ctl		= DMR_IMC_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
	.format_group		= &dmr_imc_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6652
/* SCA-style counter format: 5-bit umask extension, 8-bit threshold. */
static struct attribute *dmr_sca_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext5.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Shared by most DMR types below (hamvf, cbo, ula, dda, sbo, ...). */
static const struct attribute_group dmr_sca_uncore_format_group = {
	.name = "format",
	.attrs = dmr_sca_uncore_formats_attr,
};

static struct intel_uncore_type dmr_uncore_sca = {
	.name			= "sca",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6673
/* CXLCM format adds a port-enable field and a split threshold/invert. */
static struct attribute *dmr_cxlcm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv2.attr,
	&format_attr_thresh9_2.attr,
	&format_attr_port_en.attr,
	NULL,
};

static const struct attribute_group dmr_cxlcm_uncore_format_group = {
	.name = "format",
	.attrs = dmr_cxlcm_uncore_formats_attr,
};

/*
 * Events 0x01-0x24 may only use counters 0-3 (mask 0x0f); events 0x41,
 * 0x50-0x5e and 0x60-0x61 may only use counters 4-7 (mask 0xf0).
 */
static struct event_constraint dmr_uncore_cxlcm_constraints[] = {
	UNCORE_EVENT_CONSTRAINT_RANGE(0x1, 0x24, 0x0f),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x41, 0x41, 0xf0),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x50, 0x5e, 0xf0),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x60, 0x61, 0xf0),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type dmr_uncore_cxlcm = {
	.name			= "cxlcm",
	.event_mask		= GENERIC_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= DMR_CXLCM_EVENT_MASK_EXT,
	.constraints		= dmr_uncore_cxlcm_constraints,
	.format_group		= &dmr_cxlcm_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6705
static struct intel_uncore_type dmr_uncore_hamvf = {
	.name			= "hamvf",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

/* These CBo events are restricted to counter 0 (constraint mask 0x1). */
static struct event_constraint dmr_uncore_cbo_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT_RANGE(0x19, 0x1a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type dmr_uncore_cbo = {
	.name			= "cbo",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.constraints		= dmr_uncore_cbo_constraints,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

/* The next three carry no explicit format/ops; only the alias group is added. */
static struct intel_uncore_type dmr_uncore_santa = {
	.name			= "santa",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_cncu = {
	.name			= "cncu",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_sncu = {
	.name			= "sncu",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_ula = {
	.name			= "ula",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_dda = {
	.name			= "dda",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6759
/* SBo events 0x1f and 0x25 are restricted to counter 0. */
static struct event_constraint dmr_uncore_sbo_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x01),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x01),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type dmr_uncore_sbo = {
	.name			= "sbo",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.constraints		= dmr_uncore_sbo_constraints,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_ubr = {
	.name			= "ubr",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6780
/*
 * PCIE4 counter format: extended threshold plus a set of selector
 * fields (rs3/rx/tx/iep/vc/port, judging by the attribute names).
 */
static struct attribute *dmr_pcie4_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_thresh_ext.attr,
	&format_attr_rs3_sel.attr,
	&format_attr_rx_sel.attr,
	&format_attr_tx_sel.attr,
	&format_attr_iep_sel.attr,
	&format_attr_vc_sel.attr,
	&format_attr_port_sel.attr,
	NULL,
};

static const struct attribute_group dmr_pcie4_uncore_format_group = {
	.name = "format",
	.attrs = dmr_pcie4_uncore_formats_attr,
};

static struct intel_uncore_type dmr_uncore_pcie4 = {
	.name			= "pcie4",
	.event_mask_ext		= DMR_PCIE4_EVENT_MASK_EXT,
	.format_group		= &dmr_pcie4_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_crs = {
	.name			= "crs",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_cpc = {
	.name			= "cpc",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_itc = {
	.name			= "itc",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_otc = {
	.name			= "otc",
	.event_mask_ext		= DMR_HAMVF_EVENT_MASK_EXT,
	.format_group		= &dmr_sca_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type dmr_uncore_cms = {
	.name			= "cms",
	.attr_update		= uncore_alias_groups,
};

/* pcie6 reuses the pcie4 format group and extended event mask. */
static struct intel_uncore_type dmr_uncore_pcie6 = {
	.name			= "pcie6",
	.event_mask_ext		= DMR_PCIE4_EVENT_MASK_EXT,
	.format_group		= &dmr_pcie4_uncore_format_group,
	.attr_update		= uncore_alias_groups,
};
6846
/*
 * DMR PMON types, indexed by discovery-table type id.  NULL slots are
 * type ids without dedicated support here.
 */
static struct intel_uncore_type *dmr_uncores[UNCORE_DMR_NUM_UNCORE_TYPES] = {
	NULL, NULL, NULL, NULL,		/*  0-3  */
	&spr_uncore_pcu,		/*  4    */
	&gnr_uncore_ubox,
	&dmr_uncore_imc,
	NULL,
	NULL, NULL, NULL, NULL,		/*  8-11 */
	NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,		/* 16-19 */
	NULL, NULL, NULL,
	&dmr_uncore_sca,		/* 23   */
	&dmr_uncore_cxlcm,
	NULL, NULL, NULL,
	NULL, NULL,
	&dmr_uncore_hamvf,		/* 30   */
	&dmr_uncore_cbo,
	&dmr_uncore_santa,
	&dmr_uncore_cncu,
	&dmr_uncore_sncu,
	&dmr_uncore_ula,		/* 35   */
	&dmr_uncore_dda,
	NULL,
	&dmr_uncore_sbo,		/* 38   */
	NULL,
	NULL, NULL, NULL,		/* 40-42 */
	&dmr_uncore_ubr,		/* 43   */
	NULL,
	&dmr_uncore_pcie4,		/* 45   */
	&dmr_uncore_crs,
	&dmr_uncore_cpc,
	&dmr_uncore_itc,
	&dmr_uncore_otc,
	&dmr_uncore_cms,		/* 50   */
	&dmr_uncore_pcie6,
};

/* Discovery units to skip for IMH. */
int dmr_uncore_imh_units_ignore[] = {
	0x13, /* MSE */
	UNCORE_IGNORE_END
};

/* Discovery units to skip for CBB. */
int dmr_uncore_cbb_units_ignore[] = {
	0x25, /* SB2UCIE */
	UNCORE_IGNORE_END
};

/* Per-box offsets from DMR_IMH1_HIOP_MMIO_BASE for free-running counters. */
static unsigned int dmr_iio_freerunning_box_offsets[] = {
	0x0, 0x8000, 0x18000, 0x20000
};
6896
/*
 * Map the free-running counter MMIO region for one IIO box.  Each
 * box's registers sit at a fixed per-box offset from
 * DMR_IMH1_HIOP_MMIO_BASE.
 *
 * NOTE(review): dmr_iio_freerunning_box_offsets has only 4 entries, so
 * this relies on type->num_boxes never exceeding 4 — confirm against
 * dmr_uncore_mmio_init()/uncore_type_max_boxes().
 */
static void dmr_uncore_freerunning_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	u64 mmio_base;

	/* Boxes beyond the detected count get no mapping. */
	if (box->pmu->pmu_idx >= type->num_boxes)
		return;

	mmio_base = DMR_IMH1_HIOP_MMIO_BASE;
	mmio_base += dmr_iio_freerunning_box_offsets[box->pmu->pmu_idx];

	box->io_addr = ioremap(mmio_base, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}
6912
static struct intel_uncore_ops dmr_uncore_freerunning_ops = {
	.init_box	= dmr_uncore_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* Free-running counter groups exposed by the DMR ITC/OTC blocks. */
enum perf_uncore_dmr_iio_freerunning_type_id {
	DMR_ITC_INB_DATA_BW,
	DMR_ITC_BW_IN,
	DMR_OTC_BW_OUT,
	DMR_OTC_CLOCK_TICKS,

	DMR_IIO_FREERUNNING_TYPE_MAX,
};

/*
 * Looks like { MMIO base offset, counter stride, box offset, #counters,
 * width } — confirm field order against struct freerunning_counters.
 */
static struct freerunning_counters dmr_iio_freerunning[] = {
	[DMR_ITC_INB_DATA_BW]	= { 0x4d40, 0x8, 0, 8, 48},
	[DMR_ITC_BW_IN]		= { 0x6b00, 0x8, 0, 8, 48},
	[DMR_OTC_BW_OUT]	= { 0x6b60, 0x8, 0, 8, 48},
	[DMR_OTC_CLOCK_TICKS]	= { 0x6bb0, 0x8, 0, 1, 48},
};
6935
/*
 * Free-running event aliases, one per port per direction.  Scale
 * 3.814697266e-6 is 4 / 2^20, i.e. results read out in MiB assuming
 * 4-byte counting units — NOTE(review): confirm the counter
 * granularity against the DMR uncore documentation.
 */
static struct uncore_event_desc dmr_uncore_iio_freerunning_events[] = {
	/* ITC Free Running Data BW counter for inbound traffic */
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port0,	0x10, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port1,	0x11, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port2,	0x12, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port3,	0x13, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port4,	0x14, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port5,	0x15, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port6,	0x16, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(inb_data_port7,	0x17, "3.814697266e-6"),

	/* ITC Free Running BW IN counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port0,		0x20, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port1,		0x21, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port2,		0x22, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port3,		0x23, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port4,		0x24, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port5,		0x25, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port6,		0x26, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_in_port7,		0x27, "3.814697266e-6"),

	/* ITC Free Running BW OUT counters */
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port0,	0x30, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port1,	0x31, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port2,	0x32, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port3,	0x33, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port4,	0x34, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port5,	0x35, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port6,	0x36, "3.814697266e-6"),
	INTEL_UNCORE_FR_EVENT_DESC(bw_out_port7,	0x37, "3.814697266e-6"),

	/* Free Running Clock Counter */
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x40"),
	{ /* end: all zeroes */ },
};
6971
/*
 * Virtual type for the DMR IIO free-running counters: 24 bandwidth
 * counters (8 ports x 3 groups) plus 1 clock counter = 25.
 */
static struct intel_uncore_type dmr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 25,
	.mmio_map_size		= DMR_HIOP_MMIO_SIZE,
	.num_freerunning_types	= DMR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= dmr_iio_freerunning,
	.ops			= &dmr_uncore_freerunning_ops,
	.event_descs		= dmr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* Extra (non-discovered) MMIO types passed to dmr_uncore_mmio_init(). */
#define UNCORE_DMR_MMIO_EXTRA_UNCORES	1
static struct intel_uncore_type *dmr_mmio_uncores[UNCORE_DMR_MMIO_EXTRA_UNCORES] = {
	&dmr_uncore_iio_free_running,
};
6987
dmr_uncore_pci_init(void)6988 int dmr_uncore_pci_init(void)
6989 {
6990 uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
6991 UNCORE_DMR_NUM_UNCORE_TYPES,
6992 dmr_uncores);
6993 return 0;
6994 }
6995
dmr_uncore_mmio_init(void)6996 void dmr_uncore_mmio_init(void)
6997 {
6998 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6999 UNCORE_DMR_MMIO_EXTRA_UNCORES,
7000 dmr_mmio_uncores,
7001 UNCORE_DMR_NUM_UNCORE_TYPES,
7002 dmr_uncores);
7003
7004 dmr_uncore_iio_free_running.num_boxes =
7005 uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_DMR_ITC);
7006 }
7007 /* end of DMR uncore support */
7008