// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include <asm/cpu_device_id.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
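
/*
 * Entries built with SNBEP_CBO_EVENT_EXTRA_REG() mark events matching 'e'
 * under mask 'm' as needing the Cbox filter register; 'i' is a bitmask of
 * the filter fields involved (0x1 TID, 0x2 NID, 0x4 STATE, 0x8 OPC),
 * decoded by snbep_cbox_filter_mask() further down.
 */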

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
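
/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x; e.g.
 * __BITS_VALUE(v, 2, 6) yields bits 17:12 of v, the third of the packed
 * 6-bit reference counts kept in the shared-reg 'ref' fields below.
 */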

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
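
/*
 * For example (illustrative only, not driver code), the bus number BIOS
 * assigned CPUBUSNO(n) can be pulled out of the raw MSR value with:
 *
 *	rdmsrl(SKX_MSR_CPU_BUS_NUMBER, msr);
 *	if (msr & SKX_MSR_CPU_BUS_VALID_BIT)
 *		bus = (msr >> (n * BUS_NUM_STRIDE)) & 0xff;
 */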

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)
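
/* E.g. a SAD_CONTROL_CFG value of 0x35 gives stack id (0x35 >> 4) & 0x7 = 3. */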

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_EVENT_MASK_EXT			0xffffffff
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
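
/*
 * PCI config space is read one dword at a time, so the counter value is
 * assembled from two 32-bit reads into the low and high halves of 'count'.
 */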
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
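
/*
 * The PCU filter register packs four 8-bit band thresholds, one per byte,
 * selected by the band events 0xb-0xe. When an event must move to another
 * band slot, both its event select and its byte of filter config are
 * shifted by the same distance.
 */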
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
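
/*
 * The QPI match/mask registers live on a separate "filter" PCI device,
 * looked up in uncore_extra_pci_dev[]; each 64-bit value is written out
 * as two dwords.
 */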
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Each 3-bit field in bits 0-23 of the GIDNIDMAP register corresponds to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
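/* E.g. GIDNIDMAP(config, 2) returns bits 8:6, the Node ID mapped to group 2. */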

static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
			      int *nodeid, int *groupid)
{
	int ret;

	/* get the Node ID of the local register */
	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
	if (ret)
		goto err;

	*nodeid = *nodeid & NODE_ID_MASK;
	/* get the Node ID mapping */
	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
	if (ret)
		goto err;
err:
	return ret;
}

static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i, die_id = -1;

	/*
	 * Every 3-bit field in the Node ID mapping register maps
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i)) {
			if (topology_max_dies_per_package() > 1)
				die_id = i;
			else
				die_id = topology_phys_to_logical_pkg(i);
			if (die_id < 0)
				die_id = -ENODEV;
			break;
		}
	}

	return die_id;
}

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);

			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
1488 		/*
1489 		 * For PCI bus with no UBOX device, find the next bus
1490 		 * that has UBOX device and use its mapping.
1491 		 */
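		/*
		 * Example with illustrative bus numbers: UBOX devices on
		 * buses 0x3f (die 0) and 0x7f (die 1) with reverse == true
		 * make buses 0x00-0x3e inherit die 0 and buses 0x40-0x7e
		 * inherit die 1.
		 */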
1492 		raw_spin_lock(&pci2phy_map_lock);
1493 		list_for_each_entry(map, &pci2phy_map_head, list) {
1494 			i = -1;
1495 			if (reverse) {
1496 				for (bus = 255; bus >= 0; bus--) {
1497 					if (map->pbus_to_dieid[bus] != -1)
1498 						i = map->pbus_to_dieid[bus];
1499 					else
1500 						map->pbus_to_dieid[bus] = i;
1501 				}
1502 			} else {
1503 				for (bus = 0; bus <= 255; bus++) {
1504 					if (map->pbus_to_dieid[bus] != -1)
1505 						i = map->pbus_to_dieid[bus];
1506 					else
1507 						map->pbus_to_dieid[bus] = i;
1508 				}
1509 			}
1510 		}
1511 		raw_spin_unlock(&pci2phy_map_lock);
1512 	}
1513 
1514 	pci_dev_put(ubox_dev);
1515 
1516 	return pcibios_err_to_errno(err);
1517 }
1518 
1519 int snbep_uncore_pci_init(void)
1520 {
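	/* 0x3ce0 is the device ID snbep_pci2phy_map_init() looks up to find the SNB-EP UBOX */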
1521 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1522 	if (ret)
1523 		return ret;
1524 	uncore_pci_uncores = snbep_pci_uncores;
1525 	uncore_pci_driver = &snbep_uncore_pci_driver;
1526 	return 0;
1527 }
1528 /* end of Sandy Bridge-EP uncore support */
1529 
1530 /* IvyTown uncore support */
1531 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1532 {
1533 	unsigned msr = uncore_msr_box_ctl(box);
1534 	if (msr)
1535 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1536 }
1537 
1538 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1539 {
1540 	struct pci_dev *pdev = box->pci_dev;
1541 
1542 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1543 }
1544 
1545 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
1546 	.init_box	= ivbep_uncore_msr_init_box,		\
1547 	.disable_box	= snbep_uncore_msr_disable_box,		\
1548 	.enable_box	= snbep_uncore_msr_enable_box,		\
1549 	.disable_event	= snbep_uncore_msr_disable_event,	\
1550 	.enable_event	= snbep_uncore_msr_enable_event,	\
1551 	.read_counter	= uncore_msr_read_counter
1552 
1553 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1554 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1555 };
1556 
1557 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1558 	.init_box	= ivbep_uncore_pci_init_box,
1559 	.disable_box	= snbep_uncore_pci_disable_box,
1560 	.enable_box	= snbep_uncore_pci_enable_box,
1561 	.disable_event	= snbep_uncore_pci_disable_event,
1562 	.enable_event	= snbep_uncore_pci_enable_event,
1563 	.read_counter	= snbep_uncore_pci_read_counter,
1564 };
1565 
1566 #define IVBEP_UNCORE_PCI_COMMON_INIT()				\
1567 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1568 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1569 	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
1570 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1571 	.ops		= &ivbep_uncore_pci_ops,			\
1572 	.format_group	= &ivbep_uncore_format_group
1573 
1574 static struct attribute *ivbep_uncore_formats_attr[] = {
1575 	&format_attr_event.attr,
1576 	&format_attr_umask.attr,
1577 	&format_attr_edge.attr,
1578 	&format_attr_inv.attr,
1579 	&format_attr_thresh8.attr,
1580 	NULL,
1581 };
1582 
1583 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1584 	&format_attr_event.attr,
1585 	&format_attr_umask.attr,
1586 	&format_attr_edge.attr,
1587 	&format_attr_inv.attr,
1588 	&format_attr_thresh5.attr,
1589 	NULL,
1590 };
1591 
1592 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1593 	&format_attr_event.attr,
1594 	&format_attr_umask.attr,
1595 	&format_attr_edge.attr,
1596 	&format_attr_tid_en.attr,
1597 	&format_attr_thresh8.attr,
1598 	&format_attr_filter_tid.attr,
1599 	&format_attr_filter_link.attr,
1600 	&format_attr_filter_state2.attr,
1601 	&format_attr_filter_nid2.attr,
1602 	&format_attr_filter_opc2.attr,
1603 	&format_attr_filter_nc.attr,
1604 	&format_attr_filter_c6.attr,
1605 	&format_attr_filter_isoc.attr,
1606 	NULL,
1607 };
1608 
1609 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1610 	&format_attr_event.attr,
1611 	&format_attr_occ_sel.attr,
1612 	&format_attr_edge.attr,
1613 	&format_attr_thresh5.attr,
1614 	&format_attr_occ_invert.attr,
1615 	&format_attr_occ_edge.attr,
1616 	&format_attr_filter_band0.attr,
1617 	&format_attr_filter_band1.attr,
1618 	&format_attr_filter_band2.attr,
1619 	&format_attr_filter_band3.attr,
1620 	NULL,
1621 };
1622 
1623 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1624 	&format_attr_event_ext.attr,
1625 	&format_attr_umask.attr,
1626 	&format_attr_edge.attr,
1627 	&format_attr_thresh8.attr,
1628 	&format_attr_match_rds.attr,
1629 	&format_attr_match_rnid30.attr,
1630 	&format_attr_match_rnid4.attr,
1631 	&format_attr_match_dnid.attr,
1632 	&format_attr_match_mc.attr,
1633 	&format_attr_match_opc.attr,
1634 	&format_attr_match_vnw.attr,
1635 	&format_attr_match0.attr,
1636 	&format_attr_match1.attr,
1637 	&format_attr_mask_rds.attr,
1638 	&format_attr_mask_rnid30.attr,
1639 	&format_attr_mask_rnid4.attr,
1640 	&format_attr_mask_dnid.attr,
1641 	&format_attr_mask_mc.attr,
1642 	&format_attr_mask_opc.attr,
1643 	&format_attr_mask_vnw.attr,
1644 	&format_attr_mask0.attr,
1645 	&format_attr_mask1.attr,
1646 	NULL,
1647 };
1648 
1649 static const struct attribute_group ivbep_uncore_format_group = {
1650 	.name = "format",
1651 	.attrs = ivbep_uncore_formats_attr,
1652 };
1653 
1654 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1655 	.name = "format",
1656 	.attrs = ivbep_uncore_ubox_formats_attr,
1657 };
1658 
1659 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1660 	.name = "format",
1661 	.attrs = ivbep_uncore_cbox_formats_attr,
1662 };
1663 
1664 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1665 	.name = "format",
1666 	.attrs = ivbep_uncore_pcu_formats_attr,
1667 };
1668 
1669 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1670 	.name = "format",
1671 	.attrs = ivbep_uncore_qpi_formats_attr,
1672 };
1673 
1674 static struct intel_uncore_type ivbep_uncore_ubox = {
1675 	.name		= "ubox",
1676 	.num_counters   = 2,
1677 	.num_boxes	= 1,
1678 	.perf_ctr_bits	= 44,
1679 	.fixed_ctr_bits	= 48,
1680 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1681 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1682 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1683 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1684 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1685 	.ops		= &ivbep_uncore_msr_ops,
1686 	.format_group	= &ivbep_uncore_ubox_format_group,
1687 };
1688 
1689 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1690 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1691 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1692 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1693 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1694 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1695 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1696 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1697 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1698 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1699 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1700 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1701 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1702 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1703 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1704 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1705 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1706 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1707 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1708 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1709 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1710 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1711 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1712 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1713 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1714 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1715 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1716 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1717 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1718 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1719 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1720 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1721 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1722 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1723 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1724 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1725 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1726 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1727 	EVENT_EXTRA_END
1728 };
1729 
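/*
 * The ->idx values above form a bit mask of the filter fields an event
 * needs: 0x1 = TID, 0x2 = LINK, 0x4 = STATE, 0x8 = NID and 0x10 = the
 * OPC/NC/C6/ISOC group, matching the decode in ivbep_cbox_filter_mask().
 */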
1730 static u64 ivbep_cbox_filter_mask(int fields)
1731 {
1732 	u64 mask = 0;
1733 
1734 	if (fields & 0x1)
1735 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1736 	if (fields & 0x2)
1737 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1738 	if (fields & 0x4)
1739 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1740 	if (fields & 0x8)
1741 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1742 	if (fields & 0x10) {
1743 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1744 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1745 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1746 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1747 	}
1748 
1749 	return mask;
1750 }
1751 
1752 static struct event_constraint *
1753 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1754 {
1755 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1756 }
1757 
1758 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1759 {
1760 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1761 	struct extra_reg *er;
1762 	int idx = 0;
1763 
1764 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1765 		if (er->event != (event->hw.config & er->config_mask))
1766 			continue;
1767 		idx |= er->idx;
1768 	}
1769 
1770 	if (idx) {
1771 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1772 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1773 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1774 		reg1->idx = idx;
1775 	}
1776 	return 0;
1777 }
1778 
1779 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1780 {
1781 	struct hw_perf_event *hwc = &event->hw;
1782 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1783 
1784 	if (reg1->idx != EXTRA_REG_NONE) {
1785 		u64 filter = uncore_shared_reg_config(box, 0);
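		/* the 64-bit filter is split: low half at reg, high half at the second filter MSR, 6 apart */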
1786 		wrmsrl(reg1->reg, filter & 0xffffffff);
1787 		wrmsrl(reg1->reg + 6, filter >> 32);
1788 	}
1789 
1790 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1791 }
1792 
1793 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1794 	.init_box		= ivbep_uncore_msr_init_box,
1795 	.disable_box		= snbep_uncore_msr_disable_box,
1796 	.enable_box		= snbep_uncore_msr_enable_box,
1797 	.disable_event		= snbep_uncore_msr_disable_event,
1798 	.enable_event		= ivbep_cbox_enable_event,
1799 	.read_counter		= uncore_msr_read_counter,
1800 	.hw_config		= ivbep_cbox_hw_config,
1801 	.get_constraint		= ivbep_cbox_get_constraint,
1802 	.put_constraint		= snbep_cbox_put_constraint,
1803 };
1804 
1805 static struct intel_uncore_type ivbep_uncore_cbox = {
1806 	.name			= "cbox",
1807 	.num_counters		= 4,
1808 	.num_boxes		= 15,
1809 	.perf_ctr_bits		= 44,
1810 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1811 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1812 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1813 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1814 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1815 	.num_shared_regs	= 1,
1816 	.constraints		= snbep_uncore_cbox_constraints,
1817 	.ops			= &ivbep_uncore_cbox_ops,
1818 	.format_group		= &ivbep_uncore_cbox_format_group,
1819 };
1820 
1821 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1822 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1823 	.hw_config		= snbep_pcu_hw_config,
1824 	.get_constraint		= snbep_pcu_get_constraint,
1825 	.put_constraint		= snbep_pcu_put_constraint,
1826 };
1827 
1828 static struct intel_uncore_type ivbep_uncore_pcu = {
1829 	.name			= "pcu",
1830 	.num_counters		= 4,
1831 	.num_boxes		= 1,
1832 	.perf_ctr_bits		= 48,
1833 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1834 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1835 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1836 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1837 	.num_shared_regs	= 1,
1838 	.ops			= &ivbep_uncore_pcu_ops,
1839 	.format_group		= &ivbep_uncore_pcu_format_group,
1840 };
1841 
1842 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1843 	&ivbep_uncore_ubox,
1844 	&ivbep_uncore_cbox,
1845 	&ivbep_uncore_pcu,
1846 	NULL,
1847 };
1848 
1849 void ivbep_uncore_cpu_init(void)
1850 {
1851 	if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1852 		ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1853 	uncore_msr_uncores = ivbep_msr_uncores;
1854 }
1855 
1856 static struct intel_uncore_type ivbep_uncore_ha = {
1857 	.name		= "ha",
1858 	.num_counters   = 4,
1859 	.num_boxes	= 2,
1860 	.perf_ctr_bits	= 48,
1861 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1862 };
1863 
1864 static struct intel_uncore_type ivbep_uncore_imc = {
1865 	.name		= "imc",
1866 	.num_counters   = 4,
1867 	.num_boxes	= 8,
1868 	.perf_ctr_bits	= 48,
1869 	.fixed_ctr_bits	= 48,
1870 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1871 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1872 	.event_descs	= snbep_uncore_imc_events,
1873 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1874 };
1875 
1876 /* registers in IRP boxes are not properly aligned */
1877 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1878 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1879 
1880 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1881 {
1882 	struct pci_dev *pdev = box->pci_dev;
1883 	struct hw_perf_event *hwc = &event->hw;
1884 
1885 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1886 			       hwc->config | SNBEP_PMON_CTL_EN);
1887 }
1888 
1889 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1890 {
1891 	struct pci_dev *pdev = box->pci_dev;
1892 	struct hw_perf_event *hwc = &event->hw;
1893 
1894 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1895 }
1896 
1897 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1898 {
1899 	struct pci_dev *pdev = box->pci_dev;
1900 	struct hw_perf_event *hwc = &event->hw;
1901 	u64 count = 0;
1902 
1903 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
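	/* assemble the 64-bit counter from two 32-bit config-space reads (x86 is little-endian) */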
1904 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1905 
1906 	return count;
1907 }
1908 
1909 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1910 	.init_box	= ivbep_uncore_pci_init_box,
1911 	.disable_box	= snbep_uncore_pci_disable_box,
1912 	.enable_box	= snbep_uncore_pci_enable_box,
1913 	.disable_event	= ivbep_uncore_irp_disable_event,
1914 	.enable_event	= ivbep_uncore_irp_enable_event,
1915 	.read_counter	= ivbep_uncore_irp_read_counter,
1916 };
1917 
1918 static struct intel_uncore_type ivbep_uncore_irp = {
1919 	.name			= "irp",
1920 	.num_counters		= 4,
1921 	.num_boxes		= 1,
1922 	.perf_ctr_bits		= 48,
1923 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1924 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1925 	.ops			= &ivbep_uncore_irp_ops,
1926 	.format_group		= &ivbep_uncore_format_group,
1927 };
1928 
1929 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1930 	.init_box	= ivbep_uncore_pci_init_box,
1931 	.disable_box	= snbep_uncore_pci_disable_box,
1932 	.enable_box	= snbep_uncore_pci_enable_box,
1933 	.disable_event	= snbep_uncore_pci_disable_event,
1934 	.enable_event	= snbep_qpi_enable_event,
1935 	.read_counter	= snbep_uncore_pci_read_counter,
1936 	.hw_config	= snbep_qpi_hw_config,
1937 	.get_constraint	= uncore_get_constraint,
1938 	.put_constraint	= uncore_put_constraint,
1939 };
1940 
1941 static struct intel_uncore_type ivbep_uncore_qpi = {
1942 	.name			= "qpi",
1943 	.num_counters		= 4,
1944 	.num_boxes		= 3,
1945 	.perf_ctr_bits		= 48,
1946 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1947 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1948 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1949 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1950 	.num_shared_regs	= 1,
1951 	.ops			= &ivbep_uncore_qpi_ops,
1952 	.format_group		= &ivbep_uncore_qpi_format_group,
1953 };
1954 
1955 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1956 	.name		= "r2pcie",
1957 	.num_counters   = 4,
1958 	.num_boxes	= 1,
1959 	.perf_ctr_bits	= 44,
1960 	.constraints	= snbep_uncore_r2pcie_constraints,
1961 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1962 };
1963 
1964 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1965 	.name		= "r3qpi",
1966 	.num_counters   = 3,
1967 	.num_boxes	= 2,
1968 	.perf_ctr_bits	= 44,
1969 	.constraints	= snbep_uncore_r3qpi_constraints,
1970 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1971 };
1972 
1973 enum {
1974 	IVBEP_PCI_UNCORE_HA,
1975 	IVBEP_PCI_UNCORE_IMC,
1976 	IVBEP_PCI_UNCORE_IRP,
1977 	IVBEP_PCI_UNCORE_QPI,
1978 	IVBEP_PCI_UNCORE_R2PCIE,
1979 	IVBEP_PCI_UNCORE_R3QPI,
1980 };
1981 
1982 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1983 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1984 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1985 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1986 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1987 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1988 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1989 	NULL,
1990 };
1991 
1992 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1993 	{ /* Home Agent 0 */
1994 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1995 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1996 	},
1997 	{ /* Home Agent 1 */
1998 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1999 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
2000 	},
2001 	{ /* MC0 Channel 0 */
2002 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
2003 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
2004 	},
2005 	{ /* MC0 Channel 1 */
2006 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
2007 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
2008 	},
2009 	{ /* MC0 Channel 3 */
2010 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
2011 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
2012 	},
2013 	{ /* MC0 Channel 4 */
2014 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
2015 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
2016 	},
2017 	{ /* MC1 Channel 0 */
2018 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
2019 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2020 	},
2021 	{ /* MC1 Channel 1 */
2022 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2023 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2024 	},
2025 	{ /* MC1 Channel 3 */
2026 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2027 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2028 	},
2029 	{ /* MC1 Channel 4 */
2030 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2031 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2032 	},
2033 	{ /* IRP */
2034 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2035 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2036 	},
2037 	{ /* QPI0 Port 0 */
2038 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2039 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2040 	},
2041 	{ /* QPI0 Port 1 */
2042 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2043 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2044 	},
2045 	{ /* QPI1 Port 2 */
2046 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2047 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2048 	},
2049 	{ /* R2PCIe */
2050 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2051 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2052 	},
2053 	{ /* R3QPI0 Link 0 */
2054 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2055 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2056 	},
2057 	{ /* R3QPI0 Link 1 */
2058 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2059 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2060 	},
2061 	{ /* R3QPI1 Link 2 */
2062 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2063 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2064 	},
2065 	{ /* QPI Port 0 filter  */
2066 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2067 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2068 						   SNBEP_PCI_QPI_PORT0_FILTER),
2069 	},
2070 	{ /* QPI Port 1 filter  */
2071 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2072 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2073 						   SNBEP_PCI_QPI_PORT1_FILTER),
2074 	},
2075 	{ /* end: all zeroes */ }
2076 };
2077 
2078 static struct pci_driver ivbep_uncore_pci_driver = {
2079 	.name		= "ivbep_uncore",
2080 	.id_table	= ivbep_uncore_pci_ids,
2081 };
2082 
2083 int ivbep_uncore_pci_init(void)
2084 {
2085 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2086 	if (ret)
2087 		return ret;
2088 	uncore_pci_uncores = ivbep_pci_uncores;
2089 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2090 	return 0;
2091 }
2092 /* end of IvyTown uncore support */
2093 
2094 /* KNL uncore support */
2095 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2096 	&format_attr_event.attr,
2097 	&format_attr_umask.attr,
2098 	&format_attr_edge.attr,
2099 	&format_attr_tid_en.attr,
2100 	&format_attr_inv.attr,
2101 	&format_attr_thresh5.attr,
2102 	NULL,
2103 };
2104 
2105 static const struct attribute_group knl_uncore_ubox_format_group = {
2106 	.name = "format",
2107 	.attrs = knl_uncore_ubox_formats_attr,
2108 };
2109 
2110 static struct intel_uncore_type knl_uncore_ubox = {
2111 	.name			= "ubox",
2112 	.num_counters		= 2,
2113 	.num_boxes		= 1,
2114 	.perf_ctr_bits		= 48,
2115 	.fixed_ctr_bits		= 48,
2116 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2117 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2118 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2119 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2120 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2121 	.ops			= &snbep_uncore_msr_ops,
2122 	.format_group		= &knl_uncore_ubox_format_group,
2123 };
2124 
2125 static struct attribute *knl_uncore_cha_formats_attr[] = {
2126 	&format_attr_event.attr,
2127 	&format_attr_umask.attr,
2128 	&format_attr_qor.attr,
2129 	&format_attr_edge.attr,
2130 	&format_attr_tid_en.attr,
2131 	&format_attr_inv.attr,
2132 	&format_attr_thresh8.attr,
2133 	&format_attr_filter_tid4.attr,
2134 	&format_attr_filter_link3.attr,
2135 	&format_attr_filter_state4.attr,
2136 	&format_attr_filter_local.attr,
2137 	&format_attr_filter_all_op.attr,
2138 	&format_attr_filter_nnm.attr,
2139 	&format_attr_filter_opc3.attr,
2140 	&format_attr_filter_nc.attr,
2141 	&format_attr_filter_isoc.attr,
2142 	NULL,
2143 };
2144 
2145 static const struct attribute_group knl_uncore_cha_format_group = {
2146 	.name = "format",
2147 	.attrs = knl_uncore_cha_formats_attr,
2148 };
2149 
2150 static struct event_constraint knl_uncore_cha_constraints[] = {
2151 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2152 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2153 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2154 	EVENT_CONSTRAINT_END
2155 };
2156 
2157 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2158 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2159 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2160 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2161 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2162 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2163 	EVENT_EXTRA_END
2164 };
2165 
2166 static u64 knl_cha_filter_mask(int fields)
2167 {
2168 	u64 mask = 0;
2169 
2170 	if (fields & 0x1)
2171 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2172 	if (fields & 0x2)
2173 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2174 	if (fields & 0x4)
2175 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2176 	return mask;
2177 }
2178 
2179 static struct event_constraint *
2180 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2181 {
2182 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2183 }
2184 
2185 static int knl_cha_hw_config(struct intel_uncore_box *box,
2186 			     struct perf_event *event)
2187 {
2188 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2189 	struct extra_reg *er;
2190 	int idx = 0;
2191 
2192 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2193 		if (er->event != (event->hw.config & er->config_mask))
2194 			continue;
2195 		idx |= er->idx;
2196 	}
2197 
2198 	if (idx) {
2199 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2200 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2201 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2202 
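		/* unconditionally set the remote-node, local-node and NNC filter bits on top of the user-supplied filter */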
2203 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2204 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2205 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2206 		reg1->idx = idx;
2207 	}
2208 	return 0;
2209 }
2210 
2211 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2212 				    struct perf_event *event);
2213 
2214 static struct intel_uncore_ops knl_uncore_cha_ops = {
2215 	.init_box		= snbep_uncore_msr_init_box,
2216 	.disable_box		= snbep_uncore_msr_disable_box,
2217 	.enable_box		= snbep_uncore_msr_enable_box,
2218 	.disable_event		= snbep_uncore_msr_disable_event,
2219 	.enable_event		= hswep_cbox_enable_event,
2220 	.read_counter		= uncore_msr_read_counter,
2221 	.hw_config		= knl_cha_hw_config,
2222 	.get_constraint		= knl_cha_get_constraint,
2223 	.put_constraint		= snbep_cbox_put_constraint,
2224 };
2225 
2226 static struct intel_uncore_type knl_uncore_cha = {
2227 	.name			= "cha",
2228 	.num_counters		= 4,
2229 	.num_boxes		= 38,
2230 	.perf_ctr_bits		= 48,
2231 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2232 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2233 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2234 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2235 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2236 	.num_shared_regs	= 1,
2237 	.constraints		= knl_uncore_cha_constraints,
2238 	.ops			= &knl_uncore_cha_ops,
2239 	.format_group		= &knl_uncore_cha_format_group,
2240 };
2241 
2242 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2243 	&format_attr_event2.attr,
2244 	&format_attr_use_occ_ctr.attr,
2245 	&format_attr_occ_sel.attr,
2246 	&format_attr_edge.attr,
2247 	&format_attr_tid_en.attr,
2248 	&format_attr_inv.attr,
2249 	&format_attr_thresh6.attr,
2250 	&format_attr_occ_invert.attr,
2251 	&format_attr_occ_edge_det.attr,
2252 	NULL,
2253 };
2254 
2255 static const struct attribute_group knl_uncore_pcu_format_group = {
2256 	.name = "format",
2257 	.attrs = knl_uncore_pcu_formats_attr,
2258 };
2259 
2260 static struct intel_uncore_type knl_uncore_pcu = {
2261 	.name			= "pcu",
2262 	.num_counters		= 4,
2263 	.num_boxes		= 1,
2264 	.perf_ctr_bits		= 48,
2265 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2266 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2267 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2268 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2269 	.ops			= &snbep_uncore_msr_ops,
2270 	.format_group		= &knl_uncore_pcu_format_group,
2271 };
2272 
2273 static struct intel_uncore_type *knl_msr_uncores[] = {
2274 	&knl_uncore_ubox,
2275 	&knl_uncore_cha,
2276 	&knl_uncore_pcu,
2277 	NULL,
2278 };
2279 
2280 void knl_uncore_cpu_init(void)
2281 {
2282 	uncore_msr_uncores = knl_msr_uncores;
2283 }
2284 
2285 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2286 {
2287 	struct pci_dev *pdev = box->pci_dev;
2288 	int box_ctl = uncore_pci_box_ctl(box);
2289 
2290 	pci_write_config_dword(pdev, box_ctl, 0);
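	/* writing 0 clears the box-level freeze, (re)enabling counting */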
2291 }
2292 
2293 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2294 					struct perf_event *event)
2295 {
2296 	struct pci_dev *pdev = box->pci_dev;
2297 	struct hw_perf_event *hwc = &event->hw;
2298 
2299 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2300 							== UNCORE_FIXED_EVENT)
2301 		pci_write_config_dword(pdev, hwc->config_base,
2302 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2303 	else
2304 		pci_write_config_dword(pdev, hwc->config_base,
2305 				       hwc->config | SNBEP_PMON_CTL_EN);
2306 }
2307 
2308 static struct intel_uncore_ops knl_uncore_imc_ops = {
2309 	.init_box	= snbep_uncore_pci_init_box,
2310 	.disable_box	= snbep_uncore_pci_disable_box,
2311 	.enable_box	= knl_uncore_imc_enable_box,
2312 	.read_counter	= snbep_uncore_pci_read_counter,
2313 	.enable_event	= knl_uncore_imc_enable_event,
2314 	.disable_event	= snbep_uncore_pci_disable_event,
2315 };
2316 
2317 static struct intel_uncore_type knl_uncore_imc_uclk = {
2318 	.name			= "imc_uclk",
2319 	.num_counters		= 4,
2320 	.num_boxes		= 2,
2321 	.perf_ctr_bits		= 48,
2322 	.fixed_ctr_bits		= 48,
2323 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2324 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2325 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2326 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2327 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2328 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2329 	.ops			= &knl_uncore_imc_ops,
2330 	.format_group		= &snbep_uncore_format_group,
2331 };
2332 
2333 static struct intel_uncore_type knl_uncore_imc_dclk = {
2334 	.name			= "imc",
2335 	.num_counters		= 4,
2336 	.num_boxes		= 6,
2337 	.perf_ctr_bits		= 48,
2338 	.fixed_ctr_bits		= 48,
2339 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2340 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2341 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2342 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2343 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2344 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2345 	.ops			= &knl_uncore_imc_ops,
2346 	.format_group		= &snbep_uncore_format_group,
2347 };
2348 
2349 static struct intel_uncore_type knl_uncore_edc_uclk = {
2350 	.name			= "edc_uclk",
2351 	.num_counters		= 4,
2352 	.num_boxes		= 8,
2353 	.perf_ctr_bits		= 48,
2354 	.fixed_ctr_bits		= 48,
2355 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2356 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2357 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2358 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2359 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2360 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2361 	.ops			= &knl_uncore_imc_ops,
2362 	.format_group		= &snbep_uncore_format_group,
2363 };
2364 
2365 static struct intel_uncore_type knl_uncore_edc_eclk = {
2366 	.name			= "edc_eclk",
2367 	.num_counters		= 4,
2368 	.num_boxes		= 8,
2369 	.perf_ctr_bits		= 48,
2370 	.fixed_ctr_bits		= 48,
2371 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2372 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2373 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2374 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2375 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2376 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2377 	.ops			= &knl_uncore_imc_ops,
2378 	.format_group		= &snbep_uncore_format_group,
2379 };
2380 
2381 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2382 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2383 	EVENT_CONSTRAINT_END
2384 };
2385 
2386 static struct intel_uncore_type knl_uncore_m2pcie = {
2387 	.name		= "m2pcie",
2388 	.num_counters   = 4,
2389 	.num_boxes	= 1,
2390 	.perf_ctr_bits	= 48,
2391 	.constraints	= knl_uncore_m2pcie_constraints,
2392 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2393 };
2394 
2395 static struct attribute *knl_uncore_irp_formats_attr[] = {
2396 	&format_attr_event.attr,
2397 	&format_attr_umask.attr,
2398 	&format_attr_qor.attr,
2399 	&format_attr_edge.attr,
2400 	&format_attr_inv.attr,
2401 	&format_attr_thresh8.attr,
2402 	NULL,
2403 };
2404 
2405 static const struct attribute_group knl_uncore_irp_format_group = {
2406 	.name = "format",
2407 	.attrs = knl_uncore_irp_formats_attr,
2408 };
2409 
2410 static struct intel_uncore_type knl_uncore_irp = {
2411 	.name			= "irp",
2412 	.num_counters		= 2,
2413 	.num_boxes		= 1,
2414 	.perf_ctr_bits		= 48,
2415 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2416 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2417 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2418 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2419 	.ops			= &snbep_uncore_pci_ops,
2420 	.format_group		= &knl_uncore_irp_format_group,
2421 };
2422 
2423 enum {
2424 	KNL_PCI_UNCORE_MC_UCLK,
2425 	KNL_PCI_UNCORE_MC_DCLK,
2426 	KNL_PCI_UNCORE_EDC_UCLK,
2427 	KNL_PCI_UNCORE_EDC_ECLK,
2428 	KNL_PCI_UNCORE_M2PCIE,
2429 	KNL_PCI_UNCORE_IRP,
2430 };
2431 
2432 static struct intel_uncore_type *knl_pci_uncores[] = {
2433 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2434 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2435 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2436 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2437 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2438 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2439 	NULL,
2440 };
2441 
2442 /*
2443  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2444  * device type. Prior to KNL, each instance of a PMU device type had a unique
2445  * device ID.
2446  *
2447  *	PCI Device ID	Uncore PMU Devices
2448  *	----------------------------------
2449  *	0x7841		MC0 UClk, MC1 UClk
2450  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2451  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2452  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2453  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2454  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2455  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2456  *	0x7817		M2PCIe
2457  *	0x7814		IRP
2458  */
2459 
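/*
 * Because the device ID alone is ambiguous on KNL, each entry below also
 * encodes the expected PCI device and function number (the first two
 * arguments of UNCORE_PCI_DEV_FULL_DATA) to tell the instances apart.
 */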
2460 static const struct pci_device_id knl_uncore_pci_ids[] = {
2461 	{ /* MC0 UClk */
2462 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2463 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2464 	},
2465 	{ /* MC1 UClk */
2466 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2467 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2468 	},
2469 	{ /* MC0 DClk CH 0 */
2470 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2471 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2472 	},
2473 	{ /* MC0 DClk CH 1 */
2474 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2475 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2476 	},
2477 	{ /* MC0 DClk CH 2 */
2478 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2479 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2480 	},
2481 	{ /* MC1 DClk CH 0 */
2482 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2483 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2484 	},
2485 	{ /* MC1 DClk CH 1 */
2486 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2487 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2488 	},
2489 	{ /* MC1 DClk CH 2 */
2490 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2491 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2492 	},
2493 	{ /* EDC0 UClk */
2494 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2495 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2496 	},
2497 	{ /* EDC1 UClk */
2498 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2499 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2500 	},
2501 	{ /* EDC2 UClk */
2502 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2503 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2504 	},
2505 	{ /* EDC3 UClk */
2506 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2507 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2508 	},
2509 	{ /* EDC4 UClk */
2510 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2511 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2512 	},
2513 	{ /* EDC5 UClk */
2514 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2515 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2516 	},
2517 	{ /* EDC6 UClk */
2518 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2519 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2520 	},
2521 	{ /* EDC7 UClk */
2522 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2523 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2524 	},
2525 	{ /* EDC0 EClk */
2526 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2527 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2528 	},
2529 	{ /* EDC1 EClk */
2530 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2531 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2532 	},
2533 	{ /* EDC2 EClk */
2534 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2535 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2536 	},
2537 	{ /* EDC3 EClk */
2538 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2539 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2540 	},
2541 	{ /* EDC4 EClk */
2542 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2543 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2544 	},
2545 	{ /* EDC5 EClk */
2546 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2547 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2548 	},
2549 	{ /* EDC6 EClk */
2550 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2551 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2552 	},
2553 	{ /* EDC7 EClk */
2554 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2555 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2556 	},
2557 	{ /* M2PCIe */
2558 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2559 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2560 	},
2561 	{ /* IRP */
2562 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2563 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2564 	},
2565 	{ /* end: all zeroes */ }
2566 };
2567 
2568 static struct pci_driver knl_uncore_pci_driver = {
2569 	.name		= "knl_uncore",
2570 	.id_table	= knl_uncore_pci_ids,
2571 };
2572 
2573 int knl_uncore_pci_init(void)
2574 {
2575 	int ret;
2576 
2577 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2578 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2579 	if (ret)
2580 		return ret;
2581 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2582 	if (ret)
2583 		return ret;
2584 	uncore_pci_uncores = knl_pci_uncores;
2585 	uncore_pci_driver = &knl_uncore_pci_driver;
2586 	return 0;
2587 }
2588 
2589 /* end of KNL uncore support */
2590 
2591 /* Haswell-EP uncore support */
2592 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2593 	&format_attr_event.attr,
2594 	&format_attr_umask.attr,
2595 	&format_attr_edge.attr,
2596 	&format_attr_inv.attr,
2597 	&format_attr_thresh5.attr,
2598 	&format_attr_filter_tid2.attr,
2599 	&format_attr_filter_cid.attr,
2600 	NULL,
2601 };
2602 
2603 static const struct attribute_group hswep_uncore_ubox_format_group = {
2604 	.name = "format",
2605 	.attrs = hswep_uncore_ubox_formats_attr,
2606 };
2607 
2608 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2609 {
2610 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2611 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2612 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2613 	reg1->idx = 0;
2614 	return 0;
2615 }
2616 
2617 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2618 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2619 	.hw_config		= hswep_ubox_hw_config,
2620 	.get_constraint		= uncore_get_constraint,
2621 	.put_constraint		= uncore_put_constraint,
2622 };
2623 
2624 static struct intel_uncore_type hswep_uncore_ubox = {
2625 	.name			= "ubox",
2626 	.num_counters		= 2,
2627 	.num_boxes		= 1,
2628 	.perf_ctr_bits		= 44,
2629 	.fixed_ctr_bits		= 48,
2630 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2631 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2632 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2633 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2634 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2635 	.num_shared_regs	= 1,
2636 	.ops			= &hswep_uncore_ubox_ops,
2637 	.format_group		= &hswep_uncore_ubox_format_group,
2638 };
2639 
2640 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2641 	&format_attr_event.attr,
2642 	&format_attr_umask.attr,
2643 	&format_attr_edge.attr,
2644 	&format_attr_tid_en.attr,
2645 	&format_attr_thresh8.attr,
2646 	&format_attr_filter_tid3.attr,
2647 	&format_attr_filter_link2.attr,
2648 	&format_attr_filter_state3.attr,
2649 	&format_attr_filter_nid2.attr,
2650 	&format_attr_filter_opc2.attr,
2651 	&format_attr_filter_nc.attr,
2652 	&format_attr_filter_c6.attr,
2653 	&format_attr_filter_isoc.attr,
2654 	NULL,
2655 };
2656 
2657 static const struct attribute_group hswep_uncore_cbox_format_group = {
2658 	.name = "format",
2659 	.attrs = hswep_uncore_cbox_formats_attr,
2660 };
2661 
2662 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2663 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2664 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2665 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2666 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2667 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2668 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2669 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2670 	EVENT_CONSTRAINT_END
2671 };
2672 
2673 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2674 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2675 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2676 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2677 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2678 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2679 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2680 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2681 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2682 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2683 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2684 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2685 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2686 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2687 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2688 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2689 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2690 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2691 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2692 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2693 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2694 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2695 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2696 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2697 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2698 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2699 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2700 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2701 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2702 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2703 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2704 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2705 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2706 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2707 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2708 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2709 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2710 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2711 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2712 	EVENT_EXTRA_END
2713 };
2714 
2715 static u64 hswep_cbox_filter_mask(int fields)
2716 {
2717 	u64 mask = 0;
2718 	if (fields & 0x1)
2719 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2720 	if (fields & 0x2)
2721 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2722 	if (fields & 0x4)
2723 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2724 	if (fields & 0x8)
2725 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2726 	if (fields & 0x10) {
2727 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2728 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2729 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2730 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2731 	}
2732 	return mask;
2733 }
2734 
2735 static struct event_constraint *
2736 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2737 {
2738 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2739 }
2740 
2741 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2742 {
2743 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2744 	struct extra_reg *er;
2745 	int idx = 0;
2746 
2747 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2748 		if (er->event != (event->hw.config & er->config_mask))
2749 			continue;
2750 		idx |= er->idx;
2751 	}
2752 
2753 	if (idx) {
2754 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2755 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2756 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2757 		reg1->idx = idx;
2758 	}
2759 	return 0;
2760 }
2761 
2762 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2763 				  struct perf_event *event)
2764 {
2765 	struct hw_perf_event *hwc = &event->hw;
2766 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2767 
2768 	if (reg1->idx != EXTRA_REG_NONE) {
2769 		u64 filter = uncore_shared_reg_config(box, 0);
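		/* unlike IVB-EP, the two HSX filter MSRs are adjacent: low half at reg, high half at reg + 1 */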
2770 		wrmsrl(reg1->reg, filter & 0xffffffff);
2771 		wrmsrl(reg1->reg + 1, filter >> 32);
2772 	}
2773 
2774 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2775 }
2776 
2777 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2778 	.init_box		= snbep_uncore_msr_init_box,
2779 	.disable_box		= snbep_uncore_msr_disable_box,
2780 	.enable_box		= snbep_uncore_msr_enable_box,
2781 	.disable_event		= snbep_uncore_msr_disable_event,
2782 	.enable_event		= hswep_cbox_enable_event,
2783 	.read_counter		= uncore_msr_read_counter,
2784 	.hw_config		= hswep_cbox_hw_config,
2785 	.get_constraint		= hswep_cbox_get_constraint,
2786 	.put_constraint		= snbep_cbox_put_constraint,
2787 };
2788 
2789 static struct intel_uncore_type hswep_uncore_cbox = {
2790 	.name			= "cbox",
2791 	.num_counters		= 4,
2792 	.num_boxes		= 18,
2793 	.perf_ctr_bits		= 48,
2794 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2795 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2796 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2797 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2798 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2799 	.num_shared_regs	= 1,
2800 	.constraints		= hswep_uncore_cbox_constraints,
2801 	.ops			= &hswep_uncore_cbox_ops,
2802 	.format_group		= &hswep_uncore_cbox_format_group,
2803 };
2804 
2805 /*
2806  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2807  */
2808 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2809 {
2810 	unsigned msr = uncore_msr_box_ctl(box);
2811 
2812 	if (msr) {
2813 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2814 		u64 flags = 0;
2815 		int i;
2816 
2817 		for_each_set_bit(i, (unsigned long *)&init, 64) {
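		/*
		 * e.g. SNBEP_PMON_BOX_CTL_INT (bits 0, 1 and 16) is written
		 * as the sequence 0x1, 0x3, 0x10003.
		 */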
2818 			flags |= (1ULL << i);
2819 			wrmsrl(msr, flags);
2820 		}
2821 	}
2822 }
2823 
2824 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2825 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2826 	.init_box		= hswep_uncore_sbox_msr_init_box
2827 };
2828 
2829 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2830 	&format_attr_event.attr,
2831 	&format_attr_umask.attr,
2832 	&format_attr_edge.attr,
2833 	&format_attr_tid_en.attr,
2834 	&format_attr_inv.attr,
2835 	&format_attr_thresh8.attr,
2836 	NULL,
2837 };
2838 
2839 static const struct attribute_group hswep_uncore_sbox_format_group = {
2840 	.name = "format",
2841 	.attrs = hswep_uncore_sbox_formats_attr,
2842 };
2843 
2844 static struct intel_uncore_type hswep_uncore_sbox = {
2845 	.name			= "sbox",
2846 	.num_counters		= 4,
2847 	.num_boxes		= 4,
2848 	.perf_ctr_bits		= 44,
2849 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2850 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2851 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2852 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2853 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2854 	.ops			= &hswep_uncore_sbox_msr_ops,
2855 	.format_group		= &hswep_uncore_sbox_format_group,
2856 };
2857 
2858 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2859 {
2860 	struct hw_perf_event *hwc = &event->hw;
2861 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2862 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2863 
2864 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2865 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2866 		reg1->idx = ev_sel - 0xb;
2867 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2868 	}
2869 	return 0;
2870 }
2871 
2872 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2873 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2874 	.hw_config		= hswep_pcu_hw_config,
2875 	.get_constraint		= snbep_pcu_get_constraint,
2876 	.put_constraint		= snbep_pcu_put_constraint,
2877 };
2878 
2879 static struct intel_uncore_type hswep_uncore_pcu = {
2880 	.name			= "pcu",
2881 	.num_counters		= 4,
2882 	.num_boxes		= 1,
2883 	.perf_ctr_bits		= 48,
2884 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2885 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2886 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2887 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2888 	.num_shared_regs	= 1,
2889 	.ops			= &hswep_uncore_pcu_ops,
2890 	.format_group		= &snbep_uncore_pcu_format_group,
2891 };
2892 
2893 static struct intel_uncore_type *hswep_msr_uncores[] = {
2894 	&hswep_uncore_ubox,
2895 	&hswep_uncore_cbox,
2896 	&hswep_uncore_sbox,
2897 	&hswep_uncore_pcu,
2898 	NULL,
2899 };
2900 
2901 #define HSWEP_PCU_DID			0x2fc0
2902 #define HSWEP_PCU_CAPID4_OFFSET		0x94
2903 #define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
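/* a "chop" value of 0 in CAPID4 bits [7:6] identifies the 6-8 core parts that only have two SBOXes */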
2904 
2905 static bool hswep_has_limit_sbox(unsigned int device)
2906 {
2907 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2908 	u32 capid4;
2909 
2910 	if (!dev)
2911 		return false;
2912 
2913 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2914 	pci_dev_put(dev);
2915 	if (!hswep_get_chop(capid4))
2916 		return true;
2917 
2918 	return false;
2919 }
2920 
2921 void hswep_uncore_cpu_init(void)
2922 {
2923 	if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
2924 		hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();
2925 
2926 	/* Detect 6-8 core systems with only two SBOXes */
2927 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2928 		hswep_uncore_sbox.num_boxes = 2;
2929 
2930 	uncore_msr_uncores = hswep_msr_uncores;
2931 }
2932 
2933 static struct intel_uncore_type hswep_uncore_ha = {
2934 	.name		= "ha",
2935 	.num_counters   = 4,
2936 	.num_boxes	= 2,
2937 	.perf_ctr_bits	= 48,
2938 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2939 };
2940 
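/*
 * Each CAS transfers one 64-byte cache line, so the 6.103515625e-5
 * (= 64 / 2^20) scale below converts the raw count to MiB.
 */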
2941 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2942 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2943 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2944 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2945 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2946 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2947 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2948 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2949 	{ /* end: all zeroes */ },
2950 };
2951 
2952 static struct intel_uncore_type hswep_uncore_imc = {
2953 	.name		= "imc",
2954 	.num_counters   = 4,
2955 	.num_boxes	= 8,
2956 	.perf_ctr_bits	= 48,
2957 	.fixed_ctr_bits	= 48,
2958 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2959 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2960 	.event_descs	= hswep_uncore_imc_events,
2961 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2962 };
2963 
2964 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2965 
2966 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2967 {
2968 	struct pci_dev *pdev = box->pci_dev;
2969 	struct hw_perf_event *hwc = &event->hw;
2970 	u64 count = 0;
2971 
2972 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2973 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2974 
2975 	return count;
2976 }
2977 
2978 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2979 	.init_box	= snbep_uncore_pci_init_box,
2980 	.disable_box	= snbep_uncore_pci_disable_box,
2981 	.enable_box	= snbep_uncore_pci_enable_box,
2982 	.disable_event	= ivbep_uncore_irp_disable_event,
2983 	.enable_event	= ivbep_uncore_irp_enable_event,
2984 	.read_counter	= hswep_uncore_irp_read_counter,
2985 };
2986 
2987 static struct intel_uncore_type hswep_uncore_irp = {
2988 	.name			= "irp",
2989 	.num_counters		= 4,
2990 	.num_boxes		= 1,
2991 	.perf_ctr_bits		= 48,
2992 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2993 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2994 	.ops			= &hswep_uncore_irp_ops,
2995 	.format_group		= &snbep_uncore_format_group,
2996 };
2997 
2998 static struct intel_uncore_type hswep_uncore_qpi = {
2999 	.name			= "qpi",
3000 	.num_counters		= 4,
3001 	.num_boxes		= 3,
3002 	.perf_ctr_bits		= 48,
3003 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3004 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3005 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3006 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3007 	.num_shared_regs	= 1,
3008 	.ops			= &snbep_uncore_qpi_ops,
3009 	.format_group		= &snbep_uncore_qpi_format_group,
3010 };
3011 
3012 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
3013 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3014 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3015 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3016 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3017 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
3018 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3019 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3020 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3021 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3022 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3023 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3024 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3025 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3026 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3027 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3028 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3029 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3030 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3031 	EVENT_CONSTRAINT_END
3032 };
3033 
3034 static struct intel_uncore_type hswep_uncore_r2pcie = {
3035 	.name		= "r2pcie",
3036 	.num_counters   = 4,
3037 	.num_boxes	= 1,
3038 	.perf_ctr_bits	= 48,
3039 	.constraints	= hswep_uncore_r2pcie_constraints,
3040 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3041 };
3042 
3043 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3044 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3045 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3046 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3047 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3048 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3049 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3050 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3051 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3052 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3053 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3054 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3055 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3056 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3057 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3058 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3059 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3060 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3061 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3062 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3063 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3064 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3065 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3066 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3067 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3068 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3069 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3070 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3071 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3072 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3073 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3074 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3075 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3076 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3077 	EVENT_CONSTRAINT_END
3078 };
3079 
3080 static struct intel_uncore_type hswep_uncore_r3qpi = {
3081 	.name		= "r3qpi",
3082 	.num_counters   = 3,
3083 	.num_boxes	= 3,
3084 	.perf_ctr_bits	= 44,
3085 	.constraints	= hswep_uncore_r3qpi_constraints,
3086 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3087 };
3088 
3089 enum {
3090 	HSWEP_PCI_UNCORE_HA,
3091 	HSWEP_PCI_UNCORE_IMC,
3092 	HSWEP_PCI_UNCORE_IRP,
3093 	HSWEP_PCI_UNCORE_QPI,
3094 	HSWEP_PCI_UNCORE_R2PCIE,
3095 	HSWEP_PCI_UNCORE_R3QPI,
3096 };
3097 
3098 static struct intel_uncore_type *hswep_pci_uncores[] = {
3099 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3100 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3101 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3102 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3103 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3104 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3105 	NULL,
3106 };
3107 
3108 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3109 	{ /* Home Agent 0 */
3110 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3111 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3112 	},
3113 	{ /* Home Agent 1 */
3114 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3115 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3116 	},
3117 	{ /* MC0 Channel 0 */
3118 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3119 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3120 	},
3121 	{ /* MC0 Channel 1 */
3122 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3123 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3124 	},
3125 	{ /* MC0 Channel 2 */
3126 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3127 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3128 	},
3129 	{ /* MC0 Channel 3 */
3130 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3131 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3132 	},
3133 	{ /* MC1 Channel 0 */
3134 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3135 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3136 	},
3137 	{ /* MC1 Channel 1 */
3138 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3139 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3140 	},
3141 	{ /* MC1 Channel 2 */
3142 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3143 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3144 	},
3145 	{ /* MC1 Channel 3 */
3146 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3147 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3148 	},
3149 	{ /* IRP */
3150 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3151 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3152 	},
3153 	{ /* QPI0 Port 0 */
3154 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3155 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3156 	},
3157 	{ /* QPI0 Port 1 */
3158 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3159 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3160 	},
3161 	{ /* QPI1 Port 2 */
3162 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3163 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3164 	},
3165 	{ /* R2PCIe */
3166 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3167 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3168 	},
3169 	{ /* R3QPI0 Link 0 */
3170 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3171 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3172 	},
3173 	{ /* R3QPI0 Link 1 */
3174 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3175 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3176 	},
3177 	{ /* R3QPI1 Link 2 */
3178 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3179 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3180 	},
3181 	{ /* QPI Port 0 filter  */
3182 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3183 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3184 						   SNBEP_PCI_QPI_PORT0_FILTER),
3185 	},
3186 	{ /* QPI Port 1 filter  */
3187 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3188 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3189 						   SNBEP_PCI_QPI_PORT1_FILTER),
3190 	},
3191 	{ /* end: all zeroes */ }
3192 };
3193 
3194 static struct pci_driver hswep_uncore_pci_driver = {
3195 	.name		= "hswep_uncore",
3196 	.id_table	= hswep_uncore_pci_ids,
3197 };
3198 
3199 int hswep_uncore_pci_init(void)
3200 {
3201 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3202 	if (ret)
3203 		return ret;
3204 	uncore_pci_uncores = hswep_pci_uncores;
3205 	uncore_pci_driver = &hswep_uncore_pci_driver;
3206 	return 0;
3207 }
3208 /* end of Haswell-EP uncore support */
3209 
3210 /* BDX uncore support */
3211 
3212 static struct intel_uncore_type bdx_uncore_ubox = {
3213 	.name			= "ubox",
3214 	.num_counters		= 2,
3215 	.num_boxes		= 1,
3216 	.perf_ctr_bits		= 48,
3217 	.fixed_ctr_bits		= 48,
3218 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3219 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3220 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3221 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3222 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3223 	.num_shared_regs	= 1,
3224 	.ops			= &ivbep_uncore_msr_ops,
3225 	.format_group		= &ivbep_uncore_ubox_format_group,
3226 };
3227 
3228 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3229 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3230 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3231 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3232 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3233 	EVENT_CONSTRAINT_END
3234 };
3235 
3236 static struct intel_uncore_type bdx_uncore_cbox = {
3237 	.name			= "cbox",
3238 	.num_counters		= 4,
3239 	.num_boxes		= 24,
3240 	.perf_ctr_bits		= 48,
3241 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3242 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3243 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3244 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3245 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3246 	.num_shared_regs	= 1,
3247 	.constraints		= bdx_uncore_cbox_constraints,
3248 	.ops			= &hswep_uncore_cbox_ops,
3249 	.format_group		= &hswep_uncore_cbox_format_group,
3250 };
3251 
3252 static struct intel_uncore_type bdx_uncore_sbox = {
3253 	.name			= "sbox",
3254 	.num_counters		= 4,
3255 	.num_boxes		= 4,
3256 	.perf_ctr_bits		= 48,
3257 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3258 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3259 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3260 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3261 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3262 	.ops			= &hswep_uncore_sbox_msr_ops,
3263 	.format_group		= &hswep_uncore_sbox_format_group,
3264 };
3265 
3266 #define BDX_MSR_UNCORE_SBOX	3
3267 
3268 static struct intel_uncore_type *bdx_msr_uncores[] = {
3269 	&bdx_uncore_ubox,
3270 	&bdx_uncore_cbox,
3271 	&hswep_uncore_pcu,
3272 	&bdx_uncore_sbox,
3273 	NULL,
3274 };
3275 
3276 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
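/*
 * EVENT_CONSTRAINT(event, cntmask, cmask): any event with bit 7 set in
 * its config is therefore restricted to counters 1-3 (counter mask 0xe).
 */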
3277 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3278 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3279 	EVENT_CONSTRAINT_END
3280 };
3281 
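/* PCI device ID of the BDX PCU, used below to probe for parts without SBOXes. */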
3282 #define BDX_PCU_DID			0x6fc0
3283 
3284 void bdx_uncore_cpu_init(void)
3285 {
3286 	if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
3287 		bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
3288 	uncore_msr_uncores = bdx_msr_uncores;
3289 
3290 	/* Detect systems with no SBOXes */
3291 	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
3292 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3293 
3294 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3295 }
3296 
3297 static struct intel_uncore_type bdx_uncore_ha = {
3298 	.name		= "ha",
3299 	.num_counters   = 4,
3300 	.num_boxes	= 2,
3301 	.perf_ctr_bits	= 48,
3302 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3303 };
3304 
3305 static struct intel_uncore_type bdx_uncore_imc = {
3306 	.name		= "imc",
3307 	.num_counters   = 4,
3308 	.num_boxes	= 8,
3309 	.perf_ctr_bits	= 48,
3310 	.fixed_ctr_bits	= 48,
3311 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3312 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3313 	.event_descs	= hswep_uncore_imc_events,
3314 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3315 };
3316 
3317 static struct intel_uncore_type bdx_uncore_irp = {
3318 	.name			= "irp",
3319 	.num_counters		= 4,
3320 	.num_boxes		= 1,
3321 	.perf_ctr_bits		= 48,
3322 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3323 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3324 	.ops			= &hswep_uncore_irp_ops,
3325 	.format_group		= &snbep_uncore_format_group,
3326 };
3327 
3328 static struct intel_uncore_type bdx_uncore_qpi = {
3329 	.name			= "qpi",
3330 	.num_counters		= 4,
3331 	.num_boxes		= 3,
3332 	.perf_ctr_bits		= 48,
3333 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3334 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3335 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3336 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3337 	.num_shared_regs	= 1,
3338 	.ops			= &snbep_uncore_qpi_ops,
3339 	.format_group		= &snbep_uncore_qpi_format_group,
3340 };
3341 
3342 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3343 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3344 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3345 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3346 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3347 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3348 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3349 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3350 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3351 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3352 	EVENT_CONSTRAINT_END
3353 };
3354 
3355 static struct intel_uncore_type bdx_uncore_r2pcie = {
3356 	.name		= "r2pcie",
3357 	.num_counters   = 4,
3358 	.num_boxes	= 1,
3359 	.perf_ctr_bits	= 48,
3360 	.constraints	= bdx_uncore_r2pcie_constraints,
3361 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3362 };
3363 
3364 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3365 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3366 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3367 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3368 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3369 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3370 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3371 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3372 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3373 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3374 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3375 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3376 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3377 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3378 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3379 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3380 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3381 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3382 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3383 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3384 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3385 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3386 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3387 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3388 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3389 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3390 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3391 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3392 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3393 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3394 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3395 	EVENT_CONSTRAINT_END
3396 };
3397 
3398 static struct intel_uncore_type bdx_uncore_r3qpi = {
3399 	.name		= "r3qpi",
3400 	.num_counters   = 3,
3401 	.num_boxes	= 3,
3402 	.perf_ctr_bits	= 48,
3403 	.constraints	= bdx_uncore_r3qpi_constraints,
3404 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3405 };
3406 
3407 enum {
3408 	BDX_PCI_UNCORE_HA,
3409 	BDX_PCI_UNCORE_IMC,
3410 	BDX_PCI_UNCORE_IRP,
3411 	BDX_PCI_UNCORE_QPI,
3412 	BDX_PCI_UNCORE_R2PCIE,
3413 	BDX_PCI_UNCORE_R3QPI,
3414 };
3415 
3416 static struct intel_uncore_type *bdx_pci_uncores[] = {
3417 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3418 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3419 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3420 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3421 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3422 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3423 	NULL,
3424 };
3425 
3426 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3427 	{ /* Home Agent 0 */
3428 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3429 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3430 	},
3431 	{ /* Home Agent 1 */
3432 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3433 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3434 	},
3435 	{ /* MC0 Channel 0 */
3436 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3437 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3438 	},
3439 	{ /* MC0 Channel 1 */
3440 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3441 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3442 	},
3443 	{ /* MC0 Channel 2 */
3444 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3445 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3446 	},
3447 	{ /* MC0 Channel 3 */
3448 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3449 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3450 	},
3451 	{ /* MC1 Channel 0 */
3452 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3453 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3454 	},
3455 	{ /* MC1 Channel 1 */
3456 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3457 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3458 	},
3459 	{ /* MC1 Channel 2 */
3460 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3461 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3462 	},
3463 	{ /* MC1 Channel 3 */
3464 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3465 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3466 	},
3467 	{ /* IRP */
3468 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3469 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3470 	},
3471 	{ /* QPI0 Port 0 */
3472 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3473 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3474 	},
3475 	{ /* QPI0 Port 1 */
3476 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3477 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3478 	},
3479 	{ /* QPI1 Port 2 */
3480 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3481 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3482 	},
3483 	{ /* R2PCIe */
3484 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3485 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3486 	},
3487 	{ /* R3QPI0 Link 0 */
3488 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3489 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3490 	},
3491 	{ /* R3QPI0 Link 1 */
3492 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3493 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3494 	},
3495 	{ /* R3QPI1 Link 2 */
3496 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3497 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3498 	},
3499 	{ /* QPI Port 0 filter  */
3500 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3501 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3502 						   SNBEP_PCI_QPI_PORT0_FILTER),
3503 	},
3504 	{ /* QPI Port 1 filter  */
3505 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3506 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3507 						   SNBEP_PCI_QPI_PORT1_FILTER),
3508 	},
3509 	{ /* QPI Port 2 filter  */
3510 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3511 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3512 						   BDX_PCI_QPI_PORT2_FILTER),
3513 	},
3514 	{ /* end: all zeroes */ }
3515 };
3516 
3517 static struct pci_driver bdx_uncore_pci_driver = {
3518 	.name		= "bdx_uncore",
3519 	.id_table	= bdx_uncore_pci_ids,
3520 };
3521 
3522 int bdx_uncore_pci_init(void)
3523 {
3524 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3525 
3526 	if (ret)
3527 		return ret;
3528 	uncore_pci_uncores = bdx_pci_uncores;
3529 	uncore_pci_driver = &bdx_uncore_pci_driver;
3530 	return 0;
3531 }
3532 
3533 /* end of BDX uncore support */
3534 
3535 /* SKX uncore support */
3536 
3537 static struct intel_uncore_type skx_uncore_ubox = {
3538 	.name			= "ubox",
3539 	.num_counters		= 2,
3540 	.num_boxes		= 1,
3541 	.perf_ctr_bits		= 48,
3542 	.fixed_ctr_bits		= 48,
3543 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3544 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3545 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3546 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3547 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3548 	.ops			= &ivbep_uncore_msr_ops,
3549 	.format_group		= &ivbep_uncore_ubox_format_group,
3550 };
3551 
3552 static struct attribute *skx_uncore_cha_formats_attr[] = {
3553 	&format_attr_event.attr,
3554 	&format_attr_umask.attr,
3555 	&format_attr_edge.attr,
3556 	&format_attr_tid_en.attr,
3557 	&format_attr_inv.attr,
3558 	&format_attr_thresh8.attr,
3559 	&format_attr_filter_tid4.attr,
3560 	&format_attr_filter_state5.attr,
3561 	&format_attr_filter_rem.attr,
3562 	&format_attr_filter_loc.attr,
3563 	&format_attr_filter_nm.attr,
3564 	&format_attr_filter_all_op.attr,
3565 	&format_attr_filter_not_nm.attr,
3566 	&format_attr_filter_opc_0.attr,
3567 	&format_attr_filter_opc_1.attr,
3568 	&format_attr_filter_nc.attr,
3569 	&format_attr_filter_isoc.attr,
3570 	NULL,
3571 };
3572 
3573 static const struct attribute_group skx_uncore_chabox_format_group = {
3574 	.name = "format",
3575 	.attrs = skx_uncore_cha_formats_attr,
3576 };
3577 
3578 static struct event_constraint skx_uncore_chabox_constraints[] = {
3579 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3580 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3581 	EVENT_CONSTRAINT_END
3582 };
3583 
3584 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3585 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3586 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3587 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3588 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3589 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3590 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3591 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3592 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3593 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3594 	EVENT_EXTRA_END
3595 };
3596 
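/*
 * Translate a bitmask of filter groups into the raw CHA filter-register
 * mask: bit 0 = TID, bit 1 = LINK, bit 2 = STATE, bit 3 = the combined
 * local/remote/opcode/NM group.
 */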
3597 static u64 skx_cha_filter_mask(int fields)
3598 {
3599 	u64 mask = 0;
3600 
3601 	if (fields & 0x1)
3602 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3603 	if (fields & 0x2)
3604 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3605 	if (fields & 0x4)
3606 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3607 	if (fields & 0x8) {
3608 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3609 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3610 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3611 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3612 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3613 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3614 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3615 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3616 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3617 	}
3618 	return mask;
3619 }
3620 
3621 static struct event_constraint *
3622 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3623 {
3624 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3625 }
3626 
3627 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3628 {
3629 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3630 	struct extra_reg *er;
3631 	int idx = 0;
3632 	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3633 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3634 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3635 
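	/* OR in the filter fields needed by any extra reg matching this event. */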
3636 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3637 		if (er->event != (event->hw.config & er->config_mask))
3638 			continue;
3639 		idx |= er->idx;
3640 	}
3641 
3642 	if (idx) {
3643 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3644 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3645 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3646 		reg1->idx = idx;
3647 	}
3648 	return 0;
3649 }
3650 
3651 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3652 	/* There is no frz_en for chabox ctl */
3653 	.init_box		= ivbep_uncore_msr_init_box,
3654 	.disable_box		= snbep_uncore_msr_disable_box,
3655 	.enable_box		= snbep_uncore_msr_enable_box,
3656 	.disable_event		= snbep_uncore_msr_disable_event,
3657 	.enable_event		= hswep_cbox_enable_event,
3658 	.read_counter		= uncore_msr_read_counter,
3659 	.hw_config		= skx_cha_hw_config,
3660 	.get_constraint		= skx_cha_get_constraint,
3661 	.put_constraint		= snbep_cbox_put_constraint,
3662 };
3663 
3664 static struct intel_uncore_type skx_uncore_chabox = {
3665 	.name			= "cha",
3666 	.num_counters		= 4,
3667 	.perf_ctr_bits		= 48,
3668 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3669 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3670 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3671 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3672 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3673 	.num_shared_regs	= 1,
3674 	.constraints		= skx_uncore_chabox_constraints,
3675 	.ops			= &skx_uncore_chabox_ops,
3676 	.format_group		= &skx_uncore_chabox_format_group,
3677 };
3678 
3679 static struct attribute *skx_uncore_iio_formats_attr[] = {
3680 	&format_attr_event.attr,
3681 	&format_attr_umask.attr,
3682 	&format_attr_edge.attr,
3683 	&format_attr_inv.attr,
3684 	&format_attr_thresh9.attr,
3685 	&format_attr_ch_mask.attr,
3686 	&format_attr_fc_mask.attr,
3687 	NULL,
3688 };
3689 
3690 static const struct attribute_group skx_uncore_iio_format_group = {
3691 	.name = "format",
3692 	.attrs = skx_uncore_iio_formats_attr,
3693 };
3694 
3695 static struct event_constraint skx_uncore_iio_constraints[] = {
3696 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3697 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3698 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3699 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3700 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3701 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3702 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3703 	EVENT_CONSTRAINT_END
3704 };
3705 
3706 static void skx_iio_enable_event(struct intel_uncore_box *box,
3707 				 struct perf_event *event)
3708 {
3709 	struct hw_perf_event *hwc = &event->hw;
3710 
3711 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3712 }
3713 
3714 static struct intel_uncore_ops skx_uncore_iio_ops = {
3715 	.init_box		= ivbep_uncore_msr_init_box,
3716 	.disable_box		= snbep_uncore_msr_disable_box,
3717 	.enable_box		= snbep_uncore_msr_enable_box,
3718 	.disable_event		= snbep_uncore_msr_disable_event,
3719 	.enable_event		= skx_iio_enable_event,
3720 	.read_counter		= uncore_msr_read_counter,
3721 };
3722 
3723 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3724 {
3725 	int idx;
3726 
3727 	for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3728 		if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3729 			return &pmu->type->topology[die][idx];
3730 	}
3731 
3732 	return NULL;
3733 }
3734 
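/*
 * Hide the mapping attribute when the recorded root bus is 0 on any PMU
 * other than @zero_bus_pmu, the only one for which bus 0 is valid.
 */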
3735 static umode_t
3736 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3737 			 int die, int zero_bus_pmu)
3738 {
3739 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3740 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3741 
3742 	return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3743 }
3744 
3745 static umode_t
3746 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3747 {
3748 	/* Root bus 0x00 is valid only for pmu_idx = 0. */
3749 	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3750 }
3751 
3752 static ssize_t skx_iio_mapping_show(struct device *dev,
3753 				    struct device_attribute *attr, char *buf)
3754 {
3755 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3756 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3757 	long die = (long)ea->var;
3758 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3759 
3760 	return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3761 					   pmut ? pmut->iio->pci_bus_no : 0);
3762 }
3763 
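/*
 * SKX_MSR_CPU_BUS_NUMBER packs the package's PCI root-bus numbers one per
 * byte (BUS_NUM_STRIDE bits apart); the valid bit must be set by firmware
 * before the contents can be trusted.
 */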
3764 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3765 {
3766 	u64 msr_value;
3767 
3768 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3769 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3770 		return -ENXIO;
3771 
3772 	*topology = msr_value;
3773 
3774 	return 0;
3775 }
3776 
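/* Return the first online CPU on @die, or 0 if the die has no online CPU. */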
3777 static int die_to_cpu(int die)
3778 {
3779 	int res = 0, cpu, current_die;
3780 	/*
3781 	 * Hold cpus_read_lock() so that no CPU can go offline while we
3782 	 * walk cpu_online_mask.
3783 	 */
3784 	cpus_read_lock();
3785 	for_each_online_cpu(cpu) {
3786 		current_die = topology_logical_die_id(cpu);
3787 		if (current_die == die) {
3788 			res = cpu;
3789 			break;
3790 		}
3791 	}
3792 	cpus_read_unlock();
3793 	return res;
3794 }
3795 
3796 enum {
3797 	IIO_TOPOLOGY_TYPE,
3798 	UPI_TOPOLOGY_TYPE,
3799 	TOPOLOGY_MAX
3800 };
3801 
3802 static const size_t topology_size[TOPOLOGY_MAX] = {
3803 	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
3804 	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
3805 };
3806 
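/*
 * Allocate the per-die array of per-box topology entries plus the typed
 * payload for each entry; on failure, unwind everything allocated so far.
 */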
3807 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3808 {
3809 	int die, idx;
3810 	struct intel_uncore_topology **topology;
3811 
3812 	if (!type->num_boxes)
3813 		return -EPERM;
3814 
3815 	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3816 	if (!topology)
3817 		goto err;
3818 
3819 	for (die = 0; die < uncore_max_dies(); die++) {
3820 		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3821 		if (!topology[die])
3822 			goto clear;
3823 		for (idx = 0; idx < type->num_boxes; idx++) {
3824 			topology[die][idx].untyped = kcalloc(type->num_boxes,
3825 							     topology_size[topology_type],
3826 							     GFP_KERNEL);
3827 			if (!topology[die][idx].untyped)
3828 				goto clear;
3829 		}
3830 	}
3831 
3832 	type->topology = topology;
3833 
3834 	return 0;
3835 clear:
3836 	for (; die >= 0; die--) {
3837 		for (idx = 0; idx < type->num_boxes; idx++)
3838 			kfree(topology[die][idx].untyped);
3839 		kfree(topology[die]);
3840 	}
3841 	kfree(topology);
3842 err:
3843 	return -ENOMEM;
3844 }
3845 
3846 static void pmu_free_topology(struct intel_uncore_type *type)
3847 {
3848 	int die, idx;
3849 
3850 	if (type->topology) {
3851 		for (die = 0; die < uncore_max_dies(); die++) {
3852 			for (idx = 0; idx < type->num_boxes; idx++)
3853 				kfree(type->topology[die][idx].untyped);
3854 			kfree(type->topology[die]);
3855 		}
3856 		kfree(type->topology);
3857 		type->topology = NULL;
3858 	}
3859 }
3860 
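/*
 * For every die: read its CPU-to-bus MSR, resolve the PCI segment and hand
 * both to @topology_cb. Note that @ret briefly carries the segment number
 * between uncore_die_to_segment() and the callback.
 */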
3861 static int skx_pmu_get_topology(struct intel_uncore_type *type,
3862 				 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
3863 {
3864 	int die, ret = -EPERM;
3865 	u64 cpu_bus_msr;
3866 
3867 	for (die = 0; die < uncore_max_dies(); die++) {
3868 		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3869 		if (ret)
3870 			break;
3871 
3872 		ret = uncore_die_to_segment(die);
3873 		if (ret < 0)
3874 			break;
3875 
3876 		ret = topology_cb(type, ret, die, cpu_bus_msr);
3877 		if (ret)
3878 			break;
3879 	}
3880 
3881 	return ret;
3882 }
3883 
3884 static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
3885 				int die, u64 cpu_bus_msr)
3886 {
3887 	int idx;
3888 	struct intel_uncore_topology *t;
3889 
3890 	for (idx = 0; idx < type->num_boxes; idx++) {
3891 		t = &type->topology[die][idx];
3892 		t->pmu_idx = idx;
3893 		t->iio->segment = segment;
3894 		t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
3895 	}
3896 
3897 	return 0;
3898 }
3899 
3900 static int skx_iio_get_topology(struct intel_uncore_type *type)
3901 {
3902 	return skx_pmu_get_topology(type, skx_iio_topology_cb);
3903 }
3904 
3905 static struct attribute_group skx_iio_mapping_group = {
3906 	.is_visible	= skx_iio_mapping_visible,
3907 };
3908 
3909 static const struct attribute_group *skx_iio_attr_update[] = {
3910 	&skx_iio_mapping_group,
3911 	NULL,
3912 };
3913 
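/* Remove @ag from the NULL-terminated @groups list by shifting the tail down. */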
3914 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3915 				   struct attribute_group *ag)
3916 {
3917 	int i;
3918 
3919 	for (i = 0; groups[i]; i++) {
3920 		if (groups[i] == ag) {
3921 			for (i++; groups[i]; i++)
3922 				groups[i - 1] = groups[i];
3923 			groups[i - 1] = NULL;
3924 			break;
3925 		}
3926 	}
3927 }
3928 
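/*
 * Create one read-only "dieN" sysfs attribute per die in @ag; reading it
 * reports that die's topology via @show, e.g. (hypothetical values):
 *   $ cat /sys/devices/uncore_iio_0/die0
 *   0000:00
 * On any failure the mapping group is dropped from the attr_update list.
 */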
3929 static void
3930 pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3931 		ssize_t (*show)(struct device*, struct device_attribute*, char*),
3932 		int topology_type)
3933 {
3934 	char buf[64];
3935 	int ret;
3936 	long die = -1;
3937 	struct attribute **attrs = NULL;
3938 	struct dev_ext_attribute *eas = NULL;
3939 
3940 	ret = pmu_alloc_topology(type, topology_type);
3941 	if (ret < 0)
3942 		goto clear_attr_update;
3943 
3944 	ret = type->get_topology(type);
3945 	if (ret < 0)
3946 		goto clear_topology;
3947 
3948 	/* One more for NULL. */
3949 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3950 	if (!attrs)
3951 		goto clear_topology;
3952 
3953 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3954 	if (!eas)
3955 		goto clear_attrs;
3956 
3957 	for (die = 0; die < uncore_max_dies(); die++) {
3958 		snprintf(buf, sizeof(buf), "die%ld", die);
3959 		sysfs_attr_init(&eas[die].attr.attr);
3960 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3961 		if (!eas[die].attr.attr.name)
3962 			goto err;
3963 		eas[die].attr.attr.mode = 0444;
3964 		eas[die].attr.show = show;
3965 		eas[die].attr.store = NULL;
3966 		eas[die].var = (void *)die;
3967 		attrs[die] = &eas[die].attr.attr;
3968 	}
3969 	ag->attrs = attrs;
3970 
3971 	return;
3972 err:
3973 	for (; die >= 0; die--)
3974 		kfree(eas[die].attr.attr.name);
3975 	kfree(eas);
3976 clear_attrs:
3977 	kfree(attrs);
3978 clear_topology:
3979 	pmu_free_topology(type);
3980 clear_attr_update:
3981 	pmu_clear_mapping_attr(type->attr_update, ag);
3982 }
3983 
3984 static void
3985 pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3986 {
3987 	struct attribute **attr = ag->attrs;
3988 
3989 	if (!attr)
3990 		return;
3991 
3992 	for (; *attr; attr++)
3993 		kfree((*attr)->name);
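	/* The dev_ext_attributes were allocated as one array; the first entry addresses it. */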
3994 	kfree(attr_to_ext_attr(*ag->attrs));
3995 	kfree(ag->attrs);
3996 	ag->attrs = NULL;
3997 	pmu_free_topology(type);
3998 }
3999 
4000 static void
4001 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4002 {
4003 	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
4004 }
4005 
4006 static void skx_iio_set_mapping(struct intel_uncore_type *type)
4007 {
4008 	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4009 }
4010 
4011 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
4012 {
4013 	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4014 }
4015 
4016 static struct intel_uncore_type skx_uncore_iio = {
4017 	.name			= "iio",
4018 	.num_counters		= 4,
4019 	.num_boxes		= 6,
4020 	.perf_ctr_bits		= 48,
4021 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
4022 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
4023 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
4024 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4025 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
4026 	.msr_offset		= SKX_IIO_MSR_OFFSET,
4027 	.constraints		= skx_uncore_iio_constraints,
4028 	.ops			= &skx_uncore_iio_ops,
4029 	.format_group		= &skx_uncore_iio_format_group,
4030 	.attr_update		= skx_iio_attr_update,
4031 	.get_topology		= skx_iio_get_topology,
4032 	.set_mapping		= skx_iio_set_mapping,
4033 	.cleanup_mapping	= skx_iio_cleanup_mapping,
4034 };
4035 
4036 enum perf_uncore_iio_freerunning_type_id {
4037 	SKX_IIO_MSR_IOCLK			= 0,
4038 	SKX_IIO_MSR_BW				= 1,
4039 	SKX_IIO_MSR_UTIL			= 2,
4040 
4041 	SKX_IIO_FREERUNNING_TYPE_MAX,
4042 };
4043 
4044 
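/* Fields: { counter base MSR, counter stride, box stride, #counters, width in bits }. */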
4045 static struct freerunning_counters skx_iio_freerunning[] = {
4046 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
4047 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
4048 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
4049 };
4050 
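/*
 * The bandwidth .scale of 3.814697266e-6 below is 2^-18: the counters
 * count in 4-byte units and 4 / 2^20 converts that to MiB.
 */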
4051 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
4052 	/* Free-Running IO CLOCKS Counter */
4053 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4054 	/* Free-Running IIO BANDWIDTH Counters */
4055 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4056 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4057 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4058 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4059 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4060 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4061 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4062 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4063 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4064 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4065 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4066 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4067 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
4068 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
4069 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
4070 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
4071 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
4072 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
4073 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
4074 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
4075 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
4076 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
4077 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
4078 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
4079 	/* Free-running IIO UTILIZATION Counters */
4080 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
4081 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
4082 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
4083 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
4084 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
4085 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
4086 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
4087 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
4088 	{ /* end: all zeroes */ },
4089 };
4090 
4091 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
4092 	.read_counter		= uncore_msr_read_counter,
4093 	.hw_config		= uncore_freerunning_hw_config,
4094 };
4095 
4096 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
4097 	&format_attr_event.attr,
4098 	&format_attr_umask.attr,
4099 	NULL,
4100 };
4101 
4102 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
4103 	.name = "format",
4104 	.attrs = skx_uncore_iio_freerunning_formats_attr,
4105 };
4106 
4107 static struct intel_uncore_type skx_uncore_iio_free_running = {
4108 	.name			= "iio_free_running",
4109 	.num_counters		= 17,
4110 	.num_boxes		= 6,
4111 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
4112 	.freerunning		= skx_iio_freerunning,
4113 	.ops			= &skx_uncore_iio_freerunning_ops,
4114 	.event_descs		= skx_uncore_iio_freerunning_events,
4115 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4116 };
4117 
4118 static struct attribute *skx_uncore_formats_attr[] = {
4119 	&format_attr_event.attr,
4120 	&format_attr_umask.attr,
4121 	&format_attr_edge.attr,
4122 	&format_attr_inv.attr,
4123 	&format_attr_thresh8.attr,
4124 	NULL,
4125 };
4126 
4127 static const struct attribute_group skx_uncore_format_group = {
4128 	.name = "format",
4129 	.attrs = skx_uncore_formats_attr,
4130 };
4131 
4132 static struct intel_uncore_type skx_uncore_irp = {
4133 	.name			= "irp",
4134 	.num_counters		= 2,
4135 	.num_boxes		= 6,
4136 	.perf_ctr_bits		= 48,
4137 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
4138 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
4139 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4140 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
4141 	.msr_offset		= SKX_IRP_MSR_OFFSET,
4142 	.ops			= &skx_uncore_iio_ops,
4143 	.format_group		= &skx_uncore_format_group,
4144 };
4145 
4146 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4147 	&format_attr_event.attr,
4148 	&format_attr_umask.attr,
4149 	&format_attr_edge.attr,
4150 	&format_attr_inv.attr,
4151 	&format_attr_thresh8.attr,
4152 	&format_attr_occ_invert.attr,
4153 	&format_attr_occ_edge_det.attr,
4154 	&format_attr_filter_band0.attr,
4155 	&format_attr_filter_band1.attr,
4156 	&format_attr_filter_band2.attr,
4157 	&format_attr_filter_band3.attr,
4158 	NULL,
4159 };
4160 
4161 static struct attribute_group skx_uncore_pcu_format_group = {
4162 	.name = "format",
4163 	.attrs = skx_uncore_pcu_formats_attr,
4164 };
4165 
4166 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4167 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4168 	.hw_config		= hswep_pcu_hw_config,
4169 	.get_constraint		= snbep_pcu_get_constraint,
4170 	.put_constraint		= snbep_pcu_put_constraint,
4171 };
4172 
4173 static struct intel_uncore_type skx_uncore_pcu = {
4174 	.name			= "pcu",
4175 	.num_counters		= 4,
4176 	.num_boxes		= 1,
4177 	.perf_ctr_bits		= 48,
4178 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
4179 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4180 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4181 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4182 	.num_shared_regs	= 1,
4183 	.ops			= &skx_uncore_pcu_ops,
4184 	.format_group		= &skx_uncore_pcu_format_group,
4185 };
4186 
4187 static struct intel_uncore_type *skx_msr_uncores[] = {
4188 	&skx_uncore_ubox,
4189 	&skx_uncore_chabox,
4190 	&skx_uncore_iio,
4191 	&skx_uncore_iio_free_running,
4192 	&skx_uncore_irp,
4193 	&skx_uncore_pcu,
4194 	NULL,
4195 };
4196 
4197 /*
4198  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4199  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4200  */
4201 #define SKX_CAPID6		0x9c
4202 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4203 
4204 static int skx_count_chabox(void)
4205 {
4206 	struct pci_dev *dev = NULL;
4207 	u32 val = 0;
4208 
4209 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4210 	if (!dev)
4211 		goto out;
4212 
4213 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4214 	val &= SKX_CHA_BIT_MASK;
4215 out:
4216 	pci_dev_put(dev);
4217 	return hweight32(val);
4218 }
4219 
4220 void skx_uncore_cpu_init(void)
4221 {
4222 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4223 	uncore_msr_uncores = skx_msr_uncores;
4224 }
4225 
4226 static struct intel_uncore_type skx_uncore_imc = {
4227 	.name		= "imc",
4228 	.num_counters   = 4,
4229 	.num_boxes	= 6,
4230 	.perf_ctr_bits	= 48,
4231 	.fixed_ctr_bits	= 48,
4232 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4233 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4234 	.event_descs	= hswep_uncore_imc_events,
4235 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4236 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4237 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4238 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4239 	.ops		= &ivbep_uncore_pci_ops,
4240 	.format_group	= &skx_uncore_format_group,
4241 };
4242 
4243 static struct attribute *skx_upi_uncore_formats_attr[] = {
4244 	&format_attr_event.attr,
4245 	&format_attr_umask_ext.attr,
4246 	&format_attr_edge.attr,
4247 	&format_attr_inv.attr,
4248 	&format_attr_thresh8.attr,
4249 	NULL,
4250 };
4251 
4252 static const struct attribute_group skx_upi_uncore_format_group = {
4253 	.name = "format",
4254 	.attrs = skx_upi_uncore_formats_attr,
4255 };
4256 
4257 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4258 {
4259 	struct pci_dev *pdev = box->pci_dev;
4260 
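	/* Event control registers in this box are 8 bytes apart, hence CTL_OFFS8. */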
4261 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4262 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4263 }
4264 
4265 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4266 	.init_box	= skx_upi_uncore_pci_init_box,
4267 	.disable_box	= snbep_uncore_pci_disable_box,
4268 	.enable_box	= snbep_uncore_pci_enable_box,
4269 	.disable_event	= snbep_uncore_pci_disable_event,
4270 	.enable_event	= snbep_uncore_pci_enable_event,
4271 	.read_counter	= snbep_uncore_pci_read_counter,
4272 };
4273 
4274 static umode_t
4275 skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4276 {
4277 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
4278 
4279 	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
4280 }
4281 
4282 static ssize_t skx_upi_mapping_show(struct device *dev,
4283 				    struct device_attribute *attr, char *buf)
4284 {
4285 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4286 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4287 	long die = (long)ea->var;
4288 	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4289 
4290 	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4291 }
4292 
4293 #define SKX_UPI_REG_DID			0x2058
4294 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0	0x0e
4295 #define SKX_UPI_REGS_ADDR_FUNCTION	0x00
4296 
4297 /*
4298  * UPI Link Parameter 0
4299  * |  Bit  |  Default  |  Description
4300  * | 19:16 |     0h    | base_nodeid - The NodeID of the sending socket.
4301  * | 12:8  |    00h    | sending_port - The processor die port number of the sending port.
4302  */
4303 #define SKX_KTILP0_OFFSET	0x94
4304 
4305 /*
4306  * UPI Pcode Status. This register is used by PCode to store the link training status.
4307  * |  Bit  |  Default  |  Description
4308  * |   4   |     0h    | ll_status_valid - Bit indicates the valid training status
4309  *                       logged from PCode to the BIOS.
4310  */
4311 #define SKX_KTIPCSTS_OFFSET	0x120
4312 
4313 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4314 			     int pmu_idx)
4315 {
4316 	int ret;
4317 	u32 upi_conf;
4318 	struct uncore_upi_topology *upi = tp->upi;
4319 
4320 	tp->pmu_idx = pmu_idx;
4321 	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4322 	if (ret) {
4323 		ret = pcibios_err_to_errno(ret);
4324 		goto err;
4325 	}
4326 	upi->enabled = (upi_conf >> 4) & 1;
4327 	if (upi->enabled) {
4328 		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4329 					    &upi_conf);
4330 		if (ret) {
4331 			ret = pcibios_err_to_errno(ret);
4332 			goto err;
4333 		}
4334 		upi->die_to = (upi_conf >> 16) & 0xf;
4335 		upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4336 	}
4337 err:
4338 	return ret;
4339 }
4340 
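/*
 * UPI link devices sit on the package's fourth root bus (byte 3 of
 * SKX_MSR_CPU_BUS_NUMBER), one PCI device per link at function 0.
 */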
4341 static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
4342 				int die, u64 cpu_bus_msr)
4343 {
4344 	int idx, ret = 0;	/* stays 0 if no UPI link device is found */
4345 	struct intel_uncore_topology *upi;
4346 	unsigned int devfn;
4347 	struct pci_dev *dev = NULL;
4348 	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
4349 
4350 	for (idx = 0; idx < type->num_boxes; idx++) {
4351 		upi = &type->topology[die][idx];
4352 		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
4353 				  SKX_UPI_REGS_ADDR_FUNCTION);
4354 		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4355 		if (dev) {
4356 			ret = upi_fill_topology(dev, upi, idx);
4357 			if (ret)
4358 				break;
4359 		}
4360 	}
4361 
4362 	pci_dev_put(dev);
4363 	return ret;
4364 }
4365 
4366 static int skx_upi_get_topology(struct intel_uncore_type *type)
4367 {
4368 	/* CPX case is not supported */
4369 	if (boot_cpu_data.x86_stepping == 11)
4370 		return -EPERM;
4371 
4372 	return skx_pmu_get_topology(type, skx_upi_topology_cb);
4373 }
4374 
4375 static struct attribute_group skx_upi_mapping_group = {
4376 	.is_visible	= skx_upi_mapping_visible,
4377 };
4378 
4379 static const struct attribute_group *skx_upi_attr_update[] = {
4380 	&skx_upi_mapping_group,
4381 	NULL
4382 };
4383 
4384 static void
4385 pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4386 {
4387 	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4388 }
4389 
4390 static void skx_upi_set_mapping(struct intel_uncore_type *type)
4391 {
4392 	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4393 }
4394 
4395 static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
4396 {
4397 	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4398 }
4399 
4400 static struct intel_uncore_type skx_uncore_upi = {
4401 	.name		= "upi",
4402 	.num_counters   = 4,
4403 	.num_boxes	= 3,
4404 	.perf_ctr_bits	= 48,
4405 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4406 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4407 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4408 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4409 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4410 	.ops		= &skx_upi_uncore_pci_ops,
4411 	.format_group	= &skx_upi_uncore_format_group,
4412 	.attr_update	= skx_upi_attr_update,
4413 	.get_topology	= skx_upi_get_topology,
4414 	.set_mapping	= skx_upi_set_mapping,
4415 	.cleanup_mapping = skx_upi_cleanup_mapping,
4416 };
4417 
4418 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4419 {
4420 	struct pci_dev *pdev = box->pci_dev;
4421 
4422 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4423 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4424 }
4425 
4426 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4427 	.init_box	= skx_m2m_uncore_pci_init_box,
4428 	.disable_box	= snbep_uncore_pci_disable_box,
4429 	.enable_box	= snbep_uncore_pci_enable_box,
4430 	.disable_event	= snbep_uncore_pci_disable_event,
4431 	.enable_event	= snbep_uncore_pci_enable_event,
4432 	.read_counter	= snbep_uncore_pci_read_counter,
4433 };
4434 
4435 static struct intel_uncore_type skx_uncore_m2m = {
4436 	.name		= "m2m",
4437 	.num_counters   = 4,
4438 	.num_boxes	= 2,
4439 	.perf_ctr_bits	= 48,
4440 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4441 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4442 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4443 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4444 	.ops		= &skx_m2m_uncore_pci_ops,
4445 	.format_group	= &skx_uncore_format_group,
4446 };
4447 
4448 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4449 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4450 	EVENT_CONSTRAINT_END
4451 };
4452 
4453 static struct intel_uncore_type skx_uncore_m2pcie = {
4454 	.name		= "m2pcie",
4455 	.num_counters   = 4,
4456 	.num_boxes	= 4,
4457 	.perf_ctr_bits	= 48,
4458 	.constraints	= skx_uncore_m2pcie_constraints,
4459 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4460 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4461 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4462 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4463 	.ops		= &ivbep_uncore_pci_ops,
4464 	.format_group	= &skx_uncore_format_group,
4465 };
4466 
4467 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4468 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4469 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4470 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4471 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4472 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4473 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4474 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4475 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4476 	EVENT_CONSTRAINT_END
4477 };
4478 
4479 static struct intel_uncore_type skx_uncore_m3upi = {
4480 	.name		= "m3upi",
4481 	.num_counters   = 3,
4482 	.num_boxes	= 3,
4483 	.perf_ctr_bits	= 48,
4484 	.constraints	= skx_uncore_m3upi_constraints,
4485 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4486 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4487 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4488 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4489 	.ops		= &ivbep_uncore_pci_ops,
4490 	.format_group	= &skx_uncore_format_group,
4491 };
4492 
4493 enum {
4494 	SKX_PCI_UNCORE_IMC,
4495 	SKX_PCI_UNCORE_M2M,
4496 	SKX_PCI_UNCORE_UPI,
4497 	SKX_PCI_UNCORE_M2PCIE,
4498 	SKX_PCI_UNCORE_M3UPI,
4499 };
4500 
4501 static struct intel_uncore_type *skx_pci_uncores[] = {
4502 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4503 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4504 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4505 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4506 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4507 	NULL,
4508 };
4509 
4510 static const struct pci_device_id skx_uncore_pci_ids[] = {
4511 	{ /* MC0 Channel 0 */
4512 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4513 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4514 	},
4515 	{ /* MC0 Channel 1 */
4516 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4517 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4518 	},
4519 	{ /* MC0 Channel 2 */
4520 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4521 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4522 	},
4523 	{ /* MC1 Channel 0 */
4524 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4525 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4526 	},
4527 	{ /* MC1 Channel 1 */
4528 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4529 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4530 	},
4531 	{ /* MC1 Channel 2 */
4532 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4533 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4534 	},
4535 	{ /* M2M0 */
4536 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4537 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4538 	},
4539 	{ /* M2M1 */
4540 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4541 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4542 	},
4543 	{ /* UPI0 Link 0 */
4544 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4545 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4546 	},
4547 	{ /* UPI0 Link 1 */
4548 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4549 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4550 	},
4551 	{ /* UPI1 Link 2 */
4552 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4553 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4554 	},
4555 	{ /* M2PCIe 0 */
4556 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4557 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4558 	},
4559 	{ /* M2PCIe 1 */
4560 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4561 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4562 	},
4563 	{ /* M2PCIe 2 */
4564 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4565 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4566 	},
4567 	{ /* M2PCIe 3 */
4568 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4569 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4570 	},
4571 	{ /* M3UPI0 Link 0 */
4572 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4573 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4574 	},
4575 	{ /* M3UPI0 Link 1 */
4576 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4577 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4578 	},
4579 	{ /* M3UPI1 Link 2 */
4580 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4581 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4582 	},
4583 	{ /* end: all zeroes */ }
4584 };
4585 
4586 
4587 static struct pci_driver skx_uncore_pci_driver = {
4588 	.name		= "skx_uncore",
4589 	.id_table	= skx_uncore_pci_ids,
4590 };
4591 
4592 int skx_uncore_pci_init(void)
4593 {
4594 	/* NB: the PCI device ID used here for the bus-to-node mapping still needs double-checking. */
4595 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4596 
4597 	if (ret)
4598 		return ret;
4599 
4600 	uncore_pci_uncores = skx_pci_uncores;
4601 	uncore_pci_driver = &skx_uncore_pci_driver;
4602 	return 0;
4603 }
4604 
4605 /* end of SKX uncore support */
4606 
4607 /* SNR uncore support */
4608 
4609 static struct intel_uncore_type snr_uncore_ubox = {
4610 	.name			= "ubox",
4611 	.num_counters		= 2,
4612 	.num_boxes		= 1,
4613 	.perf_ctr_bits		= 48,
4614 	.fixed_ctr_bits		= 48,
4615 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4616 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4617 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4618 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4619 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4620 	.ops			= &ivbep_uncore_msr_ops,
4621 	.format_group		= &ivbep_uncore_format_group,
4622 };
4623 
4624 static struct attribute *snr_uncore_cha_formats_attr[] = {
4625 	&format_attr_event.attr,
4626 	&format_attr_umask_ext2.attr,
4627 	&format_attr_edge.attr,
4628 	&format_attr_tid_en.attr,
4629 	&format_attr_inv.attr,
4630 	&format_attr_thresh8.attr,
4631 	&format_attr_filter_tid5.attr,
4632 	NULL,
4633 };
4634 static const struct attribute_group snr_uncore_chabox_format_group = {
4635 	.name = "format",
4636 	.attrs = snr_uncore_cha_formats_attr,
4637 };
4638 
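/*
 * SNR CHA exposes a single filter register per box and only honours the
 * TID field from config1, so no shared-reg constraint handling is needed.
 */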
4639 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4640 {
4641 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4642 
4643 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4644 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4645 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4646 	reg1->idx = 0;
4647 
4648 	return 0;
4649 }
4650 
4651 static void snr_cha_enable_event(struct intel_uncore_box *box,
4652 				   struct perf_event *event)
4653 {
4654 	struct hw_perf_event *hwc = &event->hw;
4655 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4656 
4657 	if (reg1->idx != EXTRA_REG_NONE)
4658 		wrmsrl(reg1->reg, reg1->config);
4659 
4660 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4661 }
4662 
4663 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4664 	.init_box		= ivbep_uncore_msr_init_box,
4665 	.disable_box		= snbep_uncore_msr_disable_box,
4666 	.enable_box		= snbep_uncore_msr_enable_box,
4667 	.disable_event		= snbep_uncore_msr_disable_event,
4668 	.enable_event		= snr_cha_enable_event,
4669 	.read_counter		= uncore_msr_read_counter,
4670 	.hw_config		= snr_cha_hw_config,
4671 };
4672 
4673 static struct intel_uncore_type snr_uncore_chabox = {
4674 	.name			= "cha",
4675 	.num_counters		= 4,
4676 	.num_boxes		= 6,
4677 	.perf_ctr_bits		= 48,
4678 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4679 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4680 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4681 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4682 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4683 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4684 	.ops			= &snr_uncore_chabox_ops,
4685 	.format_group		= &snr_uncore_chabox_format_group,
4686 };
4687 
4688 static struct attribute *snr_uncore_iio_formats_attr[] = {
4689 	&format_attr_event.attr,
4690 	&format_attr_umask.attr,
4691 	&format_attr_edge.attr,
4692 	&format_attr_inv.attr,
4693 	&format_attr_thresh9.attr,
4694 	&format_attr_ch_mask2.attr,
4695 	&format_attr_fc_mask2.attr,
4696 	NULL,
4697 };
4698 
4699 static const struct attribute_group snr_uncore_iio_format_group = {
4700 	.name = "format",
4701 	.attrs = snr_uncore_iio_formats_attr,
4702 };
4703 
4704 static umode_t
4705 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4706 {
4707 	/* Root bus 0x00 is valid only for pmu_idx = 1. */
4708 	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4709 }
4710 
4711 static struct attribute_group snr_iio_mapping_group = {
4712 	.is_visible	= snr_iio_mapping_visible,
4713 };
4714 
4715 static const struct attribute_group *snr_iio_attr_update[] = {
4716 	&snr_iio_mapping_group,
4717 	NULL,
4718 };
4719 
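/*
 * Build the IIO topology from hardware: walk every Mesh2IIO mapping
 * device, read its SAD_CONTROL_CFG register to learn which IIO stack
 * the device serves, convert that stack ID to PMON notation via the
 * supplied mapping table, and record the PCI segment and bus number
 * for the corresponding die/stack slot.
 */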
4720 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4721 {
4722 	u32 sad_cfg;
4723 	int die, stack_id, ret = -EPERM;
4724 	struct pci_dev *dev = NULL;
4725 
4726 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4727 		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4728 		if (ret) {
4729 			ret = pcibios_err_to_errno(ret);
4730 			break;
4731 		}
4732 
4733 		die = uncore_pcibus_to_dieid(dev->bus);
4734 		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4735 		if (die < 0 || stack_id >= type->num_boxes) {
4736 			ret = -EPERM;
4737 			break;
4738 		}
4739 
4740 		/* Convert stack id from SAD_CONTROL to PMON notation. */
4741 		stack_id = sad_pmon_mapping[stack_id];
4742 
4743 		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4744 		type->topology[die][stack_id].pmu_idx = stack_id;
4745 		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4746 	}
4747 
4748 	pci_dev_put(dev);
4749 
4750 	return ret;
4751 }
4752 
4753 /*
4754  * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
4755  */
4756 enum {
4757 	SNR_QAT_PMON_ID,
4758 	SNR_CBDMA_DMI_PMON_ID,
4759 	SNR_NIS_PMON_ID,
4760 	SNR_DLB_PMON_ID,
4761 	SNR_PCIE_GEN3_PMON_ID
4762 };
4763 
4764 static u8 snr_sad_pmon_mapping[] = {
4765 	SNR_CBDMA_DMI_PMON_ID,
4766 	SNR_PCIE_GEN3_PMON_ID,
4767 	SNR_DLB_PMON_ID,
4768 	SNR_NIS_PMON_ID,
4769 	SNR_QAT_PMON_ID
4770 };
4771 
4772 static int snr_iio_get_topology(struct intel_uncore_type *type)
4773 {
4774 	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4775 }
4776 
4777 static void snr_iio_set_mapping(struct intel_uncore_type *type)
4778 {
4779 	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4780 }
4781 
4782 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4783 {
4784 	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4785 }
4786 
4787 static struct event_constraint snr_uncore_iio_constraints[] = {
4788 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4789 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4790 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
4791 	EVENT_CONSTRAINT_END
4792 };
4793 
4794 static struct intel_uncore_type snr_uncore_iio = {
4795 	.name			= "iio",
4796 	.num_counters		= 4,
4797 	.num_boxes		= 5,
4798 	.perf_ctr_bits		= 48,
4799 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4800 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4801 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4802 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4803 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4804 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4805 	.constraints		= snr_uncore_iio_constraints,
4806 	.ops			= &ivbep_uncore_msr_ops,
4807 	.format_group		= &snr_uncore_iio_format_group,
4808 	.attr_update		= snr_iio_attr_update,
4809 	.get_topology		= snr_iio_get_topology,
4810 	.set_mapping		= snr_iio_set_mapping,
4811 	.cleanup_mapping	= snr_iio_cleanup_mapping,
4812 };
4813 
4814 static struct intel_uncore_type snr_uncore_irp = {
4815 	.name			= "irp",
4816 	.num_counters		= 2,
4817 	.num_boxes		= 5,
4818 	.perf_ctr_bits		= 48,
4819 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4820 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4821 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4822 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4823 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4824 	.ops			= &ivbep_uncore_msr_ops,
4825 	.format_group		= &ivbep_uncore_format_group,
4826 };
4827 
4828 static struct intel_uncore_type snr_uncore_m2pcie = {
4829 	.name		= "m2pcie",
4830 	.num_counters	= 4,
4831 	.num_boxes	= 5,
4832 	.perf_ctr_bits	= 48,
4833 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4834 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4835 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4836 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4837 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4838 	.ops		= &ivbep_uncore_msr_ops,
4839 	.format_group	= &ivbep_uncore_format_group,
4840 };
4841 
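/*
 * PCU occupancy events 0xb-0xe take an extra filter value from
 * config1, which is programmed into the shared PCU filter MSR.
 */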
4842 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4843 {
4844 	struct hw_perf_event *hwc = &event->hw;
4845 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4846 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4847 
4848 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4849 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4850 		reg1->idx = ev_sel - 0xb;
4851 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4852 	}
4853 	return 0;
4854 }
4855 
4856 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4857 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4858 	.hw_config		= snr_pcu_hw_config,
4859 	.get_constraint		= snbep_pcu_get_constraint,
4860 	.put_constraint		= snbep_pcu_put_constraint,
4861 };
4862 
4863 static struct intel_uncore_type snr_uncore_pcu = {
4864 	.name			= "pcu",
4865 	.num_counters		= 4,
4866 	.num_boxes		= 1,
4867 	.perf_ctr_bits		= 48,
4868 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4869 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4870 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4871 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4872 	.num_shared_regs	= 1,
4873 	.ops			= &snr_uncore_pcu_ops,
4874 	.format_group		= &skx_uncore_pcu_format_group,
4875 };
4876 
4877 enum perf_uncore_snr_iio_freerunning_type_id {
4878 	SNR_IIO_MSR_IOCLK,
4879 	SNR_IIO_MSR_BW_IN,
4880 
4881 	SNR_IIO_FREERUNNING_TYPE_MAX,
4882 };
4883 
4884 static struct freerunning_counters snr_iio_freerunning[] = {
4885 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4886 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4887 };
4888 
4889 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4890 	/* Free-Running IIO CLOCKS Counter */
4891 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4892 	/* Free-Running IIO BANDWIDTH IN Counters */
4893 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4894 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4895 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4896 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4897 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4898 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4899 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4900 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4901 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4902 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4903 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4904 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4905 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4906 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4907 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4908 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4909 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4910 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4911 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4912 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4913 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4914 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4915 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4916 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4917 	{ /* end: all zeroes */ },
4918 };
4919 
4920 static struct intel_uncore_type snr_uncore_iio_free_running = {
4921 	.name			= "iio_free_running",
4922 	.num_counters		= 9,
4923 	.num_boxes		= 5,
4924 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4925 	.freerunning		= snr_iio_freerunning,
4926 	.ops			= &skx_uncore_iio_freerunning_ops,
4927 	.event_descs		= snr_uncore_iio_freerunning_events,
4928 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4929 };
4930 
4931 static struct intel_uncore_type *snr_msr_uncores[] = {
4932 	&snr_uncore_ubox,
4933 	&snr_uncore_chabox,
4934 	&snr_uncore_iio,
4935 	&snr_uncore_irp,
4936 	&snr_uncore_m2pcie,
4937 	&snr_uncore_pcu,
4938 	&snr_uncore_iio_free_running,
4939 	NULL,
4940 };
4941 
4942 void snr_uncore_cpu_init(void)
4943 {
4944 	uncore_msr_uncores = snr_msr_uncores;
4945 }
4946 
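/*
 * M2M event control registers are 8 bytes apart, so mark the box with
 * the CTL_OFFS8 flag before resetting it.
 */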
4947 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4948 {
4949 	struct pci_dev *pdev = box->pci_dev;
4950 	int box_ctl = uncore_pci_box_ctl(box);
4951 
4952 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4953 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4954 }
4955 
4956 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4957 	.init_box	= snr_m2m_uncore_pci_init_box,
4958 	.disable_box	= snbep_uncore_pci_disable_box,
4959 	.enable_box	= snbep_uncore_pci_enable_box,
4960 	.disable_event	= snbep_uncore_pci_disable_event,
4961 	.enable_event	= snbep_uncore_pci_enable_event,
4962 	.read_counter	= snbep_uncore_pci_read_counter,
4963 };
4964 
4965 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4966 	&format_attr_event.attr,
4967 	&format_attr_umask_ext3.attr,
4968 	&format_attr_edge.attr,
4969 	&format_attr_inv.attr,
4970 	&format_attr_thresh8.attr,
4971 	NULL,
4972 };
4973 
4974 static const struct attribute_group snr_m2m_uncore_format_group = {
4975 	.name = "format",
4976 	.attrs = snr_m2m_uncore_formats_attr,
4977 };
4978 
4979 static struct intel_uncore_type snr_uncore_m2m = {
4980 	.name		= "m2m",
4981 	.num_counters   = 4,
4982 	.num_boxes	= 1,
4983 	.perf_ctr_bits	= 48,
4984 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4985 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4986 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4987 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4988 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4989 	.ops		= &snr_m2m_uncore_pci_ops,
4990 	.format_group	= &snr_m2m_uncore_format_group,
4991 };
4992 
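/*
 * The event control register is 64 bits wide here, so the config is
 * written as two 32-bit accesses: the low dword (with the enable bit
 * set) first, then the high dword.
 */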
4993 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4994 {
4995 	struct pci_dev *pdev = box->pci_dev;
4996 	struct hw_perf_event *hwc = &event->hw;
4997 
4998 	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4999 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5000 }
5001 
5002 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
5003 	.init_box	= snr_m2m_uncore_pci_init_box,
5004 	.disable_box	= snbep_uncore_pci_disable_box,
5005 	.enable_box	= snbep_uncore_pci_enable_box,
5006 	.disable_event	= snbep_uncore_pci_disable_event,
5007 	.enable_event	= snr_uncore_pci_enable_event,
5008 	.read_counter	= snbep_uncore_pci_read_counter,
5009 };
5010 
5011 static struct intel_uncore_type snr_uncore_pcie3 = {
5012 	.name		= "pcie3",
5013 	.num_counters	= 4,
5014 	.num_boxes	= 1,
5015 	.perf_ctr_bits	= 48,
5016 	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
5017 	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
5018 	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
5019 	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
5020 	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
5021 	.ops		= &snr_pcie3_uncore_pci_ops,
5022 	.format_group	= &skx_uncore_iio_format_group,
5023 };
5024 
5025 enum {
5026 	SNR_PCI_UNCORE_M2M,
5027 	SNR_PCI_UNCORE_PCIE3,
5028 };
5029 
5030 static struct intel_uncore_type *snr_pci_uncores[] = {
5031 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
5032 	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
5033 	NULL,
5034 };
5035 
5036 static const struct pci_device_id snr_uncore_pci_ids[] = {
5037 	{ /* M2M */
5038 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5039 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
5040 	},
5041 	{ /* end: all zeroes */ }
5042 };
5043 
5044 static struct pci_driver snr_uncore_pci_driver = {
5045 	.name		= "snr_uncore",
5046 	.id_table	= snr_uncore_pci_ids,
5047 };
5048 
5049 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
5050 	{ /* PCIe3 RP */
5051 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
5052 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
5053 	},
5054 	{ /* end: all zeroes */ }
5055 };
5056 
5057 static struct pci_driver snr_uncore_pci_sub_driver = {
5058 	.name		= "snr_uncore_sub",
5059 	.id_table	= snr_uncore_pci_sub_ids,
5060 };
5061 
5062 int snr_uncore_pci_init(void)
5063 {
5064 	/* SNR UBOX DID */
5065 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
5066 					 SKX_GIDNIDMAP, true);
5067 
5068 	if (ret)
5069 		return ret;
5070 
5071 	uncore_pci_uncores = snr_pci_uncores;
5072 	uncore_pci_driver = &snr_uncore_pci_driver;
5073 	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
5074 	return 0;
5075 }
5076 
5077 #define SNR_MC_DEVICE_ID	0x3451
5078 
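/*
 * Find the memory controller PCI device that sits on the same die as
 * the given id; the returned device holds a reference that the caller
 * must drop with pci_dev_put().
 */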
5079 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
5080 {
5081 	struct pci_dev *mc_dev = NULL;
5082 	int pkg;
5083 
5084 	while (1) {
5085 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
5086 		if (!mc_dev)
5087 			break;
5088 		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
5089 		if (pkg == id)
5090 			break;
5091 	}
5092 	return mc_dev;
5093 }
5094 
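/*
 * Compose the PMON MMIO address from two config-space dwords of the
 * die's MC device (base bits shifted up by 23, MEM0 bits by 12), add
 * the box control offset, and ioremap the box's register range.
 */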
5095 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
5096 			       unsigned int box_ctl, int mem_offset,
5097 			       unsigned int device)
5098 {
5099 	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
5100 	struct intel_uncore_type *type = box->pmu->type;
5101 	resource_size_t addr;
5102 	u32 pci_dword;
5103 
5104 	if (!pdev)
5105 		return -ENODEV;
5106 
5107 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
5108 	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
5109 
5110 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
5111 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
5112 
5113 	addr += box_ctl;
5114 
5115 	pci_dev_put(pdev);
5116 
5117 	box->io_addr = ioremap(addr, type->mmio_map_size);
5118 	if (!box->io_addr) {
5119 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5120 		return -EINVAL;
5121 	}
5122 
5123 	return 0;
5124 }
5125 
5126 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
5127 				       unsigned int box_ctl, int mem_offset,
5128 				       unsigned int device)
5129 {
5130 	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
5131 		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5132 }
5133 
5134 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
5135 {
5136 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5137 				   SNR_IMC_MMIO_MEM0_OFFSET,
5138 				   SNR_MC_DEVICE_ID);
5139 }
5140 
5141 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5142 {
5143 	u32 config;
5144 
5145 	if (!box->io_addr)
5146 		return;
5147 
5148 	config = readl(box->io_addr);
5149 	config |= SNBEP_PMON_BOX_CTL_FRZ;
5150 	writel(config, box->io_addr);
5151 }
5152 
5153 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5154 {
5155 	u32 config;
5156 
5157 	if (!box->io_addr)
5158 		return;
5159 
5160 	config = readl(box->io_addr);
5161 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5162 	writel(config, box->io_addr);
5163 }
5164 
5165 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5166 					   struct perf_event *event)
5167 {
5168 	struct hw_perf_event *hwc = &event->hw;
5169 
5170 	if (!box->io_addr)
5171 		return;
5172 
5173 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5174 		return;
5175 
5176 	writel(hwc->config | SNBEP_PMON_CTL_EN,
5177 	       box->io_addr + hwc->config_base);
5178 }
5179 
5180 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
5181 					    struct perf_event *event)
5182 {
5183 	struct hw_perf_event *hwc = &event->hw;
5184 
5185 	if (!box->io_addr)
5186 		return;
5187 
5188 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5189 		return;
5190 
5191 	writel(hwc->config, box->io_addr + hwc->config_base);
5192 }
5193 
5194 static struct intel_uncore_ops snr_uncore_mmio_ops = {
5195 	.init_box	= snr_uncore_mmio_init_box,
5196 	.exit_box	= uncore_mmio_exit_box,
5197 	.disable_box	= snr_uncore_mmio_disable_box,
5198 	.enable_box	= snr_uncore_mmio_enable_box,
5199 	.disable_event	= snr_uncore_mmio_disable_event,
5200 	.enable_event	= snr_uncore_mmio_enable_event,
5201 	.read_counter	= uncore_mmio_read_counter,
5202 };
5203 
5204 static struct uncore_event_desc snr_uncore_imc_events[] = {
5205 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
5206 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
5207 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
5208 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
5209 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
5210 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
5211 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
5212 	{ /* end: all zeroes */ },
5213 };
5214 
5215 static struct intel_uncore_type snr_uncore_imc = {
5216 	.name		= "imc",
5217 	.num_counters   = 4,
5218 	.num_boxes	= 2,
5219 	.perf_ctr_bits	= 48,
5220 	.fixed_ctr_bits	= 48,
5221 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5222 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5223 	.event_descs	= snr_uncore_imc_events,
5224 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5225 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5226 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5227 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5228 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5229 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5230 	.ops		= &snr_uncore_mmio_ops,
5231 	.format_group	= &skx_uncore_format_group,
5232 };
5233 
5234 enum perf_uncore_snr_imc_freerunning_type_id {
5235 	SNR_IMC_DCLK,
5236 	SNR_IMC_DDR,
5237 
5238 	SNR_IMC_FREERUNNING_TYPE_MAX,
5239 };
5240 
5241 static struct freerunning_counters snr_imc_freerunning[] = {
5242 	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5243 	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5244 };
5245 
5246 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
5247 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
5248 
5249 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
5250 	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
5251 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
5252 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
5253 	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
5254 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
5255 	{ /* end: all zeroes */ },
5256 };
5257 
5258 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
5259 	.init_box	= snr_uncore_mmio_init_box,
5260 	.exit_box	= uncore_mmio_exit_box,
5261 	.read_counter	= uncore_mmio_read_counter,
5262 	.hw_config	= uncore_freerunning_hw_config,
5263 };
5264 
5265 static struct intel_uncore_type snr_uncore_imc_free_running = {
5266 	.name			= "imc_free_running",
5267 	.num_counters		= 3,
5268 	.num_boxes		= 1,
5269 	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
5270 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5271 	.freerunning		= snr_imc_freerunning,
5272 	.ops			= &snr_uncore_imc_freerunning_ops,
5273 	.event_descs		= snr_uncore_imc_freerunning_events,
5274 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5275 };
5276 
5277 static struct intel_uncore_type *snr_mmio_uncores[] = {
5278 	&snr_uncore_imc,
5279 	&snr_uncore_imc_free_running,
5280 	NULL,
5281 };
5282 
5283 void snr_uncore_mmio_init(void)
5284 {
5285 	uncore_mmio_uncores = snr_mmio_uncores;
5286 }
5287 
5288 /* end of SNR uncore support */
5289 
5290 /* ICX uncore support */
5291 
5292 static u64 icx_cha_msr_offsets[] = {
5293 	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5294 	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5295 	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5296 	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
5297 	0x1c,  0x2a,  0x38,  0x46,
5298 };
5299 
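/*
 * The CHA filter register is only programmed when TID filtering is
 * enabled. ICX CHA MSRs are not uniformly spaced, hence the per-box
 * offset table above.
 */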
5300 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5301 {
5302 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5303 	bool tid_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5304 
5305 	if (tid_en) {
5306 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5307 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
5308 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5309 		reg1->idx = 0;
5310 	}
5311 
5312 	return 0;
5313 }
5314 
5315 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5316 	.init_box		= ivbep_uncore_msr_init_box,
5317 	.disable_box		= snbep_uncore_msr_disable_box,
5318 	.enable_box		= snbep_uncore_msr_enable_box,
5319 	.disable_event		= snbep_uncore_msr_disable_event,
5320 	.enable_event		= snr_cha_enable_event,
5321 	.read_counter		= uncore_msr_read_counter,
5322 	.hw_config		= icx_cha_hw_config,
5323 };
5324 
5325 static struct intel_uncore_type icx_uncore_chabox = {
5326 	.name			= "cha",
5327 	.num_counters		= 4,
5328 	.perf_ctr_bits		= 48,
5329 	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
5330 	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
5331 	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
5332 	.msr_offsets		= icx_cha_msr_offsets,
5333 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5334 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
5335 	.constraints		= skx_uncore_chabox_constraints,
5336 	.ops			= &icx_uncore_chabox_ops,
5337 	.format_group		= &snr_uncore_chabox_format_group,
5338 };
5339 
5340 static u64 icx_msr_offsets[] = {
5341 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5342 };
5343 
5344 static struct event_constraint icx_uncore_iio_constraints[] = {
5345 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5346 	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5347 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5348 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
5349 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5350 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5351 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
5352 	EVENT_CONSTRAINT_END
5353 };
5354 
5355 static umode_t
5356 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5357 {
5358 	/* Root bus 0x00 is valid only for pmu_idx = 5. */
5359 	return pmu_iio_mapping_visible(kobj, attr, die, 5);
5360 }
5361 
5362 static struct attribute_group icx_iio_mapping_group = {
5363 	.is_visible	= icx_iio_mapping_visible,
5364 };
5365 
5366 static const struct attribute_group *icx_iio_attr_update[] = {
5367 	&icx_iio_mapping_group,
5368 	NULL,
5369 };
5370 
5371 /*
5372  * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5373  */
5374 enum {
5375 	ICX_PCIE1_PMON_ID,
5376 	ICX_PCIE2_PMON_ID,
5377 	ICX_PCIE3_PMON_ID,
5378 	ICX_PCIE4_PMON_ID,
5379 	ICX_PCIE5_PMON_ID,
5380 	ICX_CBDMA_DMI_PMON_ID
5381 };
5382 
5383 static u8 icx_sad_pmon_mapping[] = {
5384 	ICX_CBDMA_DMI_PMON_ID,
5385 	ICX_PCIE1_PMON_ID,
5386 	ICX_PCIE2_PMON_ID,
5387 	ICX_PCIE3_PMON_ID,
5388 	ICX_PCIE4_PMON_ID,
5389 	ICX_PCIE5_PMON_ID,
5390 };
5391 
5392 static int icx_iio_get_topology(struct intel_uncore_type *type)
5393 {
5394 	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5395 }
5396 
5397 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5398 {
5399 	/* Detect an ICX-D system; this case is not supported. */
5400 	if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
5401 		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5402 		return;
5403 	}
5404 	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5405 }
5406 
5407 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5408 {
5409 	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5410 }
5411 
5412 static struct intel_uncore_type icx_uncore_iio = {
5413 	.name			= "iio",
5414 	.num_counters		= 4,
5415 	.num_boxes		= 6,
5416 	.perf_ctr_bits		= 48,
5417 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
5418 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
5419 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5420 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5421 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
5422 	.msr_offsets		= icx_msr_offsets,
5423 	.constraints		= icx_uncore_iio_constraints,
5424 	.ops			= &skx_uncore_iio_ops,
5425 	.format_group		= &snr_uncore_iio_format_group,
5426 	.attr_update		= icx_iio_attr_update,
5427 	.get_topology		= icx_iio_get_topology,
5428 	.set_mapping		= icx_iio_set_mapping,
5429 	.cleanup_mapping	= icx_iio_cleanup_mapping,
5430 };
5431 
5432 static struct intel_uncore_type icx_uncore_irp = {
5433 	.name			= "irp",
5434 	.num_counters		= 2,
5435 	.num_boxes		= 6,
5436 	.perf_ctr_bits		= 48,
5437 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
5438 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
5439 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5440 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
5441 	.msr_offsets		= icx_msr_offsets,
5442 	.ops			= &ivbep_uncore_msr_ops,
5443 	.format_group		= &ivbep_uncore_format_group,
5444 };
5445 
5446 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5447 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5448 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5449 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5450 	EVENT_CONSTRAINT_END
5451 };
5452 
5453 static struct intel_uncore_type icx_uncore_m2pcie = {
5454 	.name		= "m2pcie",
5455 	.num_counters	= 4,
5456 	.num_boxes	= 6,
5457 	.perf_ctr_bits	= 48,
5458 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
5459 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
5460 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
5461 	.msr_offsets	= icx_msr_offsets,
5462 	.constraints	= icx_uncore_m2pcie_constraints,
5463 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5464 	.ops		= &ivbep_uncore_msr_ops,
5465 	.format_group	= &ivbep_uncore_format_group,
5466 };
5467 
5468 enum perf_uncore_icx_iio_freerunning_type_id {
5469 	ICX_IIO_MSR_IOCLK,
5470 	ICX_IIO_MSR_BW_IN,
5471 
5472 	ICX_IIO_FREERUNNING_TYPE_MAX,
5473 };
5474 
5475 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5476 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5477 };
5478 
5479 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5480 	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5481 };
5482 
5483 static struct freerunning_counters icx_iio_freerunning[] = {
5484 	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5485 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5486 };
5487 
5488 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5489 	/* Free-Running IIO CLOCKS Counter */
5490 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
5491 	/* Free-Running IIO BANDWIDTH IN Counters */
5492 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
5493 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
5494 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
5495 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
5496 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
5497 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
5498 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
5499 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
5500 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
5501 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
5502 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
5503 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
5504 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
5505 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
5506 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
5507 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
5508 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
5509 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
5510 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
5511 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
5512 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
5513 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
5514 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
5515 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
5516 	{ /* end: all zeroes */ },
5517 };
5518 
5519 static struct intel_uncore_type icx_uncore_iio_free_running = {
5520 	.name			= "iio_free_running",
5521 	.num_counters		= 9,
5522 	.num_boxes		= 6,
5523 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
5524 	.freerunning		= icx_iio_freerunning,
5525 	.ops			= &skx_uncore_iio_freerunning_ops,
5526 	.event_descs		= icx_uncore_iio_freerunning_events,
5527 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5528 };
5529 
5530 static struct intel_uncore_type *icx_msr_uncores[] = {
5531 	&skx_uncore_ubox,
5532 	&icx_uncore_chabox,
5533 	&icx_uncore_iio,
5534 	&icx_uncore_irp,
5535 	&icx_uncore_m2pcie,
5536 	&skx_uncore_pcu,
5537 	&icx_uncore_iio_free_running,
5538 	NULL,
5539 };
5540 
5541 /*
5542  * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5543  * registers, which are located at Device 30, Function 3.
5544  */
5545 #define ICX_CAPID6		0x9c
5546 #define ICX_CAPID7		0xa0
5547 
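/*
 * Read the 64-bit CHA capability mask from CAPID6/CAPID7 of PCI
 * device 0x345b; each set bit corresponds to one available CHA.
 */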
5548 static u64 icx_count_chabox(void)
5549 {
5550 	struct pci_dev *dev = NULL;
5551 	u64 caps = 0;
5552 
5553 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5554 	if (!dev)
5555 		goto out;
5556 
5557 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5558 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5559 out:
5560 	pci_dev_put(dev);
5561 	return hweight64(caps);
5562 }
5563 
5564 void icx_uncore_cpu_init(void)
5565 {
5566 	u64 num_boxes = icx_count_chabox();
5567 
5568 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5569 		return;
5570 	icx_uncore_chabox.num_boxes = num_boxes;
5571 	uncore_msr_uncores = icx_msr_uncores;
5572 }
5573 
5574 static struct intel_uncore_type icx_uncore_m2m = {
5575 	.name		= "m2m",
5576 	.num_counters   = 4,
5577 	.num_boxes	= 4,
5578 	.perf_ctr_bits	= 48,
5579 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
5580 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
5581 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5582 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
5583 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
5584 	.ops		= &snr_m2m_uncore_pci_ops,
5585 	.format_group	= &snr_m2m_uncore_format_group,
5586 };
5587 
5588 static struct attribute *icx_upi_uncore_formats_attr[] = {
5589 	&format_attr_event.attr,
5590 	&format_attr_umask_ext4.attr,
5591 	&format_attr_edge.attr,
5592 	&format_attr_inv.attr,
5593 	&format_attr_thresh8.attr,
5594 	NULL,
5595 };
5596 
5597 static const struct attribute_group icx_upi_uncore_format_group = {
5598 	.name = "format",
5599 	.attrs = icx_upi_uncore_formats_attr,
5600 };
5601 
5602 #define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
5603 #define ICX_UPI_REGS_ADDR_FUNCTION	0x01
5604 
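/*
 * For each ubox device, resolve its node/group ID pair to a logical
 * package, then probe the per-link UPI devices (dev_link0 + link,
 * function 1) on the same bus to fill in that package's UPI topology.
 */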
5605 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5606 {
5607 	struct pci_dev *ubox = NULL;
5608 	struct pci_dev *dev = NULL;
5609 	u32 nid, gid;
5610 	int idx, lgc_pkg, ret = -EPERM;
5611 	struct intel_uncore_topology *upi;
5612 	unsigned int devfn;
5613 
5614 	/* GIDNIDMAP method supports machines which have less than 8 sockets. */
5615 	if (uncore_max_dies() > 8)
5616 		goto err;
5617 
5618 	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5619 		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5620 		if (ret) {
5621 			ret = pcibios_err_to_errno(ret);
5622 			break;
5623 		}
5624 
5625 		lgc_pkg = topology_gidnid_map(nid, gid);
5626 		if (lgc_pkg < 0) {
5627 			ret = -EPERM;
5628 			goto err;
5629 		}
5630 		for (idx = 0; idx < type->num_boxes; idx++) {
5631 			upi = &type->topology[lgc_pkg][idx];
5632 			devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5633 			dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5634 							  ubox->bus->number,
5635 							  devfn);
5636 			if (dev) {
5637 				ret = upi_fill_topology(dev, upi, idx);
5638 				if (ret)
5639 					goto err;
5640 			}
5641 		}
5642 	}
5643 err:
5644 	pci_dev_put(ubox);
5645 	pci_dev_put(dev);
5646 	return ret;
5647 }
5648 
5649 static int icx_upi_get_topology(struct intel_uncore_type *type)
5650 {
5651 	return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5652 }
5653 
5654 static struct attribute_group icx_upi_mapping_group = {
5655 	.is_visible	= skx_upi_mapping_visible,
5656 };
5657 
5658 static const struct attribute_group *icx_upi_attr_update[] = {
5659 	&icx_upi_mapping_group,
5660 	NULL
5661 };
5662 
5663 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5664 {
5665 	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5666 }
5667 
5668 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5669 {
5670 	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5671 }
5672 
5673 static struct intel_uncore_type icx_uncore_upi = {
5674 	.name		= "upi",
5675 	.num_counters   = 4,
5676 	.num_boxes	= 3,
5677 	.perf_ctr_bits	= 48,
5678 	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
5679 	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
5680 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5681 	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5682 	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
5683 	.ops		= &skx_upi_uncore_pci_ops,
5684 	.format_group	= &icx_upi_uncore_format_group,
5685 	.attr_update	= icx_upi_attr_update,
5686 	.get_topology	= icx_upi_get_topology,
5687 	.set_mapping	= icx_upi_set_mapping,
5688 	.cleanup_mapping = icx_upi_cleanup_mapping,
5689 };
5690 
5691 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5692 	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5693 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5694 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5695 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5696 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5697 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5698 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5699 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5700 	EVENT_CONSTRAINT_END
5701 };
5702 
5703 static struct intel_uncore_type icx_uncore_m3upi = {
5704 	.name		= "m3upi",
5705 	.num_counters   = 4,
5706 	.num_boxes	= 3,
5707 	.perf_ctr_bits	= 48,
5708 	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
5709 	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
5710 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5711 	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
5712 	.constraints	= icx_uncore_m3upi_constraints,
5713 	.ops		= &ivbep_uncore_pci_ops,
5714 	.format_group	= &skx_uncore_format_group,
5715 };
5716 
5717 enum {
5718 	ICX_PCI_UNCORE_M2M,
5719 	ICX_PCI_UNCORE_UPI,
5720 	ICX_PCI_UNCORE_M3UPI,
5721 };
5722 
5723 static struct intel_uncore_type *icx_pci_uncores[] = {
5724 	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
5725 	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
5726 	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
5727 	NULL,
5728 };
5729 
5730 static const struct pci_device_id icx_uncore_pci_ids[] = {
5731 	{ /* M2M 0 */
5732 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5733 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5734 	},
5735 	{ /* M2M 1 */
5736 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5737 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5738 	},
5739 	{ /* M2M 2 */
5740 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5741 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5742 	},
5743 	{ /* M2M 3 */
5744 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5745 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5746 	},
5747 	{ /* UPI Link 0 */
5748 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5749 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5750 	},
5751 	{ /* UPI Link 1 */
5752 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5753 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5754 	},
5755 	{ /* UPI Link 2 */
5756 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5757 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5758 	},
5759 	{ /* M3UPI Link 0 */
5760 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5761 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5762 	},
5763 	{ /* M3UPI Link 1 */
5764 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5765 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5766 	},
5767 	{ /* M3UPI Link 2 */
5768 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5769 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5770 	},
5771 	{ /* end: all zeroes */ }
5772 };
5773 
5774 static struct pci_driver icx_uncore_pci_driver = {
5775 	.name		= "icx_uncore",
5776 	.id_table	= icx_uncore_pci_ids,
5777 };
5778 
5779 int icx_uncore_pci_init(void)
5780 {
5781 	/* ICX UBOX DID */
5782 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5783 					 SKX_GIDNIDMAP, true);
5784 
5785 	if (ret)
5786 		return ret;
5787 
5788 	uncore_pci_uncores = icx_pci_uncores;
5789 	uncore_pci_driver = &icx_uncore_pci_driver;
5790 	return 0;
5791 }
5792 
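/*
 * IMC boxes are grouped by memory controller: pmu_idx modulo
 * ICX_NUMBER_IMC_CHN selects the channel within an MC (box control
 * stride), while the quotient selects the MC's BAR via
 * ICX_IMC_MEM_STRIDE.
 */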
5793 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5794 {
5795 	unsigned int box_ctl = box->pmu->type->box_ctl +
5796 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5797 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5798 			 SNR_IMC_MMIO_MEM0_OFFSET;
5799 
5800 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5801 				   SNR_MC_DEVICE_ID);
5802 }
5803 
5804 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5805 	.init_box	= icx_uncore_imc_init_box,
5806 	.exit_box	= uncore_mmio_exit_box,
5807 	.disable_box	= snr_uncore_mmio_disable_box,
5808 	.enable_box	= snr_uncore_mmio_enable_box,
5809 	.disable_event	= snr_uncore_mmio_disable_event,
5810 	.enable_event	= snr_uncore_mmio_enable_event,
5811 	.read_counter	= uncore_mmio_read_counter,
5812 };
5813 
5814 static struct intel_uncore_type icx_uncore_imc = {
5815 	.name		= "imc",
5816 	.num_counters   = 4,
5817 	.num_boxes	= 12,
5818 	.perf_ctr_bits	= 48,
5819 	.fixed_ctr_bits	= 48,
5820 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5821 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5822 	.event_descs	= snr_uncore_imc_events,
5823 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5824 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5825 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5826 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5827 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5828 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5829 	.ops		= &icx_uncore_mmio_ops,
5830 	.format_group	= &skx_uncore_format_group,
5831 };
5832 
5833 enum perf_uncore_icx_imc_freerunning_type_id {
5834 	ICX_IMC_DCLK,
5835 	ICX_IMC_DDR,
5836 	ICX_IMC_DDRT,
5837 
5838 	ICX_IMC_FREERUNNING_TYPE_MAX,
5839 };
5840 
5841 static struct freerunning_counters icx_imc_freerunning[] = {
5842 	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5843 	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5844 	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
5845 };
5846 
5847 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5848 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
5849 
5850 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
5851 	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
5852 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
5853 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
5854 	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
5855 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
5856 
5857 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
5858 	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
5859 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
5860 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
5861 	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
5862 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
5863 	{ /* end: all zeroes */ },
5864 };
5865 
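/*
 * Free-running IMC counters are exposed per memory controller;
 * pmu_idx selects the MC's BAR via ICX_IMC_MEM_STRIDE.
 */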
5866 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5867 {
5868 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5869 			 SNR_IMC_MMIO_MEM0_OFFSET;
5870 
5871 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5872 			    mem_offset, SNR_MC_DEVICE_ID);
5873 }
5874 
5875 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5876 	.init_box	= icx_uncore_imc_freerunning_init_box,
5877 	.exit_box	= uncore_mmio_exit_box,
5878 	.read_counter	= uncore_mmio_read_counter,
5879 	.hw_config	= uncore_freerunning_hw_config,
5880 };
5881 
5882 static struct intel_uncore_type icx_uncore_imc_free_running = {
5883 	.name			= "imc_free_running",
5884 	.num_counters		= 5,
5885 	.num_boxes		= 4,
5886 	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
5887 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5888 	.freerunning		= icx_imc_freerunning,
5889 	.ops			= &icx_uncore_imc_freerunning_ops,
5890 	.event_descs		= icx_uncore_imc_freerunning_events,
5891 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5892 };
5893 
5894 static struct intel_uncore_type *icx_mmio_uncores[] = {
5895 	&icx_uncore_imc,
5896 	&icx_uncore_imc_free_running,
5897 	NULL,
5898 };
5899 
5900 void icx_uncore_mmio_init(void)
5901 {
5902 	uncore_mmio_uncores = icx_mmio_uncores;
5903 }
5904 
5905 /* end of ICX uncore support */
5906 
5907 /* SPR uncore support */
5908 
5909 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5910 					struct perf_event *event)
5911 {
5912 	struct hw_perf_event *hwc = &event->hw;
5913 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5914 
5915 	if (reg1->idx != EXTRA_REG_NONE)
5916 		wrmsrl(reg1->reg, reg1->config);
5917 
5918 	wrmsrl(hwc->config_base, hwc->config);
5919 }
5920 
5921 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5922 					 struct perf_event *event)
5923 {
5924 	struct hw_perf_event *hwc = &event->hw;
5925 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5926 
5927 	if (reg1->idx != EXTRA_REG_NONE)
5928 		wrmsrl(reg1->reg, 0);
5929 
5930 	wrmsrl(hwc->config_base, 0);
5931 }
5932 
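/*
 * On SPR the CHA box ID comes from the discovery table rather than
 * directly from pmu_idx, so look it up before computing the filter
 * MSR address.
 */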
5933 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5934 {
5935 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5936 	bool tid_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5937 	struct intel_uncore_type *type = box->pmu->type;
5938 	int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx);
5939 
5940 	if (tid_en) {
5941 		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5942 			    HSWEP_CBO_MSR_OFFSET * id;
5943 		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5944 		reg1->idx = 0;
5945 	}
5946 
5947 	return 0;
5948 }
5949 
5950 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5951 	.init_box		= intel_generic_uncore_msr_init_box,
5952 	.disable_box		= intel_generic_uncore_msr_disable_box,
5953 	.enable_box		= intel_generic_uncore_msr_enable_box,
5954 	.disable_event		= spr_uncore_msr_disable_event,
5955 	.enable_event		= spr_uncore_msr_enable_event,
5956 	.read_counter		= uncore_msr_read_counter,
5957 	.hw_config		= spr_cha_hw_config,
5958 	.get_constraint		= uncore_get_constraint,
5959 	.put_constraint		= uncore_put_constraint,
5960 };
5961 
5962 static struct attribute *spr_uncore_cha_formats_attr[] = {
5963 	&format_attr_event.attr,
5964 	&format_attr_umask_ext5.attr,
5965 	&format_attr_tid_en2.attr,
5966 	&format_attr_edge.attr,
5967 	&format_attr_inv.attr,
5968 	&format_attr_thresh8.attr,
5969 	&format_attr_filter_tid5.attr,
5970 	NULL,
5971 };
5972 static const struct attribute_group spr_uncore_chabox_format_group = {
5973 	.name = "format",
5974 	.attrs = spr_uncore_cha_formats_attr,
5975 };
5976 
5977 static ssize_t alias_show(struct device *dev,
5978 			  struct device_attribute *attr,
5979 			  char *buf)
5980 {
5981 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5982 	char pmu_name[UNCORE_PMU_NAME_LEN];
5983 
5984 	uncore_get_alias_name(pmu_name, pmu);
5985 	return sysfs_emit(buf, "%s\n", pmu_name);
5986 }
5987 
5988 static DEVICE_ATTR_RO(alias);
5989 
5990 static struct attribute *uncore_alias_attrs[] = {
5991 	&dev_attr_alias.attr,
5992 	NULL
5993 };
5994 
5995 ATTRIBUTE_GROUPS(uncore_alias);
5996 
5997 static struct intel_uncore_type spr_uncore_chabox = {
5998 	.name			= "cha",
5999 	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
6000 	.event_mask_ext		= SPR_CHA_EVENT_MASK_EXT,
6001 	.num_shared_regs	= 1,
6002 	.constraints		= skx_uncore_chabox_constraints,
6003 	.ops			= &spr_uncore_chabox_ops,
6004 	.format_group		= &spr_uncore_chabox_format_group,
6005 	.attr_update		= uncore_alias_groups,
6006 };
6007 
6008 static struct intel_uncore_type spr_uncore_iio = {
6009 	.name			= "iio",
6010 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6011 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
6012 	.format_group		= &snr_uncore_iio_format_group,
6013 	.attr_update		= uncore_alias_groups,
6014 	.constraints		= icx_uncore_iio_constraints,
6015 };
6016 
6017 static struct attribute *spr_uncore_raw_formats_attr[] = {
6018 	&format_attr_event.attr,
6019 	&format_attr_umask_ext4.attr,
6020 	&format_attr_edge.attr,
6021 	&format_attr_inv.attr,
6022 	&format_attr_thresh8.attr,
6023 	NULL,
6024 };
6025 
6026 static const struct attribute_group spr_uncore_raw_format_group = {
6027 	.name			= "format",
6028 	.attrs			= spr_uncore_raw_formats_attr,
6029 };
6030 
6031 #define SPR_UNCORE_COMMON_FORMAT()				\
6032 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
6033 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
6034 	.format_group		= &spr_uncore_raw_format_group,	\
6035 	.attr_update		= uncore_alias_groups
6036 
6037 static struct intel_uncore_type spr_uncore_irp = {
6038 	SPR_UNCORE_COMMON_FORMAT(),
6039 	.name			= "irp",
6041 };
6042 
6043 static struct event_constraint spr_uncore_m2pcie_constraints[] = {
6044 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
6045 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
6046 	EVENT_CONSTRAINT_END
6047 };
6048 
6049 static struct intel_uncore_type spr_uncore_m2pcie = {
6050 	SPR_UNCORE_COMMON_FORMAT(),
6051 	.name			= "m2pcie",
6052 	.constraints		= spr_uncore_m2pcie_constraints,
6053 };
6054 
6055 static struct intel_uncore_type spr_uncore_pcu = {
6056 	.name			= "pcu",
6057 	.attr_update		= uncore_alias_groups,
6058 };
6059 
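/*
 * The fixed counter's control register only takes the enable bit;
 * generic counters get the full event configuration.
 */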
6060 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
6061 					 struct perf_event *event)
6062 {
6063 	struct hw_perf_event *hwc = &event->hw;
6064 
6065 	if (!box->io_addr)
6066 		return;
6067 
6068 	if (uncore_pmc_fixed(hwc->idx))
6069 		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
6070 	else
6071 		writel(hwc->config, box->io_addr + hwc->config_base);
6072 }
6073 
6074 static struct intel_uncore_ops spr_uncore_mmio_ops = {
6075 	.init_box		= intel_generic_uncore_mmio_init_box,
6076 	.exit_box		= uncore_mmio_exit_box,
6077 	.disable_box		= intel_generic_uncore_mmio_disable_box,
6078 	.enable_box		= intel_generic_uncore_mmio_enable_box,
6079 	.disable_event		= intel_generic_uncore_mmio_disable_event,
6080 	.enable_event		= spr_uncore_mmio_enable_event,
6081 	.read_counter		= uncore_mmio_read_counter,
6082 };
6083 
6084 static struct uncore_event_desc spr_uncore_imc_events[] = {
6085 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
6086 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
6087 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
6088 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
6089 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
6090 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
6091 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
6092 	{ /* end: all zeroes */ },
6093 };
6094 
6095 #define SPR_UNCORE_MMIO_COMMON_FORMAT()				\
6096 	SPR_UNCORE_COMMON_FORMAT(),				\
6097 	.ops			= &spr_uncore_mmio_ops
6098 
6099 static struct intel_uncore_type spr_uncore_imc = {
6100 	SPR_UNCORE_MMIO_COMMON_FORMAT(),
6101 	.name			= "imc",
6102 	.fixed_ctr_bits		= 48,
6103 	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
6104 	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
6105 	.event_descs		= spr_uncore_imc_events,
6106 };
6107 
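/*
 * SPR event control registers are 64 bits wide; the config is written
 * as two 32-bit accesses, upper dword first.
 */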
6108 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
6109 					struct perf_event *event)
6110 {
6111 	struct pci_dev *pdev = box->pci_dev;
6112 	struct hw_perf_event *hwc = &event->hw;
6113 
6114 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
6115 	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6116 }
6117 
6118 static struct intel_uncore_ops spr_uncore_pci_ops = {
6119 	.init_box		= intel_generic_uncore_pci_init_box,
6120 	.disable_box		= intel_generic_uncore_pci_disable_box,
6121 	.enable_box		= intel_generic_uncore_pci_enable_box,
6122 	.disable_event		= intel_generic_uncore_pci_disable_event,
6123 	.enable_event		= spr_uncore_pci_enable_event,
6124 	.read_counter		= intel_generic_uncore_pci_read_counter,
6125 };
6126 
6127 #define SPR_UNCORE_PCI_COMMON_FORMAT()			\
6128 	SPR_UNCORE_COMMON_FORMAT(),			\
6129 	.ops			= &spr_uncore_pci_ops
6130 
6131 static struct intel_uncore_type spr_uncore_m2m = {
6132 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6133 	.name			= "m2m",
6134 };
6135 
6136 static struct attribute_group spr_upi_mapping_group = {
6137 	.is_visible	= skx_upi_mapping_visible,
6138 };
6139 
6140 static const struct attribute_group *spr_upi_attr_update[] = {
6141 	&uncore_alias_group,
6142 	&spr_upi_mapping_group,
6143 	NULL
6144 };
6145 
6146 #define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01
6147 
6148 static void spr_upi_set_mapping(struct intel_uncore_type *type)
6149 {
6150 	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6151 }
6152 
6153 static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
6154 {
6155 	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6156 }
6157 
6158 static int spr_upi_get_topology(struct intel_uncore_type *type)
6159 {
6160 	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
6161 }
6162 
6163 static struct intel_uncore_type spr_uncore_mdf = {
6164 	SPR_UNCORE_COMMON_FORMAT(),
6165 	.name			= "mdf",
6166 };
6167 
6168 static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box)
6169 {
6170 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
6171 	intel_generic_uncore_mmio_init_box(box);
6172 }
6173 
6174 static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = {
6175 	.init_box		= spr_uncore_mmio_offs8_init_box,
6176 	.exit_box		= uncore_mmio_exit_box,
6177 	.disable_box		= intel_generic_uncore_mmio_disable_box,
6178 	.enable_box		= intel_generic_uncore_mmio_enable_box,
6179 	.disable_event		= intel_generic_uncore_mmio_disable_event,
6180 	.enable_event		= spr_uncore_mmio_enable_event,
6181 	.read_counter		= uncore_mmio_read_counter,
6182 };
6183 
6184 #define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT()			\
6185 	SPR_UNCORE_COMMON_FORMAT(),				\
6186 	.ops			= &spr_uncore_mmio_offs8_ops
6187 
6188 static struct event_constraint spr_uncore_cxlcm_constraints[] = {
6189 	UNCORE_EVENT_CONSTRAINT(0x02, 0x0f),
6190 	UNCORE_EVENT_CONSTRAINT(0x05, 0x0f),
6191 	UNCORE_EVENT_CONSTRAINT(0x40, 0xf0),
6192 	UNCORE_EVENT_CONSTRAINT(0x41, 0xf0),
6193 	UNCORE_EVENT_CONSTRAINT(0x42, 0xf0),
6194 	UNCORE_EVENT_CONSTRAINT(0x43, 0xf0),
6195 	UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0),
6196 	UNCORE_EVENT_CONSTRAINT(0x52, 0xf0),
6197 	EVENT_CONSTRAINT_END
6198 };
6199 
6200 static struct intel_uncore_type spr_uncore_cxlcm = {
6201 	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
6202 	.name			= "cxlcm",
6203 	.constraints		= spr_uncore_cxlcm_constraints,
6204 };
6205 
6206 static struct intel_uncore_type spr_uncore_cxldp = {
6207 	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
6208 	.name			= "cxldp",
6209 };
6210 
6211 static struct intel_uncore_type spr_uncore_hbm = {
6212 	SPR_UNCORE_COMMON_FORMAT(),
6213 	.name			= "hbm",
6214 };
6215 
6216 #define UNCORE_SPR_NUM_UNCORE_TYPES		15
6217 #define UNCORE_SPR_CHA				0
6218 #define UNCORE_SPR_IIO				1
6219 #define UNCORE_SPR_IMC				6
6220 #define UNCORE_SPR_UPI				8
6221 #define UNCORE_SPR_M3UPI			9
6222 
6223 /*
6224  * The uncore units that are supported by the discovery table are
6225  * defined here.
6226  */
6227 static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
6228 	&spr_uncore_chabox,
6229 	&spr_uncore_iio,
6230 	&spr_uncore_irp,
6231 	&spr_uncore_m2pcie,
6232 	&spr_uncore_pcu,
6233 	NULL,
6234 	&spr_uncore_imc,
6235 	&spr_uncore_m2m,
6236 	NULL,
6237 	NULL,
6238 	NULL,
6239 	&spr_uncore_mdf,
6240 	&spr_uncore_cxlcm,
6241 	&spr_uncore_cxldp,
6242 	&spr_uncore_hbm,
6243 };
6244 
6245 /*
6246  * The uncore units that are not supported by the discovery table are
6247  * implemented from here on.
6248  */
#define SPR_UNCORE_UPI_NUM_BOXES	4

static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
	0, 0x8000, 0x10000, 0x18000
};

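/*
 * Free the per-unit rb-tree that spr_update_device_location() builds
 * for the UPI/M3UPI types, which bypass the broken discovery table.
 */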
static void spr_extra_boxes_cleanup(struct intel_uncore_type *type)
{
	struct intel_uncore_discovery_unit *pos;
	struct rb_node *node;

	if (!type->boxes)
		return;

	while (!RB_EMPTY_ROOT(type->boxes)) {
		node = rb_first(type->boxes);
		pos = rb_entry(node, struct intel_uncore_discovery_unit, node);
		rb_erase(node, type->boxes);
		kfree(pos);
	}
	kfree(type->boxes);
	type->boxes = NULL;
}

static struct intel_uncore_type spr_uncore_upi = {
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.format_group		= &spr_uncore_raw_format_group,
	.ops			= &spr_uncore_pci_ops,
	.name			= "upi",
	.attr_update		= spr_upi_attr_update,
	.get_topology		= spr_upi_get_topology,
	.set_mapping		= spr_upi_set_mapping,
	.cleanup_mapping	= spr_upi_cleanup_mapping,
	.type_id		= UNCORE_SPR_UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};

static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.type_id		= UNCORE_SPR_M3UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL,
	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.constraints		= icx_uncore_m3upi_constraints,
	.cleanup_extra_boxes	= spr_extra_boxes_cleanup,
};

enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

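/* { counter_base, counter_offset, box_offset, num_counters, bits } */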
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

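/*
 * The 3.814697266e-6 MiB scale above converts a 4-byte-granularity
 * count to MiB (4 / 2^20).  Illustrative usage, assuming the PMU name
 * follows the usual uncore_<type>_<instance> sysfs convention:
 *   perf stat -e uncore_iio_free_running_0/bw_in_port0/ -a sleep 1
 */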
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

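/* DCLK ticks plus read/write pending queue (RPQ/WPQ) occupancy cycles */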
static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};

#define SPR_MC_DEVICE_ID	0x3251

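/*
 * Each IMC free-running PMU maps its own MMIO region; the per-channel
 * offset is derived from the PMU index.
 */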
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
#define UNCORE_SPR_PCI_EXTRA_UNCORES		2

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
	&spr_uncore_upi,
	&spr_uncore_m3upi
};

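/*
 * Discovery-table entries to skip: UPI and M3UPI are set up from the
 * pre-defined tables above instead.
 */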
int spr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_SPR_M3UPI,
	UNCORE_IGNORE_END
};

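/*
 * The discovery code fills in the generic register layout; copy over
 * only the fields that the static per-platform tables customize.
 */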
static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
					struct intel_uncore_type *from_type)
{
	if (!to_type || !from_type)
		return;

	if (from_type->name)
		to_type->name = from_type->name;
	if (from_type->fixed_ctr_bits)
		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
	if (from_type->event_mask)
		to_type->event_mask = from_type->event_mask;
	if (from_type->event_mask_ext)
		to_type->event_mask_ext = from_type->event_mask_ext;
	if (from_type->fixed_ctr)
		to_type->fixed_ctr = from_type->fixed_ctr;
	if (from_type->fixed_ctl)
		to_type->fixed_ctl = from_type->fixed_ctl;
	if (from_type->num_shared_regs)
		to_type->num_shared_regs = from_type->num_shared_regs;
	if (from_type->constraints)
		to_type->constraints = from_type->constraints;
	if (from_type->ops)
		to_type->ops = from_type->ops;
	if (from_type->event_descs)
		to_type->event_descs = from_type->event_descs;
	if (from_type->format_group)
		to_type->format_group = from_type->format_group;
	if (from_type->attr_update)
		to_type->attr_update = from_type->attr_update;
	if (from_type->set_mapping)
		to_type->set_mapping = from_type->set_mapping;
	if (from_type->get_topology)
		to_type->get_topology = from_type->get_topology;
	if (from_type->cleanup_mapping)
		to_type->cleanup_mapping = from_type->cleanup_mapping;
}

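/*
 * Build the NULL-terminated uncore type array from the discovery
 * table, apply the platform customizations, then append the
 * @num_extra types that the discovery table does not describe.
 */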
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra, int max_num_types,
		   struct intel_uncore_type **uncores)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= max_num_types)
			continue;
		uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
	for (; *types; types++) {
		if (type_id == (*types)->type_id)
			return *types;
	}

	return NULL;
}

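/*
 * Return the highest unit ID of @type_id across all dies, plus one,
 * i.e. the number of PMU instances that must be registered.
 */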
static int uncore_type_max_boxes(struct intel_uncore_type **types,
				 int type_id)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct rb_node *node;
	int max = 0;

	type = uncore_find_type_by_id(types, type_id);
	if (!type)
		return 0;

	for (node = rb_first(type->boxes); node; node = rb_next(node)) {
		unit = rb_entry(node, struct intel_uncore_discovery_unit, node);

		if (unit->id > max)
			max = unit->id;
	}
	return max + 1;
}

#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in the type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
		 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG instead.
		 */
		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

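/*
 * Enumerate the UPI/M3UPI units by walking their PCI devices directly,
 * building the unit rb-tree that the broken discovery table would
 * normally provide.  The unit ID is the device's slot relative to the
 * unit-0 devfn above.
 */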
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_discovery_unit *unit;
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	struct rb_root *root;
	u32 device, devfn;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	root = kzalloc(sizeof(struct rb_root), GFP_KERNEL);
	if (!root) {
		type->num_boxes = 0;
		return;
	}
	*root = RB_ROOT;

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {

		die = uncore_device_to_die(dev);
		if (die < 0)
			continue;

		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (!unit)
			continue;
		unit->die = die;
		unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn);
		unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			     dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			     devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			     type->box_ctl;

		unit->pmu_idx = unit->id;

		uncore_find_add_unit(unit, root, NULL);
	}

	type->boxes = root;
}

int spr_uncore_pci_init(void)
{
	/*
	 * The discovery table of UPI on some SPR variants is broken,
	 * which impacts the detection of both the UPI and M3UPI uncore PMON.
	 * Use the pre-defined UPI and M3UPI tables instead.
	 *
	 * The accurate location, e.g., the domain and bus number,
	 * can only be retrieved at load time.
	 * Update the location of the UPI and M3UPI units here.
	 */
	spr_update_device_location(UNCORE_SPR_UPI);
	spr_update_device_location(UNCORE_SPR_M3UPI);
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
						UNCORE_SPR_PCI_EXTRA_UNCORES,
						spr_pci_uncores,
						UNCORE_SPR_NUM_UNCORE_TYPES,
						spr_uncores);
	return 0;
}

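/*
 * The free-running IMC counters need the pci bus to socket mapping
 * (built from device 0x3250); register them only when that mapping
 * can be set up.
 */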
void spr_uncore_mmio_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

	if (ret) {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);
	} else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores,
							 UNCORE_SPR_NUM_UNCORE_TYPES,
							 spr_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
}

/* end of SPR uncore support */

/* GNR uncore support */

#define UNCORE_GNR_NUM_UNCORE_TYPES	23
#define UNCORE_GNR_TYPE_15		15
#define UNCORE_GNR_B2UPI		18
#define UNCORE_GNR_TYPE_21		21
#define UNCORE_GNR_TYPE_22		22

int gnr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_GNR_TYPE_15,
	UNCORE_GNR_B2UPI,
	UNCORE_GNR_TYPE_21,
	UNCORE_GNR_TYPE_22,
	UNCORE_IGNORE_END
};

static struct intel_uncore_type gnr_uncore_ubox = {
	.name			= "ubox",
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type gnr_uncore_b2cmi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "b2cmi",
};

static struct intel_uncore_type gnr_uncore_b2cxl = {
	SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(),
	.name			= "b2cxl",
};

static struct intel_uncore_type gnr_uncore_mdf_sbo = {
	.name			= "mdf_sbo",
	.attr_update		= uncore_alias_groups,
};

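/*
 * GNR reuses the SPR types for the units whose register layout is
 * unchanged; NULL slots fall back to the generic discovery support.
 */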
static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	NULL,
	&spr_uncore_pcu,
	&gnr_uncore_ubox,
	&spr_uncore_imc,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&gnr_uncore_b2cmi,
	&gnr_uncore_b2cxl,
	NULL,
	NULL,
	&gnr_uncore_mdf_sbo,
	NULL,
	NULL,
};

static struct freerunning_counters gnr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x290e, 0x01, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
};

void gnr_uncore_cpu_init(void)
{
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
	spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
}

int gnr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
						UNCORE_GNR_NUM_UNCORE_TYPES,
						gnr_uncores);
	return 0;
}

void gnr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
						 UNCORE_GNR_NUM_UNCORE_TYPES,
						 gnr_uncores);
}

/* end of GNR uncore support */