xref: /linux/arch/x86/events/intel/uncore_snbep.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include <asm/cpu_device_id.h>
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

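/*
 * Extract the i-th n-bit wide field from x, e.g. __BITS_VALUE(val, 2, 6)
 * yields bits 17:12 of val.  Used below to unpack the per-field reference
 * counts that are packed into a single atomic_t.
 */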
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSRs
 * that BIOS programmed. The MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
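/*
 * Given the layout above, bus number i is
 * (msr_value >> (i * BUS_NUM_STRIDE)) & 0xff, and is only meaningful
 * once SKX_MSR_CPU_BUS_VALID_BIT is set.
 */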

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

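/*
 * Each format attribute below exposes one bit field of the event encoding
 * through sysfs, e.g. "event" occupies config:0-7 and "umask" config:8-15.
 * Multiple variants of the same logical field exist because the bit layout
 * differs between uncore generations.
 */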
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

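/*
 * Disabling/enabling a PCI PMON box toggles the freeze (FRZ) bit in the
 * box control register: counters stop counting while the box is frozen
 * and resume when it is unfrozen.
 */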
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

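/*
 * PCI config space is read 32 bits at a time, so the 64-bit counter is
 * assembled from two consecutive dword reads.
 */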
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

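/*
 * The .idx field of each entry is a bitmask of the Cbox filter fields
 * (TID/NID/STATE/OPC, see snbep_cbox_filter_mask()) that the event
 * consumes from the shared filter register.
 */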
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

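/*
 * The shared filter register is arbitrated with a packed reference count:
 * each of the five filter fields owns a 6-bit counter inside er->ref.  A
 * field may be shared between events only if they program identical filter
 * bits, which is what the config comparison below checks.
 */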
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

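/*
 * The PCU frequency-band filters occupy one byte each in the filter
 * register, indexed 0-3.  Moving an event to another band shifts its
 * filter byte by the index delta and adjusts the event select by the
 * same amount.
 */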
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

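/*
 * Event selects 0xb-0xe are the band occupancy events; map each one to
 * its byte in the PCU filter register.
 */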
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

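/*
 * There is one Cbox per physical core, so trim the static Cbox count on
 * parts that ship with fewer cores per package.
 */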
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
		snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

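/*
 * Event 0x38 is the QPI packet match/mask event; config1 and config2
 * supply the 64-bit MATCH and MASK values that snbep_qpi_enable_event()
 * writes to the companion filter device.
 */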
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Every three bits in bits 23:0 of the GIDNIDMAP register correspond to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)

static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
			      int *nodeid, int *groupid)
{
	int ret;

	/* get the Node ID of the local register */
	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
	if (ret)
		goto err;

	*nodeid = *nodeid & NODE_ID_MASK;
	/* get the Node ID mapping */
	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
	if (ret)
		goto err;
err:
	return ret;
}

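/*
 * Translate a node id to a logical die id using the GIDNIDMAP contents.
 * Returns a negative value if the node id is not found in the mapping.
 */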
static int topology_gidnid_map(int nodeid, u32 gidnid)
{
	int i, die_id = -1;

	/*
	 * Every 3-bit field in the Node ID mapping register maps
	 * to a particular node.
	 */
	for (i = 0; i < 8; i++) {
		if (nodeid == GIDNIDMAP(gidnid, i)) {
			if (topology_max_dies_per_package() > 1)
				die_id = i;
			else
				die_id = topology_phys_to_logical_pkg(i);
			if (die_id < 0)
				die_id = -ENODEV;
			break;
		}
	}

	return die_id;
}

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = topology_gidnid_map(nodeid, config);
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);

			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For a PCI bus with no UBOX device, find the next bus
		 * that has a UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return pcibios_err_to_errno(err);
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
1663 	.name = "format",
1664 	.attrs = ivbep_uncore_pcu_formats_attr,
1665 };
1666 
1667 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1668 	.name = "format",
1669 	.attrs = ivbep_uncore_qpi_formats_attr,
1670 };
1671 
1672 static struct intel_uncore_type ivbep_uncore_ubox = {
1673 	.name		= "ubox",
1674 	.num_counters   = 2,
1675 	.num_boxes	= 1,
1676 	.perf_ctr_bits	= 44,
1677 	.fixed_ctr_bits	= 48,
1678 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1679 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1680 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1681 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1682 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1683 	.ops		= &ivbep_uncore_msr_ops,
1684 	.format_group	= &ivbep_uncore_ubox_format_group,
1685 };
1686 
1687 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1688 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1689 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1690 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1691 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1692 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1693 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1694 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1695 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1696 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1697 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1698 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1699 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1700 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1701 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1702 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1703 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1704 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1705 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1706 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1707 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1708 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1709 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1710 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1711 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1712 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1713 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1714 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1715 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1716 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1717 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1718 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1719 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1720 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1721 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1722 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1723 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1724 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1725 	EVENT_EXTRA_END
1726 };
1727 
1728 static u64 ivbep_cbox_filter_mask(int fields)
1729 {
1730 	u64 mask = 0;
1731 
1732 	if (fields & 0x1)
1733 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1734 	if (fields & 0x2)
1735 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1736 	if (fields & 0x4)
1737 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1738 	if (fields & 0x8)
1739 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1740 	if (fields & 0x10) {
1741 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1742 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1743 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1744 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1745 	}
1746 
1747 	return mask;
1748 }
1749 
1750 static struct event_constraint *
1751 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1752 {
1753 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1754 }
1755 
1756 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1757 {
1758 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1759 	struct extra_reg *er;
1760 	int idx = 0;
1761 
1762 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1763 		if (er->event != (event->hw.config & er->config_mask))
1764 			continue;
1765 		idx |= er->idx;
1766 	}
1767 
1768 	if (idx) {
1769 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1770 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1771 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1772 		reg1->idx = idx;
1773 	}
1774 	return 0;
1775 }
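/*
 * Worked example for the loop above: an event with config 0x0135 and
 * the TID enable bit set matches both the TID_EN row (idx 0x1) and the
 * 0x0135 row (idx 0x10) of ivbep_uncore_cbox_extra_regs, so idx
 * accumulates to 0x11 and ivbep_cbox_filter_mask(0x11) admits the TID,
 * OPC, NC, C6 and ISOC fields of config1 into the filter register.
 */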
1776 
1777 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1778 {
1779 	struct hw_perf_event *hwc = &event->hw;
1780 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1781 
1782 	if (reg1->idx != EXTRA_REG_NONE) {
1783 		u64 filter = uncore_shared_reg_config(box, 0);
1784 		wrmsrl(reg1->reg, filter & 0xffffffff);
1785 		wrmsrl(reg1->reg + 6, filter >> 32);
1786 	}
1787 
1788 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1789 }
1790 
1791 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1792 	.init_box		= ivbep_uncore_msr_init_box,
1793 	.disable_box		= snbep_uncore_msr_disable_box,
1794 	.enable_box		= snbep_uncore_msr_enable_box,
1795 	.disable_event		= snbep_uncore_msr_disable_event,
1796 	.enable_event		= ivbep_cbox_enable_event,
1797 	.read_counter		= uncore_msr_read_counter,
1798 	.hw_config		= ivbep_cbox_hw_config,
1799 	.get_constraint		= ivbep_cbox_get_constraint,
1800 	.put_constraint		= snbep_cbox_put_constraint,
1801 };
1802 
1803 static struct intel_uncore_type ivbep_uncore_cbox = {
1804 	.name			= "cbox",
1805 	.num_counters		= 4,
1806 	.num_boxes		= 15,
1807 	.perf_ctr_bits		= 44,
1808 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1809 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1810 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1811 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1812 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1813 	.num_shared_regs	= 1,
1814 	.constraints		= snbep_uncore_cbox_constraints,
1815 	.ops			= &ivbep_uncore_cbox_ops,
1816 	.format_group		= &ivbep_uncore_cbox_format_group,
1817 };
1818 
1819 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1820 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1821 	.hw_config		= snbep_pcu_hw_config,
1822 	.get_constraint		= snbep_pcu_get_constraint,
1823 	.put_constraint		= snbep_pcu_put_constraint,
1824 };
1825 
1826 static struct intel_uncore_type ivbep_uncore_pcu = {
1827 	.name			= "pcu",
1828 	.num_counters		= 4,
1829 	.num_boxes		= 1,
1830 	.perf_ctr_bits		= 48,
1831 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1832 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1833 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1834 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1835 	.num_shared_regs	= 1,
1836 	.ops			= &ivbep_uncore_pcu_ops,
1837 	.format_group		= &ivbep_uncore_pcu_format_group,
1838 };
1839 
1840 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1841 	&ivbep_uncore_ubox,
1842 	&ivbep_uncore_cbox,
1843 	&ivbep_uncore_pcu,
1844 	NULL,
1845 };
1846 
1847 void ivbep_uncore_cpu_init(void)
1848 {
1849 	if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
1850 		ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
1851 	uncore_msr_uncores = ivbep_msr_uncores;
1852 }
1853 
1854 static struct intel_uncore_type ivbep_uncore_ha = {
1855 	.name		= "ha",
1856 	.num_counters   = 4,
1857 	.num_boxes	= 2,
1858 	.perf_ctr_bits	= 48,
1859 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1860 };
1861 
1862 static struct intel_uncore_type ivbep_uncore_imc = {
1863 	.name		= "imc",
1864 	.num_counters   = 4,
1865 	.num_boxes	= 8,
1866 	.perf_ctr_bits	= 48,
1867 	.fixed_ctr_bits	= 48,
1868 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1869 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1870 	.event_descs	= snbep_uncore_imc_events,
1871 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1872 };
1873 
1874 /* registers in IRP boxes are not evenly spaced; use explicit per-counter offset tables */
1875 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1876 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1877 
1878 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1879 {
1880 	struct pci_dev *pdev = box->pci_dev;
1881 	struct hw_perf_event *hwc = &event->hw;
1882 
1883 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1884 			       hwc->config | SNBEP_PMON_CTL_EN);
1885 }
1886 
1887 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1888 {
1889 	struct pci_dev *pdev = box->pci_dev;
1890 	struct hw_perf_event *hwc = &event->hw;
1891 
1892 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1893 }
1894 
1895 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1896 {
1897 	struct pci_dev *pdev = box->pci_dev;
1898 	struct hw_perf_event *hwc = &event->hw;
1899 	u64 count = 0;
1900 
1901 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1902 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1903 
1904 	return count;
1905 }
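/*
 * The casts above rely on x86 being little-endian: the first dword
 * read lands in the low half of 'count', the second in the high half.
 * An equivalent, endian-explicit sketch (the example_* name is
 * hypothetical):
 */
static inline u64 example_read_split_counter(struct pci_dev *pdev, int reg)
{
	u32 lo, hi;

	pci_read_config_dword(pdev, reg, &lo);		/* bits 31:0 */
	pci_read_config_dword(pdev, reg + 4, &hi);	/* bits 63:32 */
	return ((u64)hi << 32) | lo;
}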
1906 
1907 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1908 	.init_box	= ivbep_uncore_pci_init_box,
1909 	.disable_box	= snbep_uncore_pci_disable_box,
1910 	.enable_box	= snbep_uncore_pci_enable_box,
1911 	.disable_event	= ivbep_uncore_irp_disable_event,
1912 	.enable_event	= ivbep_uncore_irp_enable_event,
1913 	.read_counter	= ivbep_uncore_irp_read_counter,
1914 };
1915 
1916 static struct intel_uncore_type ivbep_uncore_irp = {
1917 	.name			= "irp",
1918 	.num_counters		= 4,
1919 	.num_boxes		= 1,
1920 	.perf_ctr_bits		= 48,
1921 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1922 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1923 	.ops			= &ivbep_uncore_irp_ops,
1924 	.format_group		= &ivbep_uncore_format_group,
1925 };
1926 
1927 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1928 	.init_box	= ivbep_uncore_pci_init_box,
1929 	.disable_box	= snbep_uncore_pci_disable_box,
1930 	.enable_box	= snbep_uncore_pci_enable_box,
1931 	.disable_event	= snbep_uncore_pci_disable_event,
1932 	.enable_event	= snbep_qpi_enable_event,
1933 	.read_counter	= snbep_uncore_pci_read_counter,
1934 	.hw_config	= snbep_qpi_hw_config,
1935 	.get_constraint	= uncore_get_constraint,
1936 	.put_constraint	= uncore_put_constraint,
1937 };
1938 
1939 static struct intel_uncore_type ivbep_uncore_qpi = {
1940 	.name			= "qpi",
1941 	.num_counters		= 4,
1942 	.num_boxes		= 3,
1943 	.perf_ctr_bits		= 48,
1944 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1945 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1946 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1947 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1948 	.num_shared_regs	= 1,
1949 	.ops			= &ivbep_uncore_qpi_ops,
1950 	.format_group		= &ivbep_uncore_qpi_format_group,
1951 };
1952 
1953 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1954 	.name		= "r2pcie",
1955 	.num_counters   = 4,
1956 	.num_boxes	= 1,
1957 	.perf_ctr_bits	= 44,
1958 	.constraints	= snbep_uncore_r2pcie_constraints,
1959 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1960 };
1961 
1962 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1963 	.name		= "r3qpi",
1964 	.num_counters   = 3,
1965 	.num_boxes	= 2,
1966 	.perf_ctr_bits	= 44,
1967 	.constraints	= snbep_uncore_r3qpi_constraints,
1968 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1969 };
1970 
1971 enum {
1972 	IVBEP_PCI_UNCORE_HA,
1973 	IVBEP_PCI_UNCORE_IMC,
1974 	IVBEP_PCI_UNCORE_IRP,
1975 	IVBEP_PCI_UNCORE_QPI,
1976 	IVBEP_PCI_UNCORE_R2PCIE,
1977 	IVBEP_PCI_UNCORE_R3QPI,
1978 };
1979 
1980 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1981 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1982 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1983 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1984 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1985 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1986 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1987 	NULL,
1988 };
1989 
1990 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1991 	{ /* Home Agent 0 */
1992 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1993 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1994 	},
1995 	{ /* Home Agent 1 */
1996 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1997 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1998 	},
1999 	{ /* MC0 Channel 0 */
2000 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
2001 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
2002 	},
2003 	{ /* MC0 Channel 1 */
2004 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
2005 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
2006 	},
2007 	{ /* MC0 Channel 3 */
2008 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
2009 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
2010 	},
2011 	{ /* MC0 Channel 4 */
2012 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
2013 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
2014 	},
2015 	{ /* MC1 Channel 0 */
2016 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
2017 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2018 	},
2019 	{ /* MC1 Channel 1 */
2020 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2021 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2022 	},
2023 	{ /* MC1 Channel 3 */
2024 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2025 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2026 	},
2027 	{ /* MC1 Channel 4 */
2028 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2029 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2030 	},
2031 	{ /* IRP */
2032 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2033 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2034 	},
2035 	{ /* QPI0 Port 0 */
2036 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2037 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2038 	},
2039 	{ /* QPI0 Port 1 */
2040 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2041 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2042 	},
2043 	{ /* QPI1 Port 2 */
2044 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2045 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2046 	},
2047 	{ /* R2PCIe */
2048 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2049 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2050 	},
2051 	{ /* R3QPI0 Link 0 */
2052 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2053 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2054 	},
2055 	{ /* R3QPI0 Link 1 */
2056 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2057 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2058 	},
2059 	{ /* R3QPI1 Link 2 */
2060 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2061 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2062 	},
2063 	{ /* QPI Port 0 filter  */
2064 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2065 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2066 						   SNBEP_PCI_QPI_PORT0_FILTER),
2067 	},
2068 	{ /* QPI Port 1 filter  */
2069 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2070 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2071 						   SNBEP_PCI_QPI_PORT1_FILTER),
2072 	},
2073 	{ /* end: all zeroes */ }
2074 };
2075 
2076 static struct pci_driver ivbep_uncore_pci_driver = {
2077 	.name		= "ivbep_uncore",
2078 	.id_table	= ivbep_uncore_pci_ids,
2079 };
2080 
2081 int ivbep_uncore_pci_init(void)
2082 {
2083 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2084 	if (ret)
2085 		return ret;
2086 	uncore_pci_uncores = ivbep_pci_uncores;
2087 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2088 	return 0;
2089 }
2090 /* end of IvyTown uncore support */
2091 
2092 /* KNL uncore support */
2093 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2094 	&format_attr_event.attr,
2095 	&format_attr_umask.attr,
2096 	&format_attr_edge.attr,
2097 	&format_attr_tid_en.attr,
2098 	&format_attr_inv.attr,
2099 	&format_attr_thresh5.attr,
2100 	NULL,
2101 };
2102 
2103 static const struct attribute_group knl_uncore_ubox_format_group = {
2104 	.name = "format",
2105 	.attrs = knl_uncore_ubox_formats_attr,
2106 };
2107 
2108 static struct intel_uncore_type knl_uncore_ubox = {
2109 	.name			= "ubox",
2110 	.num_counters		= 2,
2111 	.num_boxes		= 1,
2112 	.perf_ctr_bits		= 48,
2113 	.fixed_ctr_bits		= 48,
2114 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2115 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2116 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2117 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2118 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2119 	.ops			= &snbep_uncore_msr_ops,
2120 	.format_group		= &knl_uncore_ubox_format_group,
2121 };
2122 
2123 static struct attribute *knl_uncore_cha_formats_attr[] = {
2124 	&format_attr_event.attr,
2125 	&format_attr_umask.attr,
2126 	&format_attr_qor.attr,
2127 	&format_attr_edge.attr,
2128 	&format_attr_tid_en.attr,
2129 	&format_attr_inv.attr,
2130 	&format_attr_thresh8.attr,
2131 	&format_attr_filter_tid4.attr,
2132 	&format_attr_filter_link3.attr,
2133 	&format_attr_filter_state4.attr,
2134 	&format_attr_filter_local.attr,
2135 	&format_attr_filter_all_op.attr,
2136 	&format_attr_filter_nnm.attr,
2137 	&format_attr_filter_opc3.attr,
2138 	&format_attr_filter_nc.attr,
2139 	&format_attr_filter_isoc.attr,
2140 	NULL,
2141 };
2142 
2143 static const struct attribute_group knl_uncore_cha_format_group = {
2144 	.name = "format",
2145 	.attrs = knl_uncore_cha_formats_attr,
2146 };
2147 
2148 static struct event_constraint knl_uncore_cha_constraints[] = {
2149 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2150 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2151 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2152 	EVENT_CONSTRAINT_END
2153 };
2154 
2155 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2156 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2157 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2158 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2159 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2160 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2161 	EVENT_EXTRA_END
2162 };
2163 
2164 static u64 knl_cha_filter_mask(int fields)
2165 {
2166 	u64 mask = 0;
2167 
2168 	if (fields & 0x1)
2169 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2170 	if (fields & 0x2)
2171 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2172 	if (fields & 0x4)
2173 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2174 	return mask;
2175 }
2176 
2177 static struct event_constraint *
2178 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2179 {
2180 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2181 }
2182 
2183 static int knl_cha_hw_config(struct intel_uncore_box *box,
2184 			     struct perf_event *event)
2185 {
2186 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2187 	struct extra_reg *er;
2188 	int idx = 0;
2189 
2190 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2191 		if (er->event != (event->hw.config & er->config_mask))
2192 			continue;
2193 		idx |= er->idx;
2194 	}
2195 
2196 	if (idx) {
2197 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2198 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2199 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2200 
2201 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2202 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2203 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2204 		reg1->idx = idx;
2205 	}
2206 	return 0;
2207 }
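/*
 * Note on the unconditional ORs above: whenever any filter field is in
 * use, KNL also sets the node-scope bits.  For an opcode-filtered
 * event (idx 0x4) the resulting programming is:
 *
 *	reg1->config = (config1 & KNL_CHA_MSR_PMON_BOX_FILTER_OP)
 *		     | KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE
 *		     | KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE
 *		     | KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
 *
 * i.e. local-node, remote-node and NNC matching are always enabled
 * alongside the requested opcode match.
 */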
2208 
2209 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2210 				    struct perf_event *event);
2211 
2212 static struct intel_uncore_ops knl_uncore_cha_ops = {
2213 	.init_box		= snbep_uncore_msr_init_box,
2214 	.disable_box		= snbep_uncore_msr_disable_box,
2215 	.enable_box		= snbep_uncore_msr_enable_box,
2216 	.disable_event		= snbep_uncore_msr_disable_event,
2217 	.enable_event		= hswep_cbox_enable_event,
2218 	.read_counter		= uncore_msr_read_counter,
2219 	.hw_config		= knl_cha_hw_config,
2220 	.get_constraint		= knl_cha_get_constraint,
2221 	.put_constraint		= snbep_cbox_put_constraint,
2222 };
2223 
2224 static struct intel_uncore_type knl_uncore_cha = {
2225 	.name			= "cha",
2226 	.num_counters		= 4,
2227 	.num_boxes		= 38,
2228 	.perf_ctr_bits		= 48,
2229 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2230 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2231 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2232 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2233 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2234 	.num_shared_regs	= 1,
2235 	.constraints		= knl_uncore_cha_constraints,
2236 	.ops			= &knl_uncore_cha_ops,
2237 	.format_group		= &knl_uncore_cha_format_group,
2238 };
2239 
2240 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2241 	&format_attr_event2.attr,
2242 	&format_attr_use_occ_ctr.attr,
2243 	&format_attr_occ_sel.attr,
2244 	&format_attr_edge.attr,
2245 	&format_attr_tid_en.attr,
2246 	&format_attr_inv.attr,
2247 	&format_attr_thresh6.attr,
2248 	&format_attr_occ_invert.attr,
2249 	&format_attr_occ_edge_det.attr,
2250 	NULL,
2251 };
2252 
2253 static const struct attribute_group knl_uncore_pcu_format_group = {
2254 	.name = "format",
2255 	.attrs = knl_uncore_pcu_formats_attr,
2256 };
2257 
2258 static struct intel_uncore_type knl_uncore_pcu = {
2259 	.name			= "pcu",
2260 	.num_counters		= 4,
2261 	.num_boxes		= 1,
2262 	.perf_ctr_bits		= 48,
2263 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2264 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2265 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2266 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2267 	.ops			= &snbep_uncore_msr_ops,
2268 	.format_group		= &knl_uncore_pcu_format_group,
2269 };
2270 
2271 static struct intel_uncore_type *knl_msr_uncores[] = {
2272 	&knl_uncore_ubox,
2273 	&knl_uncore_cha,
2274 	&knl_uncore_pcu,
2275 	NULL,
2276 };
2277 
2278 void knl_uncore_cpu_init(void)
2279 {
2280 	uncore_msr_uncores = knl_msr_uncores;
2281 }
2282 
2283 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2284 {
2285 	struct pci_dev *pdev = box->pci_dev;
2286 	int box_ctl = uncore_pci_box_ctl(box);
2287 
2288 	pci_write_config_dword(pdev, box_ctl, 0);
2289 }
2290 
2291 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2292 					struct perf_event *event)
2293 {
2294 	struct pci_dev *pdev = box->pci_dev;
2295 	struct hw_perf_event *hwc = &event->hw;
2296 
2297 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2298 							== UNCORE_FIXED_EVENT)
2299 		pci_write_config_dword(pdev, hwc->config_base,
2300 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2301 	else
2302 		pci_write_config_dword(pdev, hwc->config_base,
2303 				       hwc->config | SNBEP_PMON_CTL_EN);
2304 }
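/*
 * Two KNL quirks worth noting here: knl_uncore_imc_enable_box()
 * un-freezes the box by writing 0 to the whole box control register
 * instead of the usual read-modify-write of the FRZ bit, and the
 * enable path above routes an event select of UNCORE_FIXED_EVENT
 * (0xff, from uncore.h) to the fixed counter enable.  A hypothetical
 * invocation that takes the fixed-event branch:
 *
 *	# perf stat -a -e uncore_imc_0/event=0xff/ -- sleep 1
 */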
2305 
2306 static struct intel_uncore_ops knl_uncore_imc_ops = {
2307 	.init_box	= snbep_uncore_pci_init_box,
2308 	.disable_box	= snbep_uncore_pci_disable_box,
2309 	.enable_box	= knl_uncore_imc_enable_box,
2310 	.read_counter	= snbep_uncore_pci_read_counter,
2311 	.enable_event	= knl_uncore_imc_enable_event,
2312 	.disable_event	= snbep_uncore_pci_disable_event,
2313 };
2314 
2315 static struct intel_uncore_type knl_uncore_imc_uclk = {
2316 	.name			= "imc_uclk",
2317 	.num_counters		= 4,
2318 	.num_boxes		= 2,
2319 	.perf_ctr_bits		= 48,
2320 	.fixed_ctr_bits		= 48,
2321 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2322 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2323 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2324 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2325 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2326 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2327 	.ops			= &knl_uncore_imc_ops,
2328 	.format_group		= &snbep_uncore_format_group,
2329 };
2330 
2331 static struct intel_uncore_type knl_uncore_imc_dclk = {
2332 	.name			= "imc",
2333 	.num_counters		= 4,
2334 	.num_boxes		= 6,
2335 	.perf_ctr_bits		= 48,
2336 	.fixed_ctr_bits		= 48,
2337 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2338 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2339 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2340 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2341 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2342 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2343 	.ops			= &knl_uncore_imc_ops,
2344 	.format_group		= &snbep_uncore_format_group,
2345 };
2346 
2347 static struct intel_uncore_type knl_uncore_edc_uclk = {
2348 	.name			= "edc_uclk",
2349 	.num_counters		= 4,
2350 	.num_boxes		= 8,
2351 	.perf_ctr_bits		= 48,
2352 	.fixed_ctr_bits		= 48,
2353 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2354 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2355 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2356 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2357 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2358 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2359 	.ops			= &knl_uncore_imc_ops,
2360 	.format_group		= &snbep_uncore_format_group,
2361 };
2362 
2363 static struct intel_uncore_type knl_uncore_edc_eclk = {
2364 	.name			= "edc_eclk",
2365 	.num_counters		= 4,
2366 	.num_boxes		= 8,
2367 	.perf_ctr_bits		= 48,
2368 	.fixed_ctr_bits		= 48,
2369 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2370 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2371 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2372 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2373 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2374 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2375 	.ops			= &knl_uncore_imc_ops,
2376 	.format_group		= &snbep_uncore_format_group,
2377 };
2378 
2379 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2380 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2381 	EVENT_CONSTRAINT_END
2382 };
2383 
2384 static struct intel_uncore_type knl_uncore_m2pcie = {
2385 	.name		= "m2pcie",
2386 	.num_counters   = 4,
2387 	.num_boxes	= 1,
2388 	.perf_ctr_bits	= 48,
2389 	.constraints	= knl_uncore_m2pcie_constraints,
2390 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2391 };
2392 
2393 static struct attribute *knl_uncore_irp_formats_attr[] = {
2394 	&format_attr_event.attr,
2395 	&format_attr_umask.attr,
2396 	&format_attr_qor.attr,
2397 	&format_attr_edge.attr,
2398 	&format_attr_inv.attr,
2399 	&format_attr_thresh8.attr,
2400 	NULL,
2401 };
2402 
2403 static const struct attribute_group knl_uncore_irp_format_group = {
2404 	.name = "format",
2405 	.attrs = knl_uncore_irp_formats_attr,
2406 };
2407 
2408 static struct intel_uncore_type knl_uncore_irp = {
2409 	.name			= "irp",
2410 	.num_counters		= 2,
2411 	.num_boxes		= 1,
2412 	.perf_ctr_bits		= 48,
2413 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2414 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2415 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2416 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2417 	.ops			= &snbep_uncore_pci_ops,
2418 	.format_group		= &knl_uncore_irp_format_group,
2419 };
2420 
2421 enum {
2422 	KNL_PCI_UNCORE_MC_UCLK,
2423 	KNL_PCI_UNCORE_MC_DCLK,
2424 	KNL_PCI_UNCORE_EDC_UCLK,
2425 	KNL_PCI_UNCORE_EDC_ECLK,
2426 	KNL_PCI_UNCORE_M2PCIE,
2427 	KNL_PCI_UNCORE_IRP,
2428 };
2429 
2430 static struct intel_uncore_type *knl_pci_uncores[] = {
2431 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2432 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2433 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2434 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2435 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2436 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2437 	NULL,
2438 };
2439 
2440 /*
2441  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2442  * device type. Prior to KNL, each instance of a PMU device type had a unique
2443  * device ID.
2444  *
2445  *	PCI Device ID	Uncore PMU Devices
2446  *	----------------------------------
2447  *	0x7841		MC0 UClk, MC1 UClk
2448  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2449  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2450  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2451  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2452  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2453  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2454  *	0x7817		M2PCIe
2455  *	0x7814		IRP
2456  */
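/*
 * Because the device ID alone is ambiguous here, each table entry
 * below also keys on the PCI device/function via
 * UNCORE_PCI_DEV_FULL_DATA().  Illustrative reading of one entry,
 * assuming the uncore.h encoding packs dev, func, type and idx one
 * byte each:
 *
 *	UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0)
 *		=> device 8, function 2 is MC0 DClk CH 0, PMU instance 0
 */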
2457 
2458 static const struct pci_device_id knl_uncore_pci_ids[] = {
2459 	{ /* MC0 UClk */
2460 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2461 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2462 	},
2463 	{ /* MC1 UClk */
2464 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2465 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2466 	},
2467 	{ /* MC0 DClk CH 0 */
2468 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2469 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2470 	},
2471 	{ /* MC0 DClk CH 1 */
2472 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2473 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2474 	},
2475 	{ /* MC0 DClk CH 2 */
2476 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2477 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2478 	},
2479 	{ /* MC1 DClk CH 0 */
2480 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2481 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2482 	},
2483 	{ /* MC1 DClk CH 1 */
2484 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2485 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2486 	},
2487 	{ /* MC1 DClk CH 2 */
2488 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2489 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2490 	},
2491 	{ /* EDC0 UClk */
2492 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2493 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2494 	},
2495 	{ /* EDC1 UClk */
2496 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2497 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2498 	},
2499 	{ /* EDC2 UClk */
2500 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2501 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2502 	},
2503 	{ /* EDC3 UClk */
2504 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2505 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2506 	},
2507 	{ /* EDC4 UClk */
2508 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2509 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2510 	},
2511 	{ /* EDC5 UClk */
2512 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2513 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2514 	},
2515 	{ /* EDC6 UClk */
2516 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2517 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2518 	},
2519 	{ /* EDC7 UClk */
2520 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2521 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2522 	},
2523 	{ /* EDC0 EClk */
2524 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2525 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2526 	},
2527 	{ /* EDC1 EClk */
2528 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2529 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2530 	},
2531 	{ /* EDC2 EClk */
2532 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2533 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2534 	},
2535 	{ /* EDC3 EClk */
2536 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2537 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2538 	},
2539 	{ /* EDC4 EClk */
2540 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2541 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2542 	},
2543 	{ /* EDC5 EClk */
2544 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2545 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2546 	},
2547 	{ /* EDC6 EClk */
2548 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2549 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2550 	},
2551 	{ /* EDC7 EClk */
2552 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2553 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2554 	},
2555 	{ /* M2PCIe */
2556 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2557 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2558 	},
2559 	{ /* IRP */
2560 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2561 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2562 	},
2563 	{ /* end: all zeroes */ }
2564 };
2565 
2566 static struct pci_driver knl_uncore_pci_driver = {
2567 	.name		= "knl_uncore",
2568 	.id_table	= knl_uncore_pci_ids,
2569 };
2570 
2571 int knl_uncore_pci_init(void)
2572 {
2573 	int ret;
2574 
2575 	/* All KNL PCI-based PMON units are on the same PCI bus except IRP */
2576 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2577 	if (ret)
2578 		return ret;
2579 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2580 	if (ret)
2581 		return ret;
2582 	uncore_pci_uncores = knl_pci_uncores;
2583 	uncore_pci_driver = &knl_uncore_pci_driver;
2584 	return 0;
2585 }
2586 
2587 /* end of KNL uncore support */
2588 
2589 /* Haswell-EP uncore support */
2590 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2591 	&format_attr_event.attr,
2592 	&format_attr_umask.attr,
2593 	&format_attr_edge.attr,
2594 	&format_attr_inv.attr,
2595 	&format_attr_thresh5.attr,
2596 	&format_attr_filter_tid2.attr,
2597 	&format_attr_filter_cid.attr,
2598 	NULL,
2599 };
2600 
2601 static const struct attribute_group hswep_uncore_ubox_format_group = {
2602 	.name = "format",
2603 	.attrs = hswep_uncore_ubox_formats_attr,
2604 };
2605 
2606 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2607 {
2608 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2609 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2610 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2611 	reg1->idx = 0;
2612 	return 0;
2613 }
2614 
2615 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2616 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2617 	.hw_config		= hswep_ubox_hw_config,
2618 	.get_constraint		= uncore_get_constraint,
2619 	.put_constraint		= uncore_put_constraint,
2620 };
2621 
2622 static struct intel_uncore_type hswep_uncore_ubox = {
2623 	.name			= "ubox",
2624 	.num_counters		= 2,
2625 	.num_boxes		= 1,
2626 	.perf_ctr_bits		= 44,
2627 	.fixed_ctr_bits		= 48,
2628 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2629 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2630 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2631 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2632 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2633 	.num_shared_regs	= 1,
2634 	.ops			= &hswep_uncore_ubox_ops,
2635 	.format_group		= &hswep_uncore_ubox_format_group,
2636 };
2637 
2638 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2639 	&format_attr_event.attr,
2640 	&format_attr_umask.attr,
2641 	&format_attr_edge.attr,
2642 	&format_attr_tid_en.attr,
2643 	&format_attr_thresh8.attr,
2644 	&format_attr_filter_tid3.attr,
2645 	&format_attr_filter_link2.attr,
2646 	&format_attr_filter_state3.attr,
2647 	&format_attr_filter_nid2.attr,
2648 	&format_attr_filter_opc2.attr,
2649 	&format_attr_filter_nc.attr,
2650 	&format_attr_filter_c6.attr,
2651 	&format_attr_filter_isoc.attr,
2652 	NULL,
2653 };
2654 
2655 static const struct attribute_group hswep_uncore_cbox_format_group = {
2656 	.name = "format",
2657 	.attrs = hswep_uncore_cbox_formats_attr,
2658 };
2659 
2660 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2661 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2662 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2663 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2664 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2665 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2666 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2667 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2668 	EVENT_CONSTRAINT_END
2669 };
2670 
2671 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2672 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2673 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2674 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2675 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2676 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2677 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2678 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2679 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2680 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2681 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2682 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2683 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2684 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2685 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2686 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2687 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2688 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2689 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2690 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2691 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2692 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2693 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2694 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2695 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2696 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2697 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2698 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2699 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2700 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2701 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2702 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2703 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2704 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2705 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2706 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2707 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2708 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2709 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2710 	EVENT_EXTRA_END
2711 };
2712 
2713 static u64 hswep_cbox_filter_mask(int fields)
2714 {
2715 	u64 mask = 0;
2716 	if (fields & 0x1)
2717 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2718 	if (fields & 0x2)
2719 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2720 	if (fields & 0x4)
2721 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2722 	if (fields & 0x8)
2723 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2724 	if (fields & 0x10) {
2725 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2726 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2727 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2728 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2729 	}
2730 	return mask;
2731 }
2732 
2733 static struct event_constraint *
2734 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2735 {
2736 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2737 }
2738 
2739 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2740 {
2741 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2742 	struct extra_reg *er;
2743 	int idx = 0;
2744 
2745 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2746 		if (er->event != (event->hw.config & er->config_mask))
2747 			continue;
2748 		idx |= er->idx;
2749 	}
2750 
2751 	if (idx) {
2752 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2753 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2754 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2755 		reg1->idx = idx;
2756 	}
2757 	return 0;
2758 }
2759 
2760 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2761 				  struct perf_event *event)
2762 {
2763 	struct hw_perf_event *hwc = &event->hw;
2764 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2765 
2766 	if (reg1->idx != EXTRA_REG_NONE) {
2767 		u64 filter = uncore_shared_reg_config(box, 0);
2768 		wrmsrl(reg1->reg, filter & 0xffffffff);
2769 		wrmsrl(reg1->reg + 1, filter >> 32);
2770 	}
2771 
2772 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2773 }
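/*
 * Contrast with ivbep_cbox_enable_event(): there the upper filter half
 * goes to reg1->reg + 6, while on Haswell-EP FILTER1 immediately
 * follows FILTER0 in the MSR map, hence the reg1->reg + 1 write above.
 */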
2774 
2775 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2776 	.init_box		= snbep_uncore_msr_init_box,
2777 	.disable_box		= snbep_uncore_msr_disable_box,
2778 	.enable_box		= snbep_uncore_msr_enable_box,
2779 	.disable_event		= snbep_uncore_msr_disable_event,
2780 	.enable_event		= hswep_cbox_enable_event,
2781 	.read_counter		= uncore_msr_read_counter,
2782 	.hw_config		= hswep_cbox_hw_config,
2783 	.get_constraint		= hswep_cbox_get_constraint,
2784 	.put_constraint		= snbep_cbox_put_constraint,
2785 };
2786 
2787 static struct intel_uncore_type hswep_uncore_cbox = {
2788 	.name			= "cbox",
2789 	.num_counters		= 4,
2790 	.num_boxes		= 18,
2791 	.perf_ctr_bits		= 48,
2792 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2793 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2794 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2795 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2796 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2797 	.num_shared_regs	= 1,
2798 	.constraints		= hswep_uncore_cbox_constraints,
2799 	.ops			= &hswep_uncore_cbox_ops,
2800 	.format_group		= &hswep_uncore_cbox_format_group,
2801 };
2802 
2803 /*
2804  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2805  */
2806 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2807 {
2808 	unsigned msr = uncore_msr_box_ctl(box);
2809 
2810 	if (msr) {
2811 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2812 		u64 flags = 0;
2813 		int i;
2814 
2815 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2816 			flags |= (1ULL << i);
2817 			wrmsrl(msr, flags);
2818 		}
2819 	}
2820 }
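/*
 * Worked sequence for the loop above: SNBEP_PMON_BOX_CTL_INT sets bits
 * 0, 1 and 16, so the SBOX control register sees three cumulative
 * writes,
 *
 *	wrmsrl(msr, 0x00001);	(RST_CTRL)
 *	wrmsrl(msr, 0x00003);	(RST_CTRL | RST_CTRS)
 *	wrmsrl(msr, 0x10003);	(... | FRZ_EN)
 *
 * building the value one bit at a time rather than issuing a single
 * 0x10003 write that could raise a spurious #GP.
 */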
2821 
2822 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2823 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2824 	.init_box		= hswep_uncore_sbox_msr_init_box
2825 };
2826 
2827 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2828 	&format_attr_event.attr,
2829 	&format_attr_umask.attr,
2830 	&format_attr_edge.attr,
2831 	&format_attr_tid_en.attr,
2832 	&format_attr_inv.attr,
2833 	&format_attr_thresh8.attr,
2834 	NULL,
2835 };
2836 
2837 static const struct attribute_group hswep_uncore_sbox_format_group = {
2838 	.name = "format",
2839 	.attrs = hswep_uncore_sbox_formats_attr,
2840 };
2841 
2842 static struct intel_uncore_type hswep_uncore_sbox = {
2843 	.name			= "sbox",
2844 	.num_counters		= 4,
2845 	.num_boxes		= 4,
2846 	.perf_ctr_bits		= 44,
2847 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2848 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2849 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2850 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2851 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2852 	.ops			= &hswep_uncore_sbox_msr_ops,
2853 	.format_group		= &hswep_uncore_sbox_format_group,
2854 };
2855 
2856 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2857 {
2858 	struct hw_perf_event *hwc = &event->hw;
2859 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2860 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2861 
2862 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2863 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2864 		reg1->idx = ev_sel - 0xb;
2865 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
2866 	}
2867 	return 0;
2868 }
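/*
 * Example for the band filter above: event select 0xd yields
 * reg1->idx = 2, so only config1 bits 23:16 (filter_band2) survive the
 * 0xff << 16 mask; each of the four frequency bands occupies one byte
 * of config1.
 */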
2869 
2870 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2871 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2872 	.hw_config		= hswep_pcu_hw_config,
2873 	.get_constraint		= snbep_pcu_get_constraint,
2874 	.put_constraint		= snbep_pcu_put_constraint,
2875 };
2876 
2877 static struct intel_uncore_type hswep_uncore_pcu = {
2878 	.name			= "pcu",
2879 	.num_counters		= 4,
2880 	.num_boxes		= 1,
2881 	.perf_ctr_bits		= 48,
2882 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2883 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2884 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2885 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2886 	.num_shared_regs	= 1,
2887 	.ops			= &hswep_uncore_pcu_ops,
2888 	.format_group		= &snbep_uncore_pcu_format_group,
2889 };
2890 
2891 static struct intel_uncore_type *hswep_msr_uncores[] = {
2892 	&hswep_uncore_ubox,
2893 	&hswep_uncore_cbox,
2894 	&hswep_uncore_sbox,
2895 	&hswep_uncore_pcu,
2896 	NULL,
2897 };
2898 
2899 #define HSWEP_PCU_DID			0x2fc0
2900 #define HSWEP_PCU_CAPID4_OFFSET		0x94
2901 #define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2902 
2903 static bool hswep_has_limit_sbox(unsigned int device)
2904 {
2905 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2906 	u32 capid4;
2907 
2908 	if (!dev)
2909 		return false;
2910 
2911 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2912 	pci_dev_put(dev);
2913 	if (!hswep_get_chop(capid4))
2914 		return true;
2915 
2916 	return false;
2917 }
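/*
 * Worked example: CAPID4 = 0x00000080 has bits 7:6 = 0b10, so
 * hswep_get_chop() returns 2 and hswep_has_limit_sbox() is false (all
 * four SBOXes kept); a part with bits 7:6 clear yields chop 0 and is
 * limited to two SBOXes by hswep_uncore_cpu_init() below.
 */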
2918 
2919 void hswep_uncore_cpu_init(void)
2920 {
2921 	if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
2922 		hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();
2923 
2924 	/* Detect 6-8 core systems with only two SBOXes */
2925 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2926 		hswep_uncore_sbox.num_boxes = 2;
2927 
2928 	uncore_msr_uncores = hswep_msr_uncores;
2929 }
2930 
2931 static struct intel_uncore_type hswep_uncore_ha = {
2932 	.name		= "ha",
2933 	.num_counters   = 4,
2934 	.num_boxes	= 2,
2935 	.perf_ctr_bits	= 48,
2936 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2937 };
2938 
2939 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2940 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2941 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2942 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2943 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2944 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2945 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2946 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2947 	{ /* end: all zeroes */ },
2948 };
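/*
 * Where the cas_count scale comes from: each CAS transaction moves one
 * 64-byte cache line and the events are reported in MiB, so
 *
 *	64 / (1024 * 1024) = 6.103515625e-5
 *
 * perf multiplies the raw count by this factor, e.g. (hypothetical
 * invocation):
 *
 *	# perf stat -a -e uncore_imc_0/cas_count_read/ -- sleep 1
 */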
2949 
2950 static struct intel_uncore_type hswep_uncore_imc = {
2951 	.name		= "imc",
2952 	.num_counters   = 4,
2953 	.num_boxes	= 8,
2954 	.perf_ctr_bits	= 48,
2955 	.fixed_ctr_bits	= 48,
2956 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2957 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2958 	.event_descs	= hswep_uncore_imc_events,
2959 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2960 };
2961 
2962 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2963 
2964 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2965 {
2966 	struct pci_dev *pdev = box->pci_dev;
2967 	struct hw_perf_event *hwc = &event->hw;
2968 	u64 count = 0;
2969 
2970 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2971 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2972 
2973 	return count;
2974 }
2975 
2976 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2977 	.init_box	= snbep_uncore_pci_init_box,
2978 	.disable_box	= snbep_uncore_pci_disable_box,
2979 	.enable_box	= snbep_uncore_pci_enable_box,
2980 	.disable_event	= ivbep_uncore_irp_disable_event,
2981 	.enable_event	= ivbep_uncore_irp_enable_event,
2982 	.read_counter	= hswep_uncore_irp_read_counter,
2983 };
2984 
2985 static struct intel_uncore_type hswep_uncore_irp = {
2986 	.name			= "irp",
2987 	.num_counters		= 4,
2988 	.num_boxes		= 1,
2989 	.perf_ctr_bits		= 48,
2990 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2991 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2992 	.ops			= &hswep_uncore_irp_ops,
2993 	.format_group		= &snbep_uncore_format_group,
2994 };
2995 
2996 static struct intel_uncore_type hswep_uncore_qpi = {
2997 	.name			= "qpi",
2998 	.num_counters		= 4,
2999 	.num_boxes		= 3,
3000 	.perf_ctr_bits		= 48,
3001 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3002 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3003 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3004 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3005 	.num_shared_regs	= 1,
3006 	.ops			= &snbep_uncore_qpi_ops,
3007 	.format_group		= &snbep_uncore_qpi_format_group,
3008 };
3009 
3010 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
3011 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3012 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3013 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3014 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3015 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
3016 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3017 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3018 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3019 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3020 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3021 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3022 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3023 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3024 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3025 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3026 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3027 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3028 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3029 	EVENT_CONSTRAINT_END
3030 };
3031 
3032 static struct intel_uncore_type hswep_uncore_r2pcie = {
3033 	.name		= "r2pcie",
3034 	.num_counters   = 4,
3035 	.num_boxes	= 1,
3036 	.perf_ctr_bits	= 48,
3037 	.constraints	= hswep_uncore_r2pcie_constraints,
3038 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3039 };
3040 
3041 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3042 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3043 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3044 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3045 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3046 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3047 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3048 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3049 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3050 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3051 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3052 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3053 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3054 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3055 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3056 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3057 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3058 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3059 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3060 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3061 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3062 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3063 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3064 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3065 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3066 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3067 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3068 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3069 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3070 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3071 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3072 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3073 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3074 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3075 	EVENT_CONSTRAINT_END
3076 };
3077 
3078 static struct intel_uncore_type hswep_uncore_r3qpi = {
3079 	.name		= "r3qpi",
3080 	.num_counters   = 3,
3081 	.num_boxes	= 3,
3082 	.perf_ctr_bits	= 44,
3083 	.constraints	= hswep_uncore_r3qpi_constraints,
3084 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3085 };
3086 
3087 enum {
3088 	HSWEP_PCI_UNCORE_HA,
3089 	HSWEP_PCI_UNCORE_IMC,
3090 	HSWEP_PCI_UNCORE_IRP,
3091 	HSWEP_PCI_UNCORE_QPI,
3092 	HSWEP_PCI_UNCORE_R2PCIE,
3093 	HSWEP_PCI_UNCORE_R3QPI,
3094 };
3095 
3096 static struct intel_uncore_type *hswep_pci_uncores[] = {
3097 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3098 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3099 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3100 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3101 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3102 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3103 	NULL,
3104 };
3105 
3106 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3107 	{ /* Home Agent 0 */
3108 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3109 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3110 	},
3111 	{ /* Home Agent 1 */
3112 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3113 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3114 	},
3115 	{ /* MC0 Channel 0 */
3116 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3117 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3118 	},
3119 	{ /* MC0 Channel 1 */
3120 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3121 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3122 	},
3123 	{ /* MC0 Channel 2 */
3124 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3125 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3126 	},
3127 	{ /* MC0 Channel 3 */
3128 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3129 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3130 	},
3131 	{ /* MC1 Channel 0 */
3132 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3133 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3134 	},
3135 	{ /* MC1 Channel 1 */
3136 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3137 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3138 	},
3139 	{ /* MC1 Channel 2 */
3140 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3141 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3142 	},
3143 	{ /* MC1 Channel 3 */
3144 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3145 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3146 	},
3147 	{ /* IRP */
3148 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3149 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3150 	},
3151 	{ /* QPI0 Port 0 */
3152 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3153 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3154 	},
3155 	{ /* QPI0 Port 1 */
3156 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3157 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3158 	},
3159 	{ /* QPI1 Port 2 */
3160 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3161 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3162 	},
3163 	{ /* R2PCIe */
3164 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3165 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3166 	},
3167 	{ /* R3QPI0 Link 0 */
3168 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3169 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3170 	},
3171 	{ /* R3QPI0 Link 1 */
3172 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3173 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3174 	},
3175 	{ /* R3QPI1 Link 2 */
3176 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3177 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3178 	},
3179 	{ /* QPI Port 0 filter  */
3180 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3181 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3182 						   SNBEP_PCI_QPI_PORT0_FILTER),
3183 	},
3184 	{ /* QPI Port 1 filter  */
3185 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3186 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3187 						   SNBEP_PCI_QPI_PORT1_FILTER),
3188 	},
3189 	{ /* end: all zeroes */ }
3190 };
3191 
3192 static struct pci_driver hswep_uncore_pci_driver = {
3193 	.name		= "hswep_uncore",
3194 	.id_table	= hswep_uncore_pci_ids,
3195 };
3196 
3197 int hswep_uncore_pci_init(void)
3198 {
3199 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3200 	if (ret)
3201 		return ret;
3202 	uncore_pci_uncores = hswep_pci_uncores;
3203 	uncore_pci_driver = &hswep_uncore_pci_driver;
3204 	return 0;
3205 }
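
/*
 * snbep_pci2phy_map_init() locates, on each socket, the PCI device with
 * the given ID (0x2f1e here) and reads the two config offsets passed in
 * (node ID and GID/NID map) to build the PCI-bus-to-package mapping that
 * the PCI uncore probing below depends on.
 */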
3206 /* end of Haswell-EP uncore support */
3207 
3208 /* BDX uncore support */
3209 
3210 static struct intel_uncore_type bdx_uncore_ubox = {
3211 	.name			= "ubox",
3212 	.num_counters		= 2,
3213 	.num_boxes		= 1,
3214 	.perf_ctr_bits		= 48,
3215 	.fixed_ctr_bits		= 48,
3216 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3217 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3218 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3219 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3220 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3221 	.num_shared_regs	= 1,
3222 	.ops			= &ivbep_uncore_msr_ops,
3223 	.format_group		= &ivbep_uncore_ubox_format_group,
3224 };
3225 
3226 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3227 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3228 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3229 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3230 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3231 	EVENT_CONSTRAINT_END
3232 };
3233 
3234 static struct intel_uncore_type bdx_uncore_cbox = {
3235 	.name			= "cbox",
3236 	.num_counters		= 4,
3237 	.num_boxes		= 24,
3238 	.perf_ctr_bits		= 48,
3239 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3240 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3241 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3242 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3243 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3244 	.num_shared_regs	= 1,
3245 	.constraints		= bdx_uncore_cbox_constraints,
3246 	.ops			= &hswep_uncore_cbox_ops,
3247 	.format_group		= &hswep_uncore_cbox_format_group,
3248 };
3249 
3250 static struct intel_uncore_type bdx_uncore_sbox = {
3251 	.name			= "sbox",
3252 	.num_counters		= 4,
3253 	.num_boxes		= 4,
3254 	.perf_ctr_bits		= 48,
3255 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3256 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3257 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3258 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3259 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3260 	.ops			= &hswep_uncore_sbox_msr_ops,
3261 	.format_group		= &hswep_uncore_sbox_format_group,
3262 };
3263 
3264 #define BDX_MSR_UNCORE_SBOX	3
3265 
3266 static struct intel_uncore_type *bdx_msr_uncores[] = {
3267 	&bdx_uncore_ubox,
3268 	&bdx_uncore_cbox,
3269 	&hswep_uncore_pcu,
3270 	&bdx_uncore_sbox,
3271 	NULL,
3272 };
3273 
3274 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3275 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3276 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3277 	EVENT_CONSTRAINT_END
3278 };
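
/*
 * Reading the entry above: EVENT_CONSTRAINT(code, cntmask, cmask) applies
 * to any event whose config ANDed with cmask equals code, and cntmask is
 * the set of counters the event may use.  Events with bit 7 (0x80) set are
 * therefore confined to counters 1-3 (mask 0xe), keeping them off counter 0
 * as required.
 */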
3279 
3280 #define BDX_PCU_DID			0x6fc0
3281 
3282 void bdx_uncore_cpu_init(void)
3283 {
3284 	if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
3285 		bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
3286 	uncore_msr_uncores = bdx_msr_uncores;
3287 
3288 	/* Detect systems with no SBOXes */
3289 	if (boot_cpu_data.x86_vfm == INTEL_BROADWELL_D || hswep_has_limit_sbox(BDX_PCU_DID))
3290 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3291 
3292 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3293 }
3294 
3295 static struct intel_uncore_type bdx_uncore_ha = {
3296 	.name		= "ha",
3297 	.num_counters   = 4,
3298 	.num_boxes	= 2,
3299 	.perf_ctr_bits	= 48,
3300 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3301 };
3302 
3303 static struct intel_uncore_type bdx_uncore_imc = {
3304 	.name		= "imc",
3305 	.num_counters   = 4,
3306 	.num_boxes	= 8,
3307 	.perf_ctr_bits	= 48,
3308 	.fixed_ctr_bits	= 48,
3309 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3310 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3311 	.event_descs	= hswep_uncore_imc_events,
3312 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3313 };
3314 
3315 static struct intel_uncore_type bdx_uncore_irp = {
3316 	.name			= "irp",
3317 	.num_counters		= 4,
3318 	.num_boxes		= 1,
3319 	.perf_ctr_bits		= 48,
3320 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3321 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3322 	.ops			= &hswep_uncore_irp_ops,
3323 	.format_group		= &snbep_uncore_format_group,
3324 };
3325 
3326 static struct intel_uncore_type bdx_uncore_qpi = {
3327 	.name			= "qpi",
3328 	.num_counters		= 4,
3329 	.num_boxes		= 3,
3330 	.perf_ctr_bits		= 48,
3331 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3332 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3333 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3334 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3335 	.num_shared_regs	= 1,
3336 	.ops			= &snbep_uncore_qpi_ops,
3337 	.format_group		= &snbep_uncore_qpi_format_group,
3338 };
3339 
3340 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3341 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3342 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3343 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3344 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3345 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3346 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3347 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3348 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3349 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3350 	EVENT_CONSTRAINT_END
3351 };
3352 
3353 static struct intel_uncore_type bdx_uncore_r2pcie = {
3354 	.name		= "r2pcie",
3355 	.num_counters   = 4,
3356 	.num_boxes	= 1,
3357 	.perf_ctr_bits	= 48,
3358 	.constraints	= bdx_uncore_r2pcie_constraints,
3359 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3360 };
3361 
3362 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3363 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3364 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3365 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3366 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3367 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3368 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3369 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3370 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3371 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3372 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3373 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3374 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3375 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3376 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3377 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3378 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3379 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3380 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3381 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3382 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3383 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3384 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3385 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3386 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3387 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3388 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3389 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3390 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3391 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3392 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3393 	EVENT_CONSTRAINT_END
3394 };
3395 
3396 static struct intel_uncore_type bdx_uncore_r3qpi = {
3397 	.name		= "r3qpi",
3398 	.num_counters   = 3,
3399 	.num_boxes	= 3,
3400 	.perf_ctr_bits	= 48,
3401 	.constraints	= bdx_uncore_r3qpi_constraints,
3402 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3403 };
3404 
3405 enum {
3406 	BDX_PCI_UNCORE_HA,
3407 	BDX_PCI_UNCORE_IMC,
3408 	BDX_PCI_UNCORE_IRP,
3409 	BDX_PCI_UNCORE_QPI,
3410 	BDX_PCI_UNCORE_R2PCIE,
3411 	BDX_PCI_UNCORE_R3QPI,
3412 };
3413 
3414 static struct intel_uncore_type *bdx_pci_uncores[] = {
3415 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3416 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3417 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3418 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3419 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3420 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3421 	NULL,
3422 };
3423 
3424 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3425 	{ /* Home Agent 0 */
3426 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3427 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3428 	},
3429 	{ /* Home Agent 1 */
3430 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3431 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3432 	},
3433 	{ /* MC0 Channel 0 */
3434 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3435 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3436 	},
3437 	{ /* MC0 Channel 1 */
3438 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3439 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3440 	},
3441 	{ /* MC0 Channel 2 */
3442 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3443 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3444 	},
3445 	{ /* MC0 Channel 3 */
3446 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3447 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3448 	},
3449 	{ /* MC1 Channel 0 */
3450 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3451 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3452 	},
3453 	{ /* MC1 Channel 1 */
3454 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3455 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3456 	},
3457 	{ /* MC1 Channel 2 */
3458 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3459 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3460 	},
3461 	{ /* MC1 Channel 3 */
3462 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3463 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3464 	},
3465 	{ /* IRP */
3466 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3467 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3468 	},
3469 	{ /* QPI0 Port 0 */
3470 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3471 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3472 	},
3473 	{ /* QPI0 Port 1 */
3474 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3475 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3476 	},
3477 	{ /* QPI1 Port 2 */
3478 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3479 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3480 	},
3481 	{ /* R2PCIe */
3482 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3483 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3484 	},
3485 	{ /* R3QPI0 Link 0 */
3486 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3487 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3488 	},
3489 	{ /* R3QPI0 Link 1 */
3490 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3491 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3492 	},
3493 	{ /* R3QPI1 Link 2 */
3494 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3495 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3496 	},
3497 	{ /* QPI Port 0 filter  */
3498 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3499 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3500 						   SNBEP_PCI_QPI_PORT0_FILTER),
3501 	},
3502 	{ /* QPI Port 1 filter  */
3503 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3504 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3505 						   SNBEP_PCI_QPI_PORT1_FILTER),
3506 	},
3507 	{ /* QPI Port 2 filter  */
3508 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3509 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3510 						   BDX_PCI_QPI_PORT2_FILTER),
3511 	},
3512 	{ /* end: all zeroes */ }
3513 };
3514 
3515 static struct pci_driver bdx_uncore_pci_driver = {
3516 	.name		= "bdx_uncore",
3517 	.id_table	= bdx_uncore_pci_ids,
3518 };
3519 
3520 int bdx_uncore_pci_init(void)
3521 {
3522 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3523 
3524 	if (ret)
3525 		return ret;
3526 	uncore_pci_uncores = bdx_pci_uncores;
3527 	uncore_pci_driver = &bdx_uncore_pci_driver;
3528 	return 0;
3529 }
3530 
3531 /* end of BDX uncore support */
3532 
3533 /* SKX uncore support */
3534 
3535 static struct intel_uncore_type skx_uncore_ubox = {
3536 	.name			= "ubox",
3537 	.num_counters		= 2,
3538 	.num_boxes		= 1,
3539 	.perf_ctr_bits		= 48,
3540 	.fixed_ctr_bits		= 48,
3541 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3542 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3543 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3544 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3545 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3546 	.ops			= &ivbep_uncore_msr_ops,
3547 	.format_group		= &ivbep_uncore_ubox_format_group,
3548 };
3549 
3550 static struct attribute *skx_uncore_cha_formats_attr[] = {
3551 	&format_attr_event.attr,
3552 	&format_attr_umask.attr,
3553 	&format_attr_edge.attr,
3554 	&format_attr_tid_en.attr,
3555 	&format_attr_inv.attr,
3556 	&format_attr_thresh8.attr,
3557 	&format_attr_filter_tid4.attr,
3558 	&format_attr_filter_state5.attr,
3559 	&format_attr_filter_rem.attr,
3560 	&format_attr_filter_loc.attr,
3561 	&format_attr_filter_nm.attr,
3562 	&format_attr_filter_all_op.attr,
3563 	&format_attr_filter_not_nm.attr,
3564 	&format_attr_filter_opc_0.attr,
3565 	&format_attr_filter_opc_1.attr,
3566 	&format_attr_filter_nc.attr,
3567 	&format_attr_filter_isoc.attr,
3568 	NULL,
3569 };
3570 
3571 static const struct attribute_group skx_uncore_chabox_format_group = {
3572 	.name = "format",
3573 	.attrs = skx_uncore_cha_formats_attr,
3574 };
3575 
3576 static struct event_constraint skx_uncore_chabox_constraints[] = {
3577 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3578 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3579 	EVENT_CONSTRAINT_END
3580 };
3581 
3582 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3583 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3584 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3585 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3586 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3587 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3588 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3589 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3590 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3591 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3592 	EVENT_EXTRA_END
3593 };
3594 
3595 static u64 skx_cha_filter_mask(int fields)
3596 {
3597 	u64 mask = 0;
3598 
3599 	if (fields & 0x1)
3600 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3601 	if (fields & 0x2)
3602 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3603 	if (fields & 0x4)
3604 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3605 	if (fields & 0x8) {
3606 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3607 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3608 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3609 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3610 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3611 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3612 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3613 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3614 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3615 	}
3616 	return mask;
3617 }
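
/*
 * Illustrative use of the mapping above: skx_cha_hw_config() below
 * accumulates field bits into idx, and the returned mask limits which
 * filter bits a user-supplied config1 may program, e.g.
 *
 *	skx_cha_filter_mask(0x1) == SKX_CHA_MSR_PMON_BOX_FILTER_TID
 *	skx_cha_filter_mask(0x3) == SKX_CHA_MSR_PMON_BOX_FILTER_TID |
 *				    SKX_CHA_MSR_PMON_BOX_FILTER_LINK
 */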
3618 
3619 static struct event_constraint *
3620 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3621 {
3622 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3623 }
3624 
3625 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3626 {
3627 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3628 	struct extra_reg *er;
3629 	int idx = 0;
3630 	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3631 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3632 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3633 
3634 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3635 		if (er->event != (event->hw.config & er->config_mask))
3636 			continue;
3637 		idx |= er->idx;
3638 	}
3639 
3640 	if (idx) {
3641 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3642 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3643 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3644 		reg1->idx = idx;
3645 	}
3646 	return 0;
3647 }
3648 
3649 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3650 	/* There is no frz_en for chabox ctl */
3651 	.init_box		= ivbep_uncore_msr_init_box,
3652 	.disable_box		= snbep_uncore_msr_disable_box,
3653 	.enable_box		= snbep_uncore_msr_enable_box,
3654 	.disable_event		= snbep_uncore_msr_disable_event,
3655 	.enable_event		= hswep_cbox_enable_event,
3656 	.read_counter		= uncore_msr_read_counter,
3657 	.hw_config		= skx_cha_hw_config,
3658 	.get_constraint		= skx_cha_get_constraint,
3659 	.put_constraint		= snbep_cbox_put_constraint,
3660 };
3661 
3662 static struct intel_uncore_type skx_uncore_chabox = {
3663 	.name			= "cha",
3664 	.num_counters		= 4,
3665 	.perf_ctr_bits		= 48,
3666 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3667 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3668 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3669 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3670 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3671 	.num_shared_regs	= 1,
3672 	.constraints		= skx_uncore_chabox_constraints,
3673 	.ops			= &skx_uncore_chabox_ops,
3674 	.format_group		= &skx_uncore_chabox_format_group,
3675 };
3676 
3677 static struct attribute *skx_uncore_iio_formats_attr[] = {
3678 	&format_attr_event.attr,
3679 	&format_attr_umask.attr,
3680 	&format_attr_edge.attr,
3681 	&format_attr_inv.attr,
3682 	&format_attr_thresh9.attr,
3683 	&format_attr_ch_mask.attr,
3684 	&format_attr_fc_mask.attr,
3685 	NULL,
3686 };
3687 
3688 static const struct attribute_group skx_uncore_iio_format_group = {
3689 	.name = "format",
3690 	.attrs = skx_uncore_iio_formats_attr,
3691 };
3692 
3693 static struct event_constraint skx_uncore_iio_constraints[] = {
3694 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3695 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3696 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3697 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3698 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3699 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3700 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3701 	EVENT_CONSTRAINT_END
3702 };
3703 
3704 static void skx_iio_enable_event(struct intel_uncore_box *box,
3705 				 struct perf_event *event)
3706 {
3707 	struct hw_perf_event *hwc = &event->hw;
3708 
3709 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3710 }
3711 
3712 static struct intel_uncore_ops skx_uncore_iio_ops = {
3713 	.init_box		= ivbep_uncore_msr_init_box,
3714 	.disable_box		= snbep_uncore_msr_disable_box,
3715 	.enable_box		= snbep_uncore_msr_enable_box,
3716 	.disable_event		= snbep_uncore_msr_disable_event,
3717 	.enable_event		= skx_iio_enable_event,
3718 	.read_counter		= uncore_msr_read_counter,
3719 };
3720 
3721 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3722 {
3723 	int idx;
3724 
3725 	for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3726 		if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3727 			return &pmu->type->topology[die][idx];
3728 	}
3729 
3730 	return NULL;
3731 }
3732 
3733 static umode_t
3734 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3735 			 int die, int zero_bus_pmu)
3736 {
3737 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3738 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3739 
3740 	return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3741 }
3742 
3743 static umode_t
3744 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3745 {
3746 	/* Root bus 0x00 is valid only for pmu_idx = 0. */
3747 	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3748 }
3749 
3750 static ssize_t skx_iio_mapping_show(struct device *dev,
3751 				    struct device_attribute *attr, char *buf)
3752 {
3753 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3754 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3755 	long die = (long)ea->var;
3756 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3757 
3758 	return sysfs_emit(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3759 					      pmut ? pmut->iio->pci_bus_no : 0);
3760 }
3761 
3762 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3763 {
3764 	u64 msr_value;
3765 
3766 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3767 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3768 		return -ENXIO;
3769 
3770 	*topology = msr_value;
3771 
3772 	return 0;
3773 }
3774 
3775 static int die_to_cpu(int die)
3776 {
3777 	int res = 0, cpu, current_die;
3778 	/*
3779 	 * Using cpus_read_lock() to ensure a CPU cannot go offline while
3780 	 * we walk cpu_online_mask.
3781 	 */
3782 	cpus_read_lock();
3783 	for_each_online_cpu(cpu) {
3784 		current_die = topology_logical_die_id(cpu);
3785 		if (current_die == die) {
3786 			res = cpu;
3787 			break;
3788 		}
3789 	}
3790 	cpus_read_unlock();
3791 	return res;
3792 }
3793 
3794 enum {
3795 	IIO_TOPOLOGY_TYPE,
3796 	UPI_TOPOLOGY_TYPE,
3797 	TOPOLOGY_MAX
3798 };
3799 
3800 static const size_t topology_size[TOPOLOGY_MAX] = {
3801 	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
3802 	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
3803 };
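
/*
 * The sizeof expressions above never dereference the NULL pointer: sizeof
 * is evaluated at compile time, so casting NULL merely names the member
 * types and keeps the element sizes in sync with the iio/upi structures.
 */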
3804 
3805 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3806 {
3807 	int die, idx;
3808 	struct intel_uncore_topology **topology;
3809 
3810 	if (!type->num_boxes)
3811 		return -EPERM;
3812 
3813 	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3814 	if (!topology)
3815 		goto err;
3816 
3817 	for (die = 0; die < uncore_max_dies(); die++) {
3818 		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3819 		if (!topology[die])
3820 			goto clear;
3821 		for (idx = 0; idx < type->num_boxes; idx++) {
3822 			topology[die][idx].untyped = kcalloc(type->num_boxes,
3823 							     topology_size[topology_type],
3824 							     GFP_KERNEL);
3825 			if (!topology[die][idx].untyped)
3826 				goto clear;
3827 		}
3828 	}
3829 
3830 	type->topology = topology;
3831 
3832 	return 0;
3833 clear:
3834 	for (; die >= 0; die--) {
3835 		for (idx = 0; topology[die] && idx < type->num_boxes; idx++)
3836 			kfree(topology[die][idx].untyped);
3837 		kfree(topology[die]);
3838 	}
3839 	kfree(topology);
3840 err:
3841 	return -ENOMEM;
3842 }
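
/*
 * The resulting shape (per the intel_uncore_topology definition in
 * uncore.h, where untyped/iio/upi share a union):
 *
 *	type->topology[die][box].iio	for IIO_TOPOLOGY_TYPE
 *	type->topology[die][box].upi	for UPI_TOPOLOGY_TYPE
 */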
3843 
3844 static void pmu_free_topology(struct intel_uncore_type *type)
3845 {
3846 	int die, idx;
3847 
3848 	if (type->topology) {
3849 		for (die = 0; die < uncore_max_dies(); die++) {
3850 			for (idx = 0; idx < type->num_boxes; idx++)
3851 				kfree(type->topology[die][idx].untyped);
3852 			kfree(type->topology[die]);
3853 		}
3854 		kfree(type->topology);
3855 		type->topology = NULL;
3856 	}
3857 }
3858 
3859 static int skx_pmu_get_topology(struct intel_uncore_type *type,
3860 				 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
3861 {
3862 	int die, ret = -EPERM;
3863 	u64 cpu_bus_msr;
3864 
3865 	for (die = 0; die < uncore_max_dies(); die++) {
3866 		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3867 		if (ret)
3868 			break;
3869 
3870 		ret = uncore_die_to_segment(die);
3871 		if (ret < 0)
3872 			break;
3873 
3874 		ret = topology_cb(type, ret, die, cpu_bus_msr);
3875 		if (ret)
3876 			break;
3877 	}
3878 
3879 	return ret;
3880 }
3881 
3882 static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
3883 				int die, u64 cpu_bus_msr)
3884 {
3885 	int idx;
3886 	struct intel_uncore_topology *t;
3887 
3888 	for (idx = 0; idx < type->num_boxes; idx++) {
3889 		t = &type->topology[die][idx];
3890 		t->pmu_idx = idx;
3891 		t->iio->segment = segment;
3892 		t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
3893 	}
3894 
3895 	return 0;
3896 }
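
/*
 * Layout assumed above: SKX_MSR_CPU_BUS_NUMBER carries one root-bus number
 * per 8-bit field (BUS_NUM_STRIDE), one field per IIO stack, so PMON box
 * idx reads byte idx:
 *
 *	pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
 */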
3897 
3898 static int skx_iio_get_topology(struct intel_uncore_type *type)
3899 {
3900 	return skx_pmu_get_topology(type, skx_iio_topology_cb);
3901 }
3902 
3903 static struct attribute_group skx_iio_mapping_group = {
3904 	.is_visible	= skx_iio_mapping_visible,
3905 };
3906 
3907 static const struct attribute_group *skx_iio_attr_update[] = {
3908 	&skx_iio_mapping_group,
3909 	NULL,
3910 };
3911 
3912 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3913 				   struct attribute_group *ag)
3914 {
3915 	int i;
3916 
3917 	for (i = 0; groups[i]; i++) {
3918 		if (groups[i] == ag) {
3919 			for (i++; groups[i]; i++)
3920 				groups[i - 1] = groups[i];
3921 			groups[i - 1] = NULL;
3922 			break;
3923 		}
3924 	}
3925 }
3926 
3927 static void
3928 pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3929 		ssize_t (*show)(struct device*, struct device_attribute*, char*),
3930 		int topology_type)
3931 {
3932 	char buf[64];
3933 	int ret;
3934 	long die = -1;
3935 	struct attribute **attrs = NULL;
3936 	struct dev_ext_attribute *eas = NULL;
3937 
3938 	ret = pmu_alloc_topology(type, topology_type);
3939 	if (ret < 0)
3940 		goto clear_attr_update;
3941 
3942 	ret = type->get_topology(type);
3943 	if (ret < 0)
3944 		goto clear_topology;
3945 
3946 	/* One more for NULL. */
3947 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3948 	if (!attrs)
3949 		goto clear_topology;
3950 
3951 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3952 	if (!eas)
3953 		goto clear_attrs;
3954 
3955 	for (die = 0; die < uncore_max_dies(); die++) {
3956 		snprintf(buf, sizeof(buf), "die%ld", die);
3957 		sysfs_attr_init(&eas[die].attr.attr);
3958 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3959 		if (!eas[die].attr.attr.name)
3960 			goto err;
3961 		eas[die].attr.attr.mode = 0444;
3962 		eas[die].attr.show = show;
3963 		eas[die].attr.store = NULL;
3964 		eas[die].var = (void *)die;
3965 		attrs[die] = &eas[die].attr.attr;
3966 	}
3967 	ag->attrs = attrs;
3968 
3969 	return;
3970 err:
3971 	for (; die >= 0; die--)
3972 		kfree(eas[die].attr.attr.name);
3973 	kfree(eas);
3974 clear_attrs:
3975 	kfree(attrs);
3976 clear_topology:
3977 	pmu_free_topology(type);
3978 clear_attr_update:
3979 	pmu_clear_mapping_attr(type->attr_update, ag);
3980 }
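
/*
 * On success the mapping group gains one read-only attribute per die
 * ("die0", "die1", ...), rendered by the show() callback.  For the IIO
 * case this surfaces files such as (example output):
 *
 *	$ cat /sys/devices/uncore_iio_0/die0
 *	0000:00
 */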
3981 
3982 static void
3983 pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3984 {
3985 	struct attribute **attr = ag->attrs;
3986 
3987 	if (!attr)
3988 		return;
3989 
3990 	for (; *attr; attr++)
3991 		kfree((*attr)->name);
3992 	kfree(attr_to_ext_attr(*ag->attrs));
3993 	kfree(ag->attrs);
3994 	ag->attrs = NULL;
3995 	pmu_free_topology(type);
3996 }
3997 
3998 static void
3999 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4000 {
4001 	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
4002 }
4003 
4004 static void skx_iio_set_mapping(struct intel_uncore_type *type)
4005 {
4006 	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4007 }
4008 
4009 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
4010 {
4011 	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4012 }
4013 
4014 static struct intel_uncore_type skx_uncore_iio = {
4015 	.name			= "iio",
4016 	.num_counters		= 4,
4017 	.num_boxes		= 6,
4018 	.perf_ctr_bits		= 48,
4019 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
4020 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
4021 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
4022 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4023 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
4024 	.msr_offset		= SKX_IIO_MSR_OFFSET,
4025 	.constraints		= skx_uncore_iio_constraints,
4026 	.ops			= &skx_uncore_iio_ops,
4027 	.format_group		= &skx_uncore_iio_format_group,
4028 	.attr_update		= skx_iio_attr_update,
4029 	.get_topology		= skx_iio_get_topology,
4030 	.set_mapping		= skx_iio_set_mapping,
4031 	.cleanup_mapping	= skx_iio_cleanup_mapping,
4032 };
4033 
4034 enum perf_uncore_iio_freerunning_type_id {
4035 	SKX_IIO_MSR_IOCLK			= 0,
4036 	SKX_IIO_MSR_BW				= 1,
4037 	SKX_IIO_MSR_UTIL			= 2,
4038 
4039 	SKX_IIO_FREERUNNING_TYPE_MAX,
4040 };
4041 
4042 
4043 static struct freerunning_counters skx_iio_freerunning[] = {
4044 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
4045 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
4046 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
4047 };
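
/*
 * Field order per struct freerunning_counters in uncore.h:
 * { counter_base, counter_offset, box_offset, num_counters, bits }.
 * The IOCLK entry, for instance, is a single 36-bit counter at MSR 0xa45
 * with boxes spaced 0x20 apart.
 */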
4048 
4049 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
4050 	/* Free-Running IO CLOCKS Counter */
4051 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4052 	/* Free-Running IIO BANDWIDTH Counters */
4053 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4054 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4055 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4056 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4057 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4058 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4059 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4060 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4061 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4062 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4063 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4064 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4065 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
4066 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
4067 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
4068 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
4069 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
4070 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
4071 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
4072 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
4073 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
4074 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
4075 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
4076 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
4077 	/* Free-Running IIO UTILIZATION Counters */
4078 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
4079 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
4080 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
4081 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
4082 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
4083 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
4084 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
4085 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
4086 	{ /* end: all zeroes */ },
4087 };
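
/*
 * The 3.814697266e-6 scale above is 4 / 2^20: each bandwidth counter
 * increments once per 4 bytes transferred, and the scale converts the
 * raw count into the advertised MiB unit.
 */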
4088 
4089 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
4090 	.read_counter		= uncore_msr_read_counter,
4091 	.hw_config		= uncore_freerunning_hw_config,
4092 };
4093 
4094 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
4095 	&format_attr_event.attr,
4096 	&format_attr_umask.attr,
4097 	NULL,
4098 };
4099 
4100 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
4101 	.name = "format",
4102 	.attrs = skx_uncore_iio_freerunning_formats_attr,
4103 };
4104 
4105 static struct intel_uncore_type skx_uncore_iio_free_running = {
4106 	.name			= "iio_free_running",
4107 	.num_counters		= 17,
4108 	.num_boxes		= 6,
4109 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
4110 	.freerunning		= skx_iio_freerunning,
4111 	.ops			= &skx_uncore_iio_freerunning_ops,
4112 	.event_descs		= skx_uncore_iio_freerunning_events,
4113 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4114 };
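
/*
 * num_counters = 17 is the sum over the free-running types above:
 * 1 IOCLK counter + 8 bandwidth counters + 8 utilization counters.
 */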
4115 
4116 static struct attribute *skx_uncore_formats_attr[] = {
4117 	&format_attr_event.attr,
4118 	&format_attr_umask.attr,
4119 	&format_attr_edge.attr,
4120 	&format_attr_inv.attr,
4121 	&format_attr_thresh8.attr,
4122 	NULL,
4123 };
4124 
4125 static const struct attribute_group skx_uncore_format_group = {
4126 	.name = "format",
4127 	.attrs = skx_uncore_formats_attr,
4128 };
4129 
4130 static struct intel_uncore_type skx_uncore_irp = {
4131 	.name			= "irp",
4132 	.num_counters		= 2,
4133 	.num_boxes		= 6,
4134 	.perf_ctr_bits		= 48,
4135 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
4136 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
4137 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4138 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
4139 	.msr_offset		= SKX_IRP_MSR_OFFSET,
4140 	.ops			= &skx_uncore_iio_ops,
4141 	.format_group		= &skx_uncore_format_group,
4142 };
4143 
4144 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4145 	&format_attr_event.attr,
4146 	&format_attr_umask.attr,
4147 	&format_attr_edge.attr,
4148 	&format_attr_inv.attr,
4149 	&format_attr_thresh8.attr,
4150 	&format_attr_occ_invert.attr,
4151 	&format_attr_occ_edge_det.attr,
4152 	&format_attr_filter_band0.attr,
4153 	&format_attr_filter_band1.attr,
4154 	&format_attr_filter_band2.attr,
4155 	&format_attr_filter_band3.attr,
4156 	NULL,
4157 };
4158 
4159 static struct attribute_group skx_uncore_pcu_format_group = {
4160 	.name = "format",
4161 	.attrs = skx_uncore_pcu_formats_attr,
4162 };
4163 
4164 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4165 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4166 	.hw_config		= hswep_pcu_hw_config,
4167 	.get_constraint		= snbep_pcu_get_constraint,
4168 	.put_constraint		= snbep_pcu_put_constraint,
4169 };
4170 
4171 static struct intel_uncore_type skx_uncore_pcu = {
4172 	.name			= "pcu",
4173 	.num_counters		= 4,
4174 	.num_boxes		= 1,
4175 	.perf_ctr_bits		= 48,
4176 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
4177 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4178 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4179 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4180 	.num_shared_regs	= 1,
4181 	.ops			= &skx_uncore_pcu_ops,
4182 	.format_group		= &skx_uncore_pcu_format_group,
4183 };
4184 
4185 static struct intel_uncore_type *skx_msr_uncores[] = {
4186 	&skx_uncore_ubox,
4187 	&skx_uncore_chabox,
4188 	&skx_uncore_iio,
4189 	&skx_uncore_iio_free_running,
4190 	&skx_uncore_irp,
4191 	&skx_uncore_pcu,
4192 	NULL,
4193 };
4194 
4195 /*
4196  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4197  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4198  */
4199 #define SKX_CAPID6		0x9c
4200 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4201 
4202 static int skx_count_chabox(void)
4203 {
4204 	struct pci_dev *dev = NULL;
4205 	u32 val = 0;
4206 
4207 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4208 	if (!dev)
4209 		goto out;
4210 
4211 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4212 	val &= SKX_CHA_BIT_MASK;
4213 out:
4214 	pci_dev_put(dev);
4215 	return hweight32(val);
4216 }
4217 
4218 void skx_uncore_cpu_init(void)
4219 {
4220 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4221 	uncore_msr_uncores = skx_msr_uncores;
4222 }
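
/*
 * Each set bit in CAPID6[27:0] corresponds to one enabled CHA, so the
 * hweight32() in skx_count_chabox() yields the per-socket CHA count.
 * Parts may have CHAs fused off, which is why num_boxes is probed at
 * runtime instead of being hard-coded.
 */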
4223 
4224 static struct intel_uncore_type skx_uncore_imc = {
4225 	.name		= "imc",
4226 	.num_counters   = 4,
4227 	.num_boxes	= 6,
4228 	.perf_ctr_bits	= 48,
4229 	.fixed_ctr_bits	= 48,
4230 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4231 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4232 	.event_descs	= hswep_uncore_imc_events,
4233 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4234 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4235 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4236 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4237 	.ops		= &ivbep_uncore_pci_ops,
4238 	.format_group	= &skx_uncore_format_group,
4239 };
4240 
4241 static struct attribute *skx_upi_uncore_formats_attr[] = {
4242 	&format_attr_event.attr,
4243 	&format_attr_umask_ext.attr,
4244 	&format_attr_edge.attr,
4245 	&format_attr_inv.attr,
4246 	&format_attr_thresh8.attr,
4247 	NULL,
4248 };
4249 
4250 static const struct attribute_group skx_upi_uncore_format_group = {
4251 	.name = "format",
4252 	.attrs = skx_upi_uncore_formats_attr,
4253 };
4254 
4255 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4256 {
4257 	struct pci_dev *pdev = box->pci_dev;
4258 
4259 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4260 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4261 }
4262 
4263 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4264 	.init_box	= skx_upi_uncore_pci_init_box,
4265 	.disable_box	= snbep_uncore_pci_disable_box,
4266 	.enable_box	= snbep_uncore_pci_enable_box,
4267 	.disable_event	= snbep_uncore_pci_disable_event,
4268 	.enable_event	= snbep_uncore_pci_enable_event,
4269 	.read_counter	= snbep_uncore_pci_read_counter,
4270 };
4271 
4272 static umode_t
4273 skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4274 {
4275 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
4276 
4277 	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
4278 }
4279 
4280 static ssize_t skx_upi_mapping_show(struct device *dev,
4281 				    struct device_attribute *attr, char *buf)
4282 {
4283 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4284 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4285 	long die = (long)ea->var;
4286 	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4287 
4288 	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4289 }
4290 
4291 #define SKX_UPI_REG_DID			0x2058
4292 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0	0x0e
4293 #define SKX_UPI_REGS_ADDR_FUNCTION	0x00
4294 
4295 /*
4296  * UPI Link Parameter 0
4297  * |  Bit  |  Default  |  Description
4298  * | 19:16 |     0h    | base_nodeid - The NodeID of the sending socket.
4299  * | 12:8  |    00h    | sending_port - The processor die port number of the sending port.
4300  */
4301 #define SKX_KTILP0_OFFSET	0x94
4302 
4303 /*
4304  * UPI Pcode Status. This register is used by PCode to store the link training status.
4305  * |  Bit  |  Default  |  Description
4306  * |   4   |     0h    | ll_status_valid — Bit indicates the valid training status
4307  *                       logged from PCode to the BIOS.
4308  */
4309 #define SKX_KTIPCSTS_OFFSET	0x120
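
/*
 * upi_fill_topology() below extracts the fields documented above; a sketch
 * of the decode:
 *
 *	enabled    = (KTIPCSTS >> 4) & 0x1;	(ll_status_valid)
 *	die_to     = (KTILP0 >> 16) & 0xf;	(base_nodeid)
 *	pmu_idx_to = (KTILP0 >> 8) & 0x1f;	(sending_port)
 */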
4310 
4311 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4312 			     int pmu_idx)
4313 {
4314 	int ret;
4315 	u32 upi_conf;
4316 	struct uncore_upi_topology *upi = tp->upi;
4317 
4318 	tp->pmu_idx = pmu_idx;
4319 	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4320 	if (ret) {
4321 		ret = pcibios_err_to_errno(ret);
4322 		goto err;
4323 	}
4324 	upi->enabled = (upi_conf >> 4) & 1;
4325 	if (upi->enabled) {
4326 		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4327 					    &upi_conf);
4328 		if (ret) {
4329 			ret = pcibios_err_to_errno(ret);
4330 			goto err;
4331 		}
4332 		upi->die_to = (upi_conf >> 16) & 0xf;
4333 		upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4334 	}
4335 err:
4336 	return ret;
4337 }
4338 
4339 static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
4340 				int die, u64 cpu_bus_msr)
4341 {
4342 	int idx, ret = 0;
4343 	struct intel_uncore_topology *upi;
4344 	unsigned int devfn;
4345 	struct pci_dev *dev = NULL;
4346 	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
4347 
4348 	for (idx = 0; idx < type->num_boxes; idx++) {
4349 		upi = &type->topology[die][idx];
4350 		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
4351 				  SKX_UPI_REGS_ADDR_FUNCTION);
4352 		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4353 		if (dev) {
4354 			ret = upi_fill_topology(dev, upi, idx);
4355 			pci_dev_put(dev);
4356 			if (ret)
4357 				break;
4358 		}
4359 	}
4360 
4361 	return ret;
4362 }
4363 
4364 static int skx_upi_get_topology(struct intel_uncore_type *type)
4365 {
4366 	/* CPX (Cooper Lake, stepping 11) is not supported */
4367 	if (boot_cpu_data.x86_stepping == 11)
4368 		return -EPERM;
4369 
4370 	return skx_pmu_get_topology(type, skx_upi_topology_cb);
4371 }
4372 
4373 static struct attribute_group skx_upi_mapping_group = {
4374 	.is_visible	= skx_upi_mapping_visible,
4375 };
4376 
4377 static const struct attribute_group *skx_upi_attr_update[] = {
4378 	&skx_upi_mapping_group,
4379 	NULL
4380 };
4381 
4382 static void
4383 pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4384 {
4385 	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4386 }
4387 
4388 static void skx_upi_set_mapping(struct intel_uncore_type *type)
4389 {
4390 	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4391 }
4392 
4393 static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
4394 {
4395 	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4396 }
4397 
4398 static struct intel_uncore_type skx_uncore_upi = {
4399 	.name		= "upi",
4400 	.num_counters   = 4,
4401 	.num_boxes	= 3,
4402 	.perf_ctr_bits	= 48,
4403 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4404 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4405 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4406 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4407 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4408 	.ops		= &skx_upi_uncore_pci_ops,
4409 	.format_group	= &skx_upi_uncore_format_group,
4410 	.attr_update	= skx_upi_attr_update,
4411 	.get_topology	= skx_upi_get_topology,
4412 	.set_mapping	= skx_upi_set_mapping,
4413 	.cleanup_mapping = skx_upi_cleanup_mapping,
4414 };
4415 
4416 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4417 {
4418 	struct pci_dev *pdev = box->pci_dev;
4419 
4420 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4421 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4422 }
4423 
4424 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4425 	.init_box	= skx_m2m_uncore_pci_init_box,
4426 	.disable_box	= snbep_uncore_pci_disable_box,
4427 	.enable_box	= snbep_uncore_pci_enable_box,
4428 	.disable_event	= snbep_uncore_pci_disable_event,
4429 	.enable_event	= snbep_uncore_pci_enable_event,
4430 	.read_counter	= snbep_uncore_pci_read_counter,
4431 };
4432 
4433 static struct intel_uncore_type skx_uncore_m2m = {
4434 	.name		= "m2m",
4435 	.num_counters   = 4,
4436 	.num_boxes	= 2,
4437 	.perf_ctr_bits	= 48,
4438 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4439 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4440 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4441 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4442 	.ops		= &skx_m2m_uncore_pci_ops,
4443 	.format_group	= &skx_uncore_format_group,
4444 };
4445 
4446 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4447 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4448 	EVENT_CONSTRAINT_END
4449 };
4450 
4451 static struct intel_uncore_type skx_uncore_m2pcie = {
4452 	.name		= "m2pcie",
4453 	.num_counters   = 4,
4454 	.num_boxes	= 4,
4455 	.perf_ctr_bits	= 48,
4456 	.constraints	= skx_uncore_m2pcie_constraints,
4457 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4458 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4459 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4460 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4461 	.ops		= &ivbep_uncore_pci_ops,
4462 	.format_group	= &skx_uncore_format_group,
4463 };
4464 
4465 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4466 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4467 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4468 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4469 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4470 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4471 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4472 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4473 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4474 	EVENT_CONSTRAINT_END
4475 };
4476 
4477 static struct intel_uncore_type skx_uncore_m3upi = {
4478 	.name		= "m3upi",
4479 	.num_counters   = 3,
4480 	.num_boxes	= 3,
4481 	.perf_ctr_bits	= 48,
4482 	.constraints	= skx_uncore_m3upi_constraints,
4483 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4484 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4485 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4486 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4487 	.ops		= &ivbep_uncore_pci_ops,
4488 	.format_group	= &skx_uncore_format_group,
4489 };
4490 
4491 enum {
4492 	SKX_PCI_UNCORE_IMC,
4493 	SKX_PCI_UNCORE_M2M,
4494 	SKX_PCI_UNCORE_UPI,
4495 	SKX_PCI_UNCORE_M2PCIE,
4496 	SKX_PCI_UNCORE_M3UPI,
4497 };
4498 
4499 static struct intel_uncore_type *skx_pci_uncores[] = {
4500 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4501 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4502 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4503 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4504 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4505 	NULL,
4506 };
4507 
4508 static const struct pci_device_id skx_uncore_pci_ids[] = {
4509 	{ /* MC0 Channel 0 */
4510 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4511 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4512 	},
4513 	{ /* MC0 Channel 1 */
4514 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4515 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4516 	},
4517 	{ /* MC0 Channel 2 */
4518 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4519 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4520 	},
4521 	{ /* MC1 Channel 0 */
4522 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4523 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4524 	},
4525 	{ /* MC1 Channel 1 */
4526 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4527 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4528 	},
4529 	{ /* MC1 Channel 2 */
4530 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4531 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4532 	},
4533 	{ /* M2M0 */
4534 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4535 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4536 	},
4537 	{ /* M2M1 */
4538 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4539 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4540 	},
4541 	{ /* UPI0 Link 0 */
4542 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4543 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4544 	},
4545 	{ /* UPI0 Link 1 */
4546 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4547 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4548 	},
4549 	{ /* UPI1 Link 2 */
4550 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4551 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4552 	},
4553 	{ /* M2PCIe 0 */
4554 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4555 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4556 	},
4557 	{ /* M2PCIe 1 */
4558 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4559 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4560 	},
4561 	{ /* M2PCIe 2 */
4562 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4563 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4564 	},
4565 	{ /* M2PCIe 3 */
4566 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4567 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4568 	},
4569 	{ /* M3UPI0 Link 0 */
4570 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4571 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4572 	},
4573 	{ /* M3UPI0 Link 1 */
4574 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4575 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4576 	},
4577 	{ /* M3UPI1 Link 2 */
4578 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4579 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4580 	},
4581 	{ /* end: all zeroes */ }
4582 };
4583 
4584 
4585 static struct pci_driver skx_uncore_pci_driver = {
4586 	.name		= "skx_uncore",
4587 	.id_table	= skx_uncore_pci_ids,
4588 };
4589 
4590 int skx_uncore_pci_init(void)
4591 {
4592 	/* need to double check pci address */
4593 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4594 
4595 	if (ret)
4596 		return ret;
4597 
4598 	uncore_pci_uncores = skx_pci_uncores;
4599 	uncore_pci_driver = &skx_uncore_pci_driver;
4600 	return 0;
4601 }
4602 
4603 /* end of SKX uncore support */
4604 
4605 /* SNR uncore support */
4606 
4607 static struct intel_uncore_type snr_uncore_ubox = {
4608 	.name			= "ubox",
4609 	.num_counters		= 2,
4610 	.num_boxes		= 1,
4611 	.perf_ctr_bits		= 48,
4612 	.fixed_ctr_bits		= 48,
4613 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4614 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4615 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4616 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4617 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4618 	.ops			= &ivbep_uncore_msr_ops,
4619 	.format_group		= &ivbep_uncore_format_group,
4620 };
4621 
4622 static struct attribute *snr_uncore_cha_formats_attr[] = {
4623 	&format_attr_event.attr,
4624 	&format_attr_umask_ext2.attr,
4625 	&format_attr_edge.attr,
4626 	&format_attr_tid_en.attr,
4627 	&format_attr_inv.attr,
4628 	&format_attr_thresh8.attr,
4629 	&format_attr_filter_tid5.attr,
4630 	NULL,
4631 };
4632 static const struct attribute_group snr_uncore_chabox_format_group = {
4633 	.name = "format",
4634 	.attrs = snr_uncore_cha_formats_attr,
4635 };
4636 
4637 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4638 {
4639 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4640 
4641 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4642 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4643 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4644 	reg1->idx = 0;
4645 
4646 	return 0;
4647 }
4648 
4649 static void snr_cha_enable_event(struct intel_uncore_box *box,
4650 				   struct perf_event *event)
4651 {
4652 	struct hw_perf_event *hwc = &event->hw;
4653 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4654 
4655 	if (reg1->idx != EXTRA_REG_NONE)
4656 		wrmsrl(reg1->reg, reg1->config);
4657 
4658 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4659 }
4660 
4661 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4662 	.init_box		= ivbep_uncore_msr_init_box,
4663 	.disable_box		= snbep_uncore_msr_disable_box,
4664 	.enable_box		= snbep_uncore_msr_enable_box,
4665 	.disable_event		= snbep_uncore_msr_disable_event,
4666 	.enable_event		= snr_cha_enable_event,
4667 	.read_counter		= uncore_msr_read_counter,
4668 	.hw_config		= snr_cha_hw_config,
4669 };
4670 
4671 static struct intel_uncore_type snr_uncore_chabox = {
4672 	.name			= "cha",
4673 	.num_counters		= 4,
4674 	.num_boxes		= 6,
4675 	.perf_ctr_bits		= 48,
4676 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4677 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4678 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4679 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4680 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4681 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4682 	.ops			= &snr_uncore_chabox_ops,
4683 	.format_group		= &snr_uncore_chabox_format_group,
4684 };
4685 
4686 static struct attribute *snr_uncore_iio_formats_attr[] = {
4687 	&format_attr_event.attr,
4688 	&format_attr_umask.attr,
4689 	&format_attr_edge.attr,
4690 	&format_attr_inv.attr,
4691 	&format_attr_thresh9.attr,
4692 	&format_attr_ch_mask2.attr,
4693 	&format_attr_fc_mask2.attr,
4694 	NULL,
4695 };
4696 
4697 static const struct attribute_group snr_uncore_iio_format_group = {
4698 	.name = "format",
4699 	.attrs = snr_uncore_iio_formats_attr,
4700 };
4701 
4702 static umode_t
4703 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4704 {
4705 	/* Root bus 0x00 is valid only for pmu_idx = 1. */
4706 	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4707 }
4708 
4709 static struct attribute_group snr_iio_mapping_group = {
4710 	.is_visible	= snr_iio_mapping_visible,
4711 };
4712 
4713 static const struct attribute_group *snr_iio_attr_update[] = {
4714 	&snr_iio_mapping_group,
4715 	NULL,
4716 };
4717 
4718 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4719 {
4720 	u32 sad_cfg;
4721 	int die, stack_id, ret = -EPERM;
4722 	struct pci_dev *dev = NULL;
4723 
4724 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4725 		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4726 		if (ret) {
4727 			ret = pcibios_err_to_errno(ret);
4728 			break;
4729 		}
4730 
4731 		die = uncore_pcibus_to_dieid(dev->bus);
4732 		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4733 		if (die < 0 || stack_id >= type->num_boxes) {
4734 			ret = -EPERM;
4735 			break;
4736 		}
4737 
4738 		/* Convert stack id from SAD_CONTROL to PMON notation. */
4739 		stack_id = sad_pmon_mapping[stack_id];
4740 
4741 		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4742 		type->topology[die][stack_id].pmu_idx = stack_id;
4743 		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4744 	}
4745 
4746 	pci_dev_put(dev);
4747 
4748 	return ret;
4749 }
4750 
4751 /*
4752  * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON notation.
4753  */
4754 enum {
4755 	SNR_QAT_PMON_ID,
4756 	SNR_CBDMA_DMI_PMON_ID,
4757 	SNR_NIS_PMON_ID,
4758 	SNR_DLB_PMON_ID,
4759 	SNR_PCIE_GEN3_PMON_ID
4760 };
4761 
4762 static u8 snr_sad_pmon_mapping[] = {
4763 	SNR_CBDMA_DMI_PMON_ID,
4764 	SNR_PCIE_GEN3_PMON_ID,
4765 	SNR_DLB_PMON_ID,
4766 	SNR_NIS_PMON_ID,
4767 	SNR_QAT_PMON_ID
4768 };
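
/*
 * The array above is indexed by the SAD_CONTROL_CFG stack ID and returns
 * the PMON box index: SAD stack 0 is the CBDMA/DMI stack, which the PMON
 * numbering places at index 1, and so on.
 */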
4769 
4770 static int snr_iio_get_topology(struct intel_uncore_type *type)
4771 {
4772 	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4773 }
4774 
4775 static void snr_iio_set_mapping(struct intel_uncore_type *type)
4776 {
4777 	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4778 }
4779 
4780 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4781 {
4782 	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4783 }
4784 
4785 static struct event_constraint snr_uncore_iio_constraints[] = {
4786 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4787 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4788 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
4789 	EVENT_CONSTRAINT_END
4790 };
4791 
4792 static struct intel_uncore_type snr_uncore_iio = {
4793 	.name			= "iio",
4794 	.num_counters		= 4,
4795 	.num_boxes		= 5,
4796 	.perf_ctr_bits		= 48,
4797 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4798 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4799 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4800 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4801 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4802 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4803 	.constraints		= snr_uncore_iio_constraints,
4804 	.ops			= &ivbep_uncore_msr_ops,
4805 	.format_group		= &snr_uncore_iio_format_group,
4806 	.attr_update		= snr_iio_attr_update,
4807 	.get_topology		= snr_iio_get_topology,
4808 	.set_mapping		= snr_iio_set_mapping,
4809 	.cleanup_mapping	= snr_iio_cleanup_mapping,
4810 };
4811 
4812 static struct intel_uncore_type snr_uncore_irp = {
4813 	.name			= "irp",
4814 	.num_counters		= 2,
4815 	.num_boxes		= 5,
4816 	.perf_ctr_bits		= 48,
4817 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4818 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4819 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4820 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4821 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4822 	.ops			= &ivbep_uncore_msr_ops,
4823 	.format_group		= &ivbep_uncore_format_group,
4824 };
4825 
4826 static struct intel_uncore_type snr_uncore_m2pcie = {
4827 	.name		= "m2pcie",
4828 	.num_counters	= 4,
4829 	.num_boxes	= 5,
4830 	.perf_ctr_bits	= 48,
4831 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4832 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4833 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4834 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4835 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4836 	.ops		= &ivbep_uncore_msr_ops,
4837 	.format_group	= &ivbep_uncore_format_group,
4838 };
4839 
4840 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4841 {
4842 	struct hw_perf_event *hwc = &event->hw;
4843 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4844 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4845 
4846 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4847 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4848 		reg1->idx = ev_sel - 0xb;
4849 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4850 	}
4851 	return 0;
4852 }
4853 
4854 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4855 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4856 	.hw_config		= snr_pcu_hw_config,
4857 	.get_constraint		= snbep_pcu_get_constraint,
4858 	.put_constraint		= snbep_pcu_put_constraint,
4859 };
4860 
4861 static struct intel_uncore_type snr_uncore_pcu = {
4862 	.name			= "pcu",
4863 	.num_counters		= 4,
4864 	.num_boxes		= 1,
4865 	.perf_ctr_bits		= 48,
4866 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4867 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4868 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4869 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4870 	.num_shared_regs	= 1,
4871 	.ops			= &snr_uncore_pcu_ops,
4872 	.format_group		= &skx_uncore_pcu_format_group,
4873 };
4874 
4875 enum perf_uncore_snr_iio_freerunning_type_id {
4876 	SNR_IIO_MSR_IOCLK,
4877 	SNR_IIO_MSR_BW_IN,
4878 
4879 	SNR_IIO_FREERUNNING_TYPE_MAX,
4880 };
4881 
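/*
 * Each entry is { counter_base, counter_offset, box_offset, num_counters,
 * bits } per struct freerunning_counters; ICX below additionally supplies
 * a per-box offset table in place of a fixed box_offset stride.
 */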
4882 static struct freerunning_counters snr_iio_freerunning[] = {
4883 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4884 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4885 };
4886 
4887 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4888 	/* Free-Running IIO CLOCKS Counter */
4889 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4890 	/* Free-Running IIO BANDWIDTH IN Counters */
4891 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4892 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4893 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4894 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4895 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4896 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4897 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4898 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4899 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4900 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4901 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4902 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4903 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4904 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4905 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4906 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4907 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4908 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4909 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4910 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4911 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4912 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4913 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4914 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4915 	{ /* end: all zeroes */ },
4916 };
4917 
4918 static struct intel_uncore_type snr_uncore_iio_free_running = {
4919 	.name			= "iio_free_running",
4920 	.num_counters		= 9,
4921 	.num_boxes		= 5,
4922 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4923 	.freerunning		= snr_iio_freerunning,
4924 	.ops			= &skx_uncore_iio_freerunning_ops,
4925 	.event_descs		= snr_uncore_iio_freerunning_events,
4926 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4927 };
4928 
4929 static struct intel_uncore_type *snr_msr_uncores[] = {
4930 	&snr_uncore_ubox,
4931 	&snr_uncore_chabox,
4932 	&snr_uncore_iio,
4933 	&snr_uncore_irp,
4934 	&snr_uncore_m2pcie,
4935 	&snr_uncore_pcu,
4936 	&snr_uncore_iio_free_running,
4937 	NULL,
4938 };
4939 
4940 void snr_uncore_cpu_init(void)
4941 {
4942 	uncore_msr_uncores = snr_msr_uncores;
4943 }
4944 
4945 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4946 {
4947 	struct pci_dev *pdev = box->pci_dev;
4948 	int box_ctl = uncore_pci_box_ctl(box);
4949 
4950 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4951 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4952 }
4953 
4954 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4955 	.init_box	= snr_m2m_uncore_pci_init_box,
4956 	.disable_box	= snbep_uncore_pci_disable_box,
4957 	.enable_box	= snbep_uncore_pci_enable_box,
4958 	.disable_event	= snbep_uncore_pci_disable_event,
4959 	.enable_event	= snbep_uncore_pci_enable_event,
4960 	.read_counter	= snbep_uncore_pci_read_counter,
4961 };
4962 
4963 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4964 	&format_attr_event.attr,
4965 	&format_attr_umask_ext3.attr,
4966 	&format_attr_edge.attr,
4967 	&format_attr_inv.attr,
4968 	&format_attr_thresh8.attr,
4969 	NULL,
4970 };
4971 
4972 static const struct attribute_group snr_m2m_uncore_format_group = {
4973 	.name = "format",
4974 	.attrs = snr_m2m_uncore_formats_attr,
4975 };
4976 
4977 static struct intel_uncore_type snr_uncore_m2m = {
4978 	.name		= "m2m",
4979 	.num_counters   = 4,
4980 	.num_boxes	= 1,
4981 	.perf_ctr_bits	= 48,
4982 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4983 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4984 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4985 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4986 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4987 	.ops		= &snr_m2m_uncore_pci_ops,
4988 	.format_group	= &snr_m2m_uncore_format_group,
4989 };
4990 
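/*
 * The PCIe3 PMON control register is 64 bits wide: OR the enable bit into
 * the low dword and write the extended umask bits in the high dword.
 */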
4991 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4992 {
4993 	struct pci_dev *pdev = box->pci_dev;
4994 	struct hw_perf_event *hwc = &event->hw;
4995 
4996 	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4997 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4998 }
4999 
5000 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
5001 	.init_box	= snr_m2m_uncore_pci_init_box,
5002 	.disable_box	= snbep_uncore_pci_disable_box,
5003 	.enable_box	= snbep_uncore_pci_enable_box,
5004 	.disable_event	= snbep_uncore_pci_disable_event,
5005 	.enable_event	= snr_uncore_pci_enable_event,
5006 	.read_counter	= snbep_uncore_pci_read_counter,
5007 };
5008 
5009 static struct intel_uncore_type snr_uncore_pcie3 = {
5010 	.name		= "pcie3",
5011 	.num_counters	= 4,
5012 	.num_boxes	= 1,
5013 	.perf_ctr_bits	= 48,
5014 	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
5015 	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
5016 	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
5017 	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
5018 	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
5019 	.ops		= &snr_pcie3_uncore_pci_ops,
5020 	.format_group	= &skx_uncore_iio_format_group,
5021 };
5022 
5023 enum {
5024 	SNR_PCI_UNCORE_M2M,
5025 	SNR_PCI_UNCORE_PCIE3,
5026 };
5027 
5028 static struct intel_uncore_type *snr_pci_uncores[] = {
5029 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
5030 	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
5031 	NULL,
5032 };
5033 
5034 static const struct pci_device_id snr_uncore_pci_ids[] = {
5035 	{ /* M2M */
5036 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5037 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
5038 	},
5039 	{ /* end: all zeroes */ }
5040 };
5041 
5042 static struct pci_driver snr_uncore_pci_driver = {
5043 	.name		= "snr_uncore",
5044 	.id_table	= snr_uncore_pci_ids,
5045 };
5046 
5047 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
5048 	{ /* PCIe3 RP */
5049 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
5050 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
5051 	},
5052 	{ /* end: all zeroes */ }
5053 };
5054 
5055 static struct pci_driver snr_uncore_pci_sub_driver = {
5056 	.name		= "snr_uncore_sub",
5057 	.id_table	= snr_uncore_pci_sub_ids,
5058 };
5059 
5060 int snr_uncore_pci_init(void)
5061 {
5062 	/* SNR UBOX DID */
5063 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
5064 					 SKX_GIDNIDMAP, true);
5065 
5066 	if (ret)
5067 		return ret;
5068 
5069 	uncore_pci_uncores = snr_pci_uncores;
5070 	uncore_pci_driver = &snr_uncore_pci_driver;
5071 	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
5072 	return 0;
5073 }
5074 
5075 #define SNR_MC_DEVICE_ID	0x3451
5076 
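/*
 * Return the memory controller PCI device on the given die; the caller
 * must drop the reference with pci_dev_put().
 */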
5077 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
5078 {
5079 	struct pci_dev *mc_dev = NULL;
5080 	int pkg;
5081 
5082 	while (1) {
5083 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
5084 		if (!mc_dev)
5085 			break;
5086 		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
5087 		if (pkg == id)
5088 			break;
5089 	}
5090 	return mc_dev;
5091 }
5092 
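/*
 * Locate the IMC PMON MMIO region: MMIO_BASE supplies the base in 8MiB
 * units, the register at mem_offset adds an offset in 4KiB units, and
 * box_ctl is the PMON block's offset within the resulting region.
 */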
5093 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
5094 			       unsigned int box_ctl, int mem_offset,
5095 			       unsigned int device)
5096 {
5097 	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
5098 	struct intel_uncore_type *type = box->pmu->type;
5099 	resource_size_t addr;
5100 	u32 pci_dword;
5101 
5102 	if (!pdev)
5103 		return -ENODEV;
5104 
5105 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
5106 	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
5107 
5108 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
5109 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
5110 
5111 	addr += box_ctl;
5112 
5113 	pci_dev_put(pdev);
5114 
5115 	box->io_addr = ioremap(addr, type->mmio_map_size);
5116 	if (!box->io_addr) {
5117 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5118 		return -EINVAL;
5119 	}
5120 
5121 	return 0;
5122 }
5123 
5124 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
5125 				       unsigned int box_ctl, int mem_offset,
5126 				       unsigned int device)
5127 {
5128 	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
5129 		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5130 }
5131 
5132 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
5133 {
5134 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5135 				   SNR_IMC_MMIO_MEM0_OFFSET,
5136 				   SNR_MC_DEVICE_ID);
5137 }
5138 
5139 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5140 {
5141 	u32 config;
5142 
5143 	if (!box->io_addr)
5144 		return;
5145 
5146 	config = readl(box->io_addr);
5147 	config |= SNBEP_PMON_BOX_CTL_FRZ;
5148 	writel(config, box->io_addr);
5149 }
5150 
5151 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5152 {
5153 	u32 config;
5154 
5155 	if (!box->io_addr)
5156 		return;
5157 
5158 	config = readl(box->io_addr);
5159 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5160 	writel(config, box->io_addr);
5161 }
5162 
5163 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5164 					   struct perf_event *event)
5165 {
5166 	struct hw_perf_event *hwc = &event->hw;
5167 
5168 	if (!box->io_addr)
5169 		return;
5170 
5171 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5172 		return;
5173 
5174 	writel(hwc->config | SNBEP_PMON_CTL_EN,
5175 	       box->io_addr + hwc->config_base);
5176 }
5177 
5178 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
5179 					    struct perf_event *event)
5180 {
5181 	struct hw_perf_event *hwc = &event->hw;
5182 
5183 	if (!box->io_addr)
5184 		return;
5185 
5186 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5187 		return;
5188 
5189 	writel(hwc->config, box->io_addr + hwc->config_base);
5190 }
5191 
5192 static struct intel_uncore_ops snr_uncore_mmio_ops = {
5193 	.init_box	= snr_uncore_mmio_init_box,
5194 	.exit_box	= uncore_mmio_exit_box,
5195 	.disable_box	= snr_uncore_mmio_disable_box,
5196 	.enable_box	= snr_uncore_mmio_enable_box,
5197 	.disable_event	= snr_uncore_mmio_disable_event,
5198 	.enable_event	= snr_uncore_mmio_enable_event,
5199 	.read_counter	= uncore_mmio_read_counter,
5200 };
5201 
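/*
 * The .scale/.unit descriptors below let perf report bandwidth in MiB
 * directly, e.g. "perf stat -a -e uncore_imc_0/cas_count_read/" (the
 * exact PMU instance name may vary by system).
 */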
5202 static struct uncore_event_desc snr_uncore_imc_events[] = {
5203 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
5204 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
5205 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
5206 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
5207 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
5208 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
5209 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
5210 	{ /* end: all zeroes */ },
5211 };
5212 
5213 static struct intel_uncore_type snr_uncore_imc = {
5214 	.name		= "imc",
5215 	.num_counters   = 4,
5216 	.num_boxes	= 2,
5217 	.perf_ctr_bits	= 48,
5218 	.fixed_ctr_bits	= 48,
5219 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5220 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5221 	.event_descs	= snr_uncore_imc_events,
5222 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5223 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5224 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5225 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5226 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5227 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5228 	.ops		= &snr_uncore_mmio_ops,
5229 	.format_group	= &skx_uncore_format_group,
5230 };
5231 
5232 enum perf_uncore_snr_imc_freerunning_type_id {
5233 	SNR_IMC_DCLK,
5234 	SNR_IMC_DDR,
5235 
5236 	SNR_IMC_FREERUNNING_TYPE_MAX,
5237 };
5238 
5239 static struct freerunning_counters snr_imc_freerunning[] = {
5240 	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5241 	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5242 };
5243 
5244 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
5245 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
5246 
5247 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
5248 	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
5249 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
5250 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
5251 	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
5252 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
5253 	{ /* end: all zeroes */ },
5254 };
5255 
5256 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
5257 	.init_box	= snr_uncore_mmio_init_box,
5258 	.exit_box	= uncore_mmio_exit_box,
5259 	.read_counter	= uncore_mmio_read_counter,
5260 	.hw_config	= uncore_freerunning_hw_config,
5261 };
5262 
5263 static struct intel_uncore_type snr_uncore_imc_free_running = {
5264 	.name			= "imc_free_running",
5265 	.num_counters		= 3,
5266 	.num_boxes		= 1,
5267 	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
5268 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5269 	.freerunning		= snr_imc_freerunning,
5270 	.ops			= &snr_uncore_imc_freerunning_ops,
5271 	.event_descs		= snr_uncore_imc_freerunning_events,
5272 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5273 };
5274 
5275 static struct intel_uncore_type *snr_mmio_uncores[] = {
5276 	&snr_uncore_imc,
5277 	&snr_uncore_imc_free_running,
5278 	NULL,
5279 };
5280 
5281 void snr_uncore_mmio_init(void)
5282 {
5283 	uncore_mmio_uncores = snr_mmio_uncores;
5284 }
5285 
5286 /* end of SNR uncore support */
5287 
5288 /* ICX uncore support */
5289 
5290 static u64 icx_cha_msr_offsets[] = {
5291 	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5292 	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5293 	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5294 	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
5295 	0x1c,  0x2a,  0x38,  0x46,
5296 };
5297 
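/*
 * When TID filtering is requested, route it through this CHA's FILTER0
 * MSR; the filter value itself comes from the event's config1.
 */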
5298 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5299 {
5300 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5301 	bool tid_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5302 
5303 	if (tid_en) {
5304 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5305 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
5306 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5307 		reg1->idx = 0;
5308 	}
5309 
5310 	return 0;
5311 }
5312 
5313 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5314 	.init_box		= ivbep_uncore_msr_init_box,
5315 	.disable_box		= snbep_uncore_msr_disable_box,
5316 	.enable_box		= snbep_uncore_msr_enable_box,
5317 	.disable_event		= snbep_uncore_msr_disable_event,
5318 	.enable_event		= snr_cha_enable_event,
5319 	.read_counter		= uncore_msr_read_counter,
5320 	.hw_config		= icx_cha_hw_config,
5321 };
5322 
5323 static struct intel_uncore_type icx_uncore_chabox = {
5324 	.name			= "cha",
5325 	.num_counters		= 4,
5326 	.perf_ctr_bits		= 48,
5327 	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
5328 	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
5329 	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
5330 	.msr_offsets		= icx_cha_msr_offsets,
5331 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5332 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
5333 	.constraints		= skx_uncore_chabox_constraints,
5334 	.ops			= &icx_uncore_chabox_ops,
5335 	.format_group		= &snr_uncore_chabox_format_group,
5336 };
5337 
5338 static u64 icx_msr_offsets[] = {
5339 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5340 };
5341 
5342 static struct event_constraint icx_uncore_iio_constraints[] = {
5343 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5344 	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5345 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5346 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
5347 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5348 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5349 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
5350 	EVENT_CONSTRAINT_END
5351 };
5352 
5353 static umode_t
5354 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5355 {
5356 	/* Root bus 0x00 is valid only for pmu_idx = 5. */
5357 	return pmu_iio_mapping_visible(kobj, attr, die, 5);
5358 }
5359 
5360 static struct attribute_group icx_iio_mapping_group = {
5361 	.is_visible	= icx_iio_mapping_visible,
5362 };
5363 
5364 static const struct attribute_group *icx_iio_attr_update[] = {
5365 	&icx_iio_mapping_group,
5366 	NULL,
5367 };
5368 
5369 /*
5370  * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON IDs.
5371  */
5372 enum {
5373 	ICX_PCIE1_PMON_ID,
5374 	ICX_PCIE2_PMON_ID,
5375 	ICX_PCIE3_PMON_ID,
5376 	ICX_PCIE4_PMON_ID,
5377 	ICX_PCIE5_PMON_ID,
5378 	ICX_CBDMA_DMI_PMON_ID
5379 };
5380 
5381 static u8 icx_sad_pmon_mapping[] = {
5382 	ICX_CBDMA_DMI_PMON_ID,
5383 	ICX_PCIE1_PMON_ID,
5384 	ICX_PCIE2_PMON_ID,
5385 	ICX_PCIE3_PMON_ID,
5386 	ICX_PCIE4_PMON_ID,
5387 	ICX_PCIE5_PMON_ID,
5388 };
5389 
5390 static int icx_iio_get_topology(struct intel_uncore_type *type)
5391 {
5392 	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5393 }
5394 
5395 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5396 {
5397 	/* Detect ICX-D system. This case is not supported */
5398 	if (boot_cpu_data.x86_vfm == INTEL_ICELAKE_D) {
5399 		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5400 		return;
5401 	}
5402 	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5403 }
5404 
5405 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5406 {
5407 	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5408 }
5409 
5410 static struct intel_uncore_type icx_uncore_iio = {
5411 	.name			= "iio",
5412 	.num_counters		= 4,
5413 	.num_boxes		= 6,
5414 	.perf_ctr_bits		= 48,
5415 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
5416 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
5417 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5418 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5419 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
5420 	.msr_offsets		= icx_msr_offsets,
5421 	.constraints		= icx_uncore_iio_constraints,
5422 	.ops			= &skx_uncore_iio_ops,
5423 	.format_group		= &snr_uncore_iio_format_group,
5424 	.attr_update		= icx_iio_attr_update,
5425 	.get_topology		= icx_iio_get_topology,
5426 	.set_mapping		= icx_iio_set_mapping,
5427 	.cleanup_mapping	= icx_iio_cleanup_mapping,
5428 };
5429 
5430 static struct intel_uncore_type icx_uncore_irp = {
5431 	.name			= "irp",
5432 	.num_counters		= 2,
5433 	.num_boxes		= 6,
5434 	.perf_ctr_bits		= 48,
5435 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
5436 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
5437 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5438 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
5439 	.msr_offsets		= icx_msr_offsets,
5440 	.ops			= &ivbep_uncore_msr_ops,
5441 	.format_group		= &ivbep_uncore_format_group,
5442 };
5443 
5444 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5445 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5446 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5447 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5448 	EVENT_CONSTRAINT_END
5449 };
5450 
5451 static struct intel_uncore_type icx_uncore_m2pcie = {
5452 	.name		= "m2pcie",
5453 	.num_counters	= 4,
5454 	.num_boxes	= 6,
5455 	.perf_ctr_bits	= 48,
5456 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
5457 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
5458 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
5459 	.msr_offsets	= icx_msr_offsets,
5460 	.constraints	= icx_uncore_m2pcie_constraints,
5461 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5462 	.ops		= &ivbep_uncore_msr_ops,
5463 	.format_group	= &ivbep_uncore_format_group,
5464 };
5465 
5466 enum perf_uncore_icx_iio_freerunning_type_id {
5467 	ICX_IIO_MSR_IOCLK,
5468 	ICX_IIO_MSR_BW_IN,
5469 
5470 	ICX_IIO_FREERUNNING_TYPE_MAX,
5471 };
5472 
5473 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5474 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5475 };
5476 
5477 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5478 	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5479 };
5480 
5481 static struct freerunning_counters icx_iio_freerunning[] = {
5482 	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5483 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5484 };
5485 
5486 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5487 	/* Free-Running IIO CLOCKS Counter */
5488 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
5489 	/* Free-Running IIO BANDWIDTH IN Counters */
5490 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
5491 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
5492 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
5493 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
5494 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
5495 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
5496 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
5497 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
5498 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
5499 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
5500 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
5501 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
5502 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
5503 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
5504 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
5505 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
5506 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
5507 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
5508 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
5509 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
5510 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
5511 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
5512 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
5513 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
5514 	{ /* end: all zeroes */ },
5515 };
5516 
5517 static struct intel_uncore_type icx_uncore_iio_free_running = {
5518 	.name			= "iio_free_running",
5519 	.num_counters		= 9,
5520 	.num_boxes		= 6,
5521 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
5522 	.freerunning		= icx_iio_freerunning,
5523 	.ops			= &skx_uncore_iio_freerunning_ops,
5524 	.event_descs		= icx_uncore_iio_freerunning_events,
5525 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5526 };
5527 
5528 static struct intel_uncore_type *icx_msr_uncores[] = {
5529 	&skx_uncore_ubox,
5530 	&icx_uncore_chabox,
5531 	&icx_uncore_iio,
5532 	&icx_uncore_irp,
5533 	&icx_uncore_m2pcie,
5534 	&skx_uncore_pcu,
5535 	&icx_uncore_iio_free_running,
5536 	NULL,
5537 };
5538 
5539 /*
5540  * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5541  * registers located at Device 30, Function 3; each set bit enables one CHA.
5542  */
5543 #define ICX_CAPID6		0x9c
5544 #define ICX_CAPID7		0xa0
5545 
5546 static u64 icx_count_chabox(void)
5547 {
5548 	struct pci_dev *dev = NULL;
5549 	u64 caps = 0;
5550 
5551 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5552 	if (!dev)
5553 		goto out;
5554 
5555 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5556 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5557 out:
5558 	pci_dev_put(dev);
5559 	return hweight64(caps);
5560 }
5561 
5562 void icx_uncore_cpu_init(void)
5563 {
5564 	u64 num_boxes = icx_count_chabox();
5565 
5566 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5567 		return;
5568 	icx_uncore_chabox.num_boxes = num_boxes;
5569 	uncore_msr_uncores = icx_msr_uncores;
5570 }
5571 
5572 static struct intel_uncore_type icx_uncore_m2m = {
5573 	.name		= "m2m",
5574 	.num_counters   = 4,
5575 	.num_boxes	= 4,
5576 	.perf_ctr_bits	= 48,
5577 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
5578 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
5579 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5580 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
5581 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
5582 	.ops		= &snr_m2m_uncore_pci_ops,
5583 	.format_group	= &snr_m2m_uncore_format_group,
5584 };
5585 
5586 static struct attribute *icx_upi_uncore_formats_attr[] = {
5587 	&format_attr_event.attr,
5588 	&format_attr_umask_ext4.attr,
5589 	&format_attr_edge.attr,
5590 	&format_attr_inv.attr,
5591 	&format_attr_thresh8.attr,
5592 	NULL,
5593 };
5594 
5595 static const struct attribute_group icx_upi_uncore_format_group = {
5596 	.name = "format",
5597 	.attrs = icx_upi_uncore_formats_attr,
5598 };
5599 
5600 #define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
5601 #define ICX_UPI_REGS_ADDR_FUNCTION	0x01
5602 
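/*
 * Walk every UBOX device, resolve its logical package via the node ID and
 * group ID registers, then read the per-link UPI topology from the link
 * devices at (dev_link0 + link index) on the same bus.
 */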
5603 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5604 {
5605 	struct pci_dev *ubox = NULL;
5606 	struct pci_dev *dev = NULL;
5607 	u32 nid, gid;
5608 	int idx, lgc_pkg, ret = -EPERM;
5609 	struct intel_uncore_topology *upi;
5610 	unsigned int devfn;
5611 
5612 	/* The GIDNIDMAP method only supports machines with at most 8 sockets. */
5613 	if (uncore_max_dies() > 8)
5614 		goto err;
5615 
5616 	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5617 		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5618 		if (ret) {
5619 			ret = pcibios_err_to_errno(ret);
5620 			break;
5621 		}
5622 
5623 		lgc_pkg = topology_gidnid_map(nid, gid);
5624 		if (lgc_pkg < 0) {
5625 			ret = -EPERM;
5626 			goto err;
5627 		}
5628 		for (idx = 0; idx < type->num_boxes; idx++) {
5629 			upi = &type->topology[lgc_pkg][idx];
5630 			devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5631 			dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5632 							  ubox->bus->number,
5633 							  devfn);
5634 			if (dev) {
5635 				ret = upi_fill_topology(dev, upi, idx);
5636 				if (ret)
5637 					goto err;
5638 			}
5639 		}
5640 	}
5641 err:
5642 	pci_dev_put(ubox);
5643 	pci_dev_put(dev);
5644 	return ret;
5645 }
5646 
5647 static int icx_upi_get_topology(struct intel_uncore_type *type)
5648 {
5649 	return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5650 }
5651 
5652 static struct attribute_group icx_upi_mapping_group = {
5653 	.is_visible	= skx_upi_mapping_visible,
5654 };
5655 
5656 static const struct attribute_group *icx_upi_attr_update[] = {
5657 	&icx_upi_mapping_group,
5658 	NULL
5659 };
5660 
5661 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5662 {
5663 	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5664 }
5665 
5666 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5667 {
5668 	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5669 }
5670 
5671 static struct intel_uncore_type icx_uncore_upi = {
5672 	.name		= "upi",
5673 	.num_counters   = 4,
5674 	.num_boxes	= 3,
5675 	.perf_ctr_bits	= 48,
5676 	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
5677 	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
5678 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5679 	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5680 	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
5681 	.ops		= &skx_upi_uncore_pci_ops,
5682 	.format_group	= &icx_upi_uncore_format_group,
5683 	.attr_update	= icx_upi_attr_update,
5684 	.get_topology	= icx_upi_get_topology,
5685 	.set_mapping	= icx_upi_set_mapping,
5686 	.cleanup_mapping = icx_upi_cleanup_mapping,
5687 };
5688 
5689 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5690 	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5691 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5692 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5693 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5694 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5695 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5696 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5697 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5698 	EVENT_CONSTRAINT_END
5699 };
5700 
5701 static struct intel_uncore_type icx_uncore_m3upi = {
5702 	.name		= "m3upi",
5703 	.num_counters   = 4,
5704 	.num_boxes	= 3,
5705 	.perf_ctr_bits	= 48,
5706 	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
5707 	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
5708 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5709 	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
5710 	.constraints	= icx_uncore_m3upi_constraints,
5711 	.ops		= &ivbep_uncore_pci_ops,
5712 	.format_group	= &skx_uncore_format_group,
5713 };
5714 
5715 enum {
5716 	ICX_PCI_UNCORE_M2M,
5717 	ICX_PCI_UNCORE_UPI,
5718 	ICX_PCI_UNCORE_M3UPI,
5719 };
5720 
5721 static struct intel_uncore_type *icx_pci_uncores[] = {
5722 	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
5723 	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
5724 	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
5725 	NULL,
5726 };
5727 
5728 static const struct pci_device_id icx_uncore_pci_ids[] = {
5729 	{ /* M2M 0 */
5730 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5731 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5732 	},
5733 	{ /* M2M 1 */
5734 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5735 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5736 	},
5737 	{ /* M2M 2 */
5738 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5739 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5740 	},
5741 	{ /* M2M 3 */
5742 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5743 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5744 	},
5745 	{ /* UPI Link 0 */
5746 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5747 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5748 	},
5749 	{ /* UPI Link 1 */
5750 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5751 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5752 	},
5753 	{ /* UPI Link 2 */
5754 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5755 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5756 	},
5757 	{ /* M3UPI Link 0 */
5758 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5759 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5760 	},
5761 	{ /* M3UPI Link 1 */
5762 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5763 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5764 	},
5765 	{ /* M3UPI Link 2 */
5766 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5767 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5768 	},
5769 	{ /* end: all zeroes */ }
5770 };
5771 
5772 static struct pci_driver icx_uncore_pci_driver = {
5773 	.name		= "icx_uncore",
5774 	.id_table	= icx_uncore_pci_ids,
5775 };
5776 
5777 int icx_uncore_pci_init(void)
5778 {
5779 	/* ICX UBOX DID */
5780 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5781 					 SKX_GIDNIDMAP, true);
5782 
5783 	if (ret)
5784 		return ret;
5785 
5786 	uncore_pci_uncores = icx_pci_uncores;
5787 	uncore_pci_driver = &icx_uncore_pci_driver;
5788 	return 0;
5789 }
5790 
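/*
 * Each IMC PMON box is one channel of one memory controller: the channel
 * (pmu_idx % ICX_NUMBER_IMC_CHN) selects the box control offset, and the
 * controller (pmu_idx / ICX_NUMBER_IMC_CHN) selects the BAR offset.
 */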
5791 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5792 {
5793 	unsigned int box_ctl = box->pmu->type->box_ctl +
5794 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5795 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5796 			 SNR_IMC_MMIO_MEM0_OFFSET;
5797 
5798 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5799 				   SNR_MC_DEVICE_ID);
5800 }
5801 
5802 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5803 	.init_box	= icx_uncore_imc_init_box,
5804 	.exit_box	= uncore_mmio_exit_box,
5805 	.disable_box	= snr_uncore_mmio_disable_box,
5806 	.enable_box	= snr_uncore_mmio_enable_box,
5807 	.disable_event	= snr_uncore_mmio_disable_event,
5808 	.enable_event	= snr_uncore_mmio_enable_event,
5809 	.read_counter	= uncore_mmio_read_counter,
5810 };
5811 
5812 static struct intel_uncore_type icx_uncore_imc = {
5813 	.name		= "imc",
5814 	.num_counters   = 4,
5815 	.num_boxes	= 12,
5816 	.perf_ctr_bits	= 48,
5817 	.fixed_ctr_bits	= 48,
5818 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5819 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5820 	.event_descs	= snr_uncore_imc_events,
5821 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5822 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5823 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5824 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5825 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5826 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5827 	.ops		= &icx_uncore_mmio_ops,
5828 	.format_group	= &skx_uncore_format_group,
5829 };
5830 
5831 enum perf_uncore_icx_imc_freerunning_type_id {
5832 	ICX_IMC_DCLK,
5833 	ICX_IMC_DDR,
5834 	ICX_IMC_DDRT,
5835 
5836 	ICX_IMC_FREERUNNING_TYPE_MAX,
5837 };
5838 
5839 static struct freerunning_counters icx_imc_freerunning[] = {
5840 	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5841 	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5842 	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
5843 };
5844 
5845 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5846 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
5847 
5848 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
5849 	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
5850 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
5851 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
5852 	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
5853 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
5854 
5855 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
5856 	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
5857 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
5858 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
5859 	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
5860 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
5861 	{ /* end: all zeroes */ },
5862 };
5863 
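/* Free-running counters need no box control writes; mapping the region is enough. */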
5864 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5865 {
5866 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5867 			 SNR_IMC_MMIO_MEM0_OFFSET;
5868 
5869 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5870 			    mem_offset, SNR_MC_DEVICE_ID);
5871 }
5872 
5873 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5874 	.init_box	= icx_uncore_imc_freerunning_init_box,
5875 	.exit_box	= uncore_mmio_exit_box,
5876 	.read_counter	= uncore_mmio_read_counter,
5877 	.hw_config	= uncore_freerunning_hw_config,
5878 };
5879 
5880 static struct intel_uncore_type icx_uncore_imc_free_running = {
5881 	.name			= "imc_free_running",
5882 	.num_counters		= 5,
5883 	.num_boxes		= 4,
5884 	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
5885 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5886 	.freerunning		= icx_imc_freerunning,
5887 	.ops			= &icx_uncore_imc_freerunning_ops,
5888 	.event_descs		= icx_uncore_imc_freerunning_events,
5889 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5890 };
5891 
5892 static struct intel_uncore_type *icx_mmio_uncores[] = {
5893 	&icx_uncore_imc,
5894 	&icx_uncore_imc_free_running,
5895 	NULL,
5896 };
5897 
5898 void icx_uncore_mmio_init(void)
5899 {
5900 	uncore_mmio_uncores = icx_mmio_uncores;
5901 }
5902 
5903 /* end of ICX uncore support */
5904 
5905 /* SPR uncore support */
5906 
5907 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5908 					struct perf_event *event)
5909 {
5910 	struct hw_perf_event *hwc = &event->hw;
5911 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5912 
5913 	if (reg1->idx != EXTRA_REG_NONE)
5914 		wrmsrl(reg1->reg, reg1->config);
5915 
5916 	wrmsrl(hwc->config_base, hwc->config);
5917 }
5918 
5919 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5920 					 struct perf_event *event)
5921 {
5922 	struct hw_perf_event *hwc = &event->hw;
5923 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5924 
5925 	if (reg1->idx != EXTRA_REG_NONE)
5926 		wrmsrl(reg1->reg, 0);
5927 
5928 	wrmsrl(hwc->config_base, 0);
5929 }
5930 
5931 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5932 {
5933 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5934 	bool tid_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5935 	struct intel_uncore_type *type = box->pmu->type;
5936 
5937 	if (tid_en) {
5938 		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5939 			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
5940 		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5941 		reg1->idx = 0;
5942 	}
5943 
5944 	return 0;
5945 }
5946 
5947 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5948 	.init_box		= intel_generic_uncore_msr_init_box,
5949 	.disable_box		= intel_generic_uncore_msr_disable_box,
5950 	.enable_box		= intel_generic_uncore_msr_enable_box,
5951 	.disable_event		= spr_uncore_msr_disable_event,
5952 	.enable_event		= spr_uncore_msr_enable_event,
5953 	.read_counter		= uncore_msr_read_counter,
5954 	.hw_config		= spr_cha_hw_config,
5955 	.get_constraint		= uncore_get_constraint,
5956 	.put_constraint		= uncore_put_constraint,
5957 };
5958 
5959 static struct attribute *spr_uncore_cha_formats_attr[] = {
5960 	&format_attr_event.attr,
5961 	&format_attr_umask_ext4.attr,
5962 	&format_attr_tid_en2.attr,
5963 	&format_attr_edge.attr,
5964 	&format_attr_inv.attr,
5965 	&format_attr_thresh8.attr,
5966 	&format_attr_filter_tid5.attr,
5967 	NULL,
5968 };
5969 static const struct attribute_group spr_uncore_chabox_format_group = {
5970 	.name = "format",
5971 	.attrs = spr_uncore_cha_formats_attr,
5972 };
5973 
5974 static ssize_t alias_show(struct device *dev,
5975 			  struct device_attribute *attr,
5976 			  char *buf)
5977 {
5978 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5979 	char pmu_name[UNCORE_PMU_NAME_LEN];
5980 
5981 	uncore_get_alias_name(pmu_name, pmu);
5982 	return sysfs_emit(buf, "%s\n", pmu_name);
5983 }
5984 
5985 static DEVICE_ATTR_RO(alias);
5986 
5987 static struct attribute *uncore_alias_attrs[] = {
5988 	&dev_attr_alias.attr,
5989 	NULL
5990 };
5991 
5992 ATTRIBUTE_GROUPS(uncore_alias);
5993 
5994 static struct intel_uncore_type spr_uncore_chabox = {
5995 	.name			= "cha",
5996 	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
5997 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
5998 	.num_shared_regs	= 1,
5999 	.constraints		= skx_uncore_chabox_constraints,
6000 	.ops			= &spr_uncore_chabox_ops,
6001 	.format_group		= &spr_uncore_chabox_format_group,
6002 	.attr_update		= uncore_alias_groups,
6003 };
6004 
6005 static struct intel_uncore_type spr_uncore_iio = {
6006 	.name			= "iio",
6007 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6008 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
6009 	.format_group		= &snr_uncore_iio_format_group,
6010 	.attr_update		= uncore_alias_groups,
6011 	.constraints		= icx_uncore_iio_constraints,
6012 };
6013 
6014 static struct attribute *spr_uncore_raw_formats_attr[] = {
6015 	&format_attr_event.attr,
6016 	&format_attr_umask_ext4.attr,
6017 	&format_attr_edge.attr,
6018 	&format_attr_inv.attr,
6019 	&format_attr_thresh8.attr,
6020 	NULL,
6021 };
6022 
6023 static const struct attribute_group spr_uncore_raw_format_group = {
6024 	.name			= "format",
6025 	.attrs			= spr_uncore_raw_formats_attr,
6026 };
6027 
6028 #define SPR_UNCORE_COMMON_FORMAT()				\
6029 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
6030 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
6031 	.format_group		= &spr_uncore_raw_format_group,	\
6032 	.attr_update		= uncore_alias_groups
6033 
6034 static struct intel_uncore_type spr_uncore_irp = {
6035 	SPR_UNCORE_COMMON_FORMAT(),
6036 	.name			= "irp",
6038 };
6039 
6040 static struct event_constraint spr_uncore_m2pcie_constraints[] = {
6041 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
6042 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
6043 	EVENT_CONSTRAINT_END
6044 };
6045 
6046 static struct intel_uncore_type spr_uncore_m2pcie = {
6047 	SPR_UNCORE_COMMON_FORMAT(),
6048 	.name			= "m2pcie",
6049 	.constraints		= spr_uncore_m2pcie_constraints,
6050 };
6051 
6052 static struct intel_uncore_type spr_uncore_pcu = {
6053 	.name			= "pcu",
6054 	.attr_update		= uncore_alias_groups,
6055 };
6056 
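/* Only the enable bit is written to the fixed counter's control register. */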
6057 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
6058 					 struct perf_event *event)
6059 {
6060 	struct hw_perf_event *hwc = &event->hw;
6061 
6062 	if (!box->io_addr)
6063 		return;
6064 
6065 	if (uncore_pmc_fixed(hwc->idx))
6066 		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
6067 	else
6068 		writel(hwc->config, box->io_addr + hwc->config_base);
6069 }
6070 
6071 static struct intel_uncore_ops spr_uncore_mmio_ops = {
6072 	.init_box		= intel_generic_uncore_mmio_init_box,
6073 	.exit_box		= uncore_mmio_exit_box,
6074 	.disable_box		= intel_generic_uncore_mmio_disable_box,
6075 	.enable_box		= intel_generic_uncore_mmio_enable_box,
6076 	.disable_event		= intel_generic_uncore_mmio_disable_event,
6077 	.enable_event		= spr_uncore_mmio_enable_event,
6078 	.read_counter		= uncore_mmio_read_counter,
6079 };
6080 
6081 static struct uncore_event_desc spr_uncore_imc_events[] = {
6082 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
6083 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
6084 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
6085 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
6086 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
6087 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
6088 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
6089 	{ /* end: all zeroes */ },
6090 };
6091 
6092 #define SPR_UNCORE_MMIO_COMMON_FORMAT()				\
6093 	SPR_UNCORE_COMMON_FORMAT(),				\
6094 	.ops			= &spr_uncore_mmio_ops
6095 
6096 static struct intel_uncore_type spr_uncore_imc = {
6097 	SPR_UNCORE_MMIO_COMMON_FORMAT(),
6098 	.name			= "imc",
6099 	.fixed_ctr_bits		= 48,
6100 	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
6101 	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
6102 	.event_descs		= spr_uncore_imc_events,
6103 };
6104 
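/*
 * SPR PCI PMON control registers are 64 bits wide; program the high dword
 * (extended umask) before the low dword holding the base event select.
 */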
6105 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
6106 					struct perf_event *event)
6107 {
6108 	struct pci_dev *pdev = box->pci_dev;
6109 	struct hw_perf_event *hwc = &event->hw;
6110 
6111 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
6112 	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6113 }
6114 
6115 static struct intel_uncore_ops spr_uncore_pci_ops = {
6116 	.init_box		= intel_generic_uncore_pci_init_box,
6117 	.disable_box		= intel_generic_uncore_pci_disable_box,
6118 	.enable_box		= intel_generic_uncore_pci_enable_box,
6119 	.disable_event		= intel_generic_uncore_pci_disable_event,
6120 	.enable_event		= spr_uncore_pci_enable_event,
6121 	.read_counter		= intel_generic_uncore_pci_read_counter,
6122 };
6123 
6124 #define SPR_UNCORE_PCI_COMMON_FORMAT()			\
6125 	SPR_UNCORE_COMMON_FORMAT(),			\
6126 	.ops			= &spr_uncore_pci_ops
6127 
6128 static struct intel_uncore_type spr_uncore_m2m = {
6129 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6130 	.name			= "m2m",
6131 };
6132 
6133 static struct attribute_group spr_upi_mapping_group = {
6134 	.is_visible	= skx_upi_mapping_visible,
6135 };
6136 
6137 static const struct attribute_group *spr_upi_attr_update[] = {
6138 	&uncore_alias_group,
6139 	&spr_upi_mapping_group,
6140 	NULL
6141 };
6142 
6143 #define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01
6144 
6145 static void spr_upi_set_mapping(struct intel_uncore_type *type)
6146 {
6147 	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6148 }
6149 
6150 static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
6151 {
6152 	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6153 }
6154 
6155 static int spr_upi_get_topology(struct intel_uncore_type *type)
6156 {
6157 	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
6158 }
6159 
6160 static struct intel_uncore_type spr_uncore_mdf = {
6161 	SPR_UNCORE_COMMON_FORMAT(),
6162 	.name			= "mdf",
6163 };
6164 
6165 #define UNCORE_SPR_NUM_UNCORE_TYPES		12
6166 #define UNCORE_SPR_CHA				0
6167 #define UNCORE_SPR_IIO				1
6168 #define UNCORE_SPR_IMC				6
6169 #define UNCORE_SPR_UPI				8
6170 #define UNCORE_SPR_M3UPI			9
6171 
6172 /*
6173  * The uncore units that are enumerated by the discovery table are defined
6174  * here, indexed by type ID; NULL slots keep the generated defaults.
6175  */
6176 static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
6177 	&spr_uncore_chabox,
6178 	&spr_uncore_iio,
6179 	&spr_uncore_irp,
6180 	&spr_uncore_m2pcie,
6181 	&spr_uncore_pcu,
6182 	NULL,
6183 	&spr_uncore_imc,
6184 	&spr_uncore_m2m,
6185 	NULL,
6186 	NULL,
6187 	NULL,
6188 	&spr_uncore_mdf,
6189 };
6190 
6191 /*
6192  * The uncore units that are not enumerated by the discovery table (or whose
6193  * table entries are broken, such as UPI and M3UPI) are implemented below.
6194  */
6195 #define SPR_UNCORE_UPI_NUM_BOXES	4
6196 
6197 static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
6198 	0, 0x8000, 0x10000, 0x18000
6199 };
6200 
6201 static struct intel_uncore_type spr_uncore_upi = {
6202 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6203 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
6204 	.format_group		= &spr_uncore_raw_format_group,
6205 	.ops			= &spr_uncore_pci_ops,
6206 	.name			= "upi",
6207 	.attr_update		= spr_upi_attr_update,
6208 	.get_topology		= spr_upi_get_topology,
6209 	.set_mapping		= spr_upi_set_mapping,
6210 	.cleanup_mapping	= spr_upi_cleanup_mapping,
6211 	.type_id		= UNCORE_SPR_UPI,
6212 	.num_counters		= 4,
6213 	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
6214 	.perf_ctr_bits		= 48,
6215 	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0,
6216 	.event_ctl		= ICX_UPI_PCI_PMON_CTL0,
6217 	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
6218 	.pci_offsets		= spr_upi_pci_offsets,
6219 };
6220 
6221 static struct intel_uncore_type spr_uncore_m3upi = {
6222 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6223 	.name			= "m3upi",
6224 	.type_id		= UNCORE_SPR_M3UPI,
6225 	.num_counters		= 4,
6226 	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
6227 	.perf_ctr_bits		= 48,
6228 	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0,
6229 	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0,
6230 	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
6231 	.pci_offsets		= spr_upi_pci_offsets,
6232 	.constraints		= icx_uncore_m3upi_constraints,
6233 };
6234 
6235 enum perf_uncore_spr_iio_freerunning_type_id {
6236 	SPR_IIO_MSR_IOCLK,
6237 	SPR_IIO_MSR_BW_IN,
6238 	SPR_IIO_MSR_BW_OUT,
6239 
6240 	SPR_IIO_FREERUNNING_TYPE_MAX,
6241 };
6242 
6243 static struct freerunning_counters spr_iio_freerunning[] = {
6244 	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
6245 	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
6246 	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
6247 };
6248 
6249 static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
6250 	/* Free-Running IIO CLOCKS Counter */
6251 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
6252 	/* Free-Running IIO BANDWIDTH IN Counters */
6253 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
6254 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
6255 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
6256 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
6257 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
6258 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
6259 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
6260 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
6261 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
6262 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
6263 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
6264 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
6265 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
6266 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
6267 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
6268 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
6269 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
6270 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
6271 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
6272 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
6273 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
6274 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
6275 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
6276 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
6277 	/* Free-Running IIO BANDWIDTH OUT Counters */
6278 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
6279 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
6280 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
6281 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
6282 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
6283 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
6284 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
6285 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
6286 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
6287 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
6288 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
6289 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
6290 	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
6291 	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
6292 	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
6293 	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
6294 	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
6295 	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
6296 	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
6297 	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
6298 	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
6299 	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
6300 	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
6301 	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
6302 	{ /* end: all zeroes */ },
6303 };
6304 
6305 static struct intel_uncore_type spr_uncore_iio_free_running = {
6306 	.name			= "iio_free_running",
6307 	.num_counters		= 17,
6308 	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
6309 	.freerunning		= spr_iio_freerunning,
6310 	.ops			= &skx_uncore_iio_freerunning_ops,
6311 	.event_descs		= spr_uncore_iio_freerunning_events,
6312 	.format_group		= &skx_uncore_iio_freerunning_format_group,
6313 };
6314 
6315 enum perf_uncore_spr_imc_freerunning_type_id {
6316 	SPR_IMC_DCLK,
6317 	SPR_IMC_PQ_CYCLES,
6318 
6319 	SPR_IMC_FREERUNNING_TYPE_MAX,
6320 };
6321 
6322 static struct freerunning_counters spr_imc_freerunning[] = {
6323 	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
6324 	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
6325 };
6326 
6327 static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
6328 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
6329 
6330 	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
6331 	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
6332 	{ /* end: all zeroes */ },
6333 };
6334 
6335 #define SPR_MC_DEVICE_ID	0x3251
6336 
6337 static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
6338 {
6339 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
6340 
6341 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
6342 			    mem_offset, SPR_MC_DEVICE_ID);
6343 }
6344 
6345 static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
6346 	.init_box	= spr_uncore_imc_freerunning_init_box,
6347 	.exit_box	= uncore_mmio_exit_box,
6348 	.read_counter	= uncore_mmio_read_counter,
6349 	.hw_config	= uncore_freerunning_hw_config,
6350 };
6351 
6352 static struct intel_uncore_type spr_uncore_imc_free_running = {
6353 	.name			= "imc_free_running",
6354 	.num_counters		= 3,
6355 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
6356 	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
6357 	.freerunning		= spr_imc_freerunning,
6358 	.ops			= &spr_uncore_imc_freerunning_ops,
6359 	.event_descs		= spr_uncore_imc_freerunning_events,
6360 	.format_group		= &skx_uncore_iio_freerunning_format_group,
6361 };
6362 
6363 #define UNCORE_SPR_MSR_EXTRA_UNCORES		1
6364 #define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
6365 #define UNCORE_SPR_PCI_EXTRA_UNCORES		2
6366 
6367 static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
6368 	&spr_uncore_iio_free_running,
6369 };
6370 
6371 static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
6372 	&spr_uncore_imc_free_running,
6373 };
6374 
6375 static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
6376 	&spr_uncore_upi,
6377 	&spr_uncore_m3upi
6378 };
6379 
6380 int spr_uncore_units_ignore[] = {
6381 	UNCORE_SPR_UPI,
6382 	UNCORE_SPR_M3UPI,
6383 	UNCORE_IGNORE_END
6384 };
6385 
6386 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6387 					struct intel_uncore_type *from_type)
6388 {
6389 	if (!to_type || !from_type)
6390 		return;
6391 
6392 	if (from_type->name)
6393 		to_type->name = from_type->name;
6394 	if (from_type->fixed_ctr_bits)
6395 		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6396 	if (from_type->event_mask)
6397 		to_type->event_mask = from_type->event_mask;
6398 	if (from_type->event_mask_ext)
6399 		to_type->event_mask_ext = from_type->event_mask_ext;
6400 	if (from_type->fixed_ctr)
6401 		to_type->fixed_ctr = from_type->fixed_ctr;
6402 	if (from_type->fixed_ctl)
6403 		to_type->fixed_ctl = from_type->fixed_ctl;
6406 	if (from_type->num_shared_regs)
6407 		to_type->num_shared_regs = from_type->num_shared_regs;
6408 	if (from_type->constraints)
6409 		to_type->constraints = from_type->constraints;
6410 	if (from_type->ops)
6411 		to_type->ops = from_type->ops;
6412 	if (from_type->event_descs)
6413 		to_type->event_descs = from_type->event_descs;
6414 	if (from_type->format_group)
6415 		to_type->format_group = from_type->format_group;
6416 	if (from_type->attr_update)
6417 		to_type->attr_update = from_type->attr_update;
6418 	if (from_type->set_mapping)
6419 		to_type->set_mapping = from_type->set_mapping;
6420 	if (from_type->get_topology)
6421 		to_type->get_topology = from_type->get_topology;
6422 	if (from_type->cleanup_mapping)
6423 		to_type->cleanup_mapping = from_type->cleanup_mapping;
6424 }
6425 
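/*
 * Build the unit list from the discovery table, overlay the pre-defined
 * customizations for known type IDs, then append the extra units that the
 * discovery table cannot describe.
 */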
6426 static struct intel_uncore_type **
6427 uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
6428 		   struct intel_uncore_type **extra, int max_num_types,
6429 		   struct intel_uncore_type **uncores)
6430 {
6431 	struct intel_uncore_type **types, **start_types;
6432 	int i;
6433 
6434 	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
6435 
6436 	/* Only copy the customized features */
6437 	for (; *types; types++) {
6438 		if ((*types)->type_id >= max_num_types)
6439 			continue;
6440 		uncore_type_customized_copy(*types, uncores[(*types)->type_id]);
6441 	}
6442 
6443 	for (i = 0; i < num_extra; i++, types++)
6444 		*types = extra[i];
6445 
6446 	return start_types;
6447 }
6448 
6449 static struct intel_uncore_type *
6450 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6451 {
6452 	for (; *types; types++) {
6453 		if (type_id == (*types)->type_id)
6454 			return *types;
6455 	}
6456 
6457 	return NULL;
6458 }
6459 
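/*
 * Box IDs from the discovery table may be sparse; return the highest box
 * ID plus one rather than the raw box count.
 */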
6460 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6461 				 int type_id)
6462 {
6463 	struct intel_uncore_type *type;
6464 	int i, max = 0;
6465 
6466 	type = uncore_find_type_by_id(types, type_id);
6467 	if (!type)
6468 		return 0;
6469 
6470 	for (i = 0; i < type->num_boxes; i++) {
6471 		if (type->box_ids[i] > max)
6472 			max = type->box_ids[i];
6473 	}
6474 
6475 	return max + 1;
6476 }
6477 
6478 #define SPR_MSR_UNC_CBO_CONFIG		0x2ffe
6479 
6480 void spr_uncore_cpu_init(void)
6481 {
6482 	struct intel_uncore_type *type;
6483 	u64 num_cbo;
6484 
6485 	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6486 						UNCORE_SPR_MSR_EXTRA_UNCORES,
6487 						spr_msr_uncores,
6488 						UNCORE_SPR_NUM_UNCORE_TYPES,
6489 						spr_uncores);
6490 
6491 	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
6492 	if (type) {
6493 		/*
6494 		 * The value from the discovery table (stored in the type->num_boxes
6495 		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
6496 		 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG instead.
6497 		 */
6498 		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
6499 		/*
6500 		 * The MSR doesn't work (reads as zero) on the EMR XCC, but the firmware
6501 		 * bug doesn't impact the EMR XCC either, so keep the existing value.
6502 		 */
6503 		if (num_cbo)
6504 			type->num_boxes = num_cbo;
6505 	}
6506 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6507 }
6508 
6509 #define SPR_UNCORE_UPI_PCIID		0x3241
6510 #define SPR_UNCORE_UPI0_DEVFN		0x9
6511 #define SPR_UNCORE_M3UPI_PCIID		0x3246
6512 #define SPR_UNCORE_M3UPI0_DEVFN		0x29
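/*
 * The devfn values encode the PCI device and function:
 * PCI_DEVFN(1, 1) == 0x9 for UPI0, PCI_DEVFN(5, 1) == 0x29 for M3UPI0.
 */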
6513 
6514 static void spr_update_device_location(int type_id)
6515 {
6516 	struct intel_uncore_type *type;
6517 	struct pci_dev *dev = NULL;
6518 	u32 device, devfn;
6519 	u64 *ctls;
6520 	int die;
6521 
6522 	if (type_id == UNCORE_SPR_UPI) {
6523 		type = &spr_uncore_upi;
6524 		device = SPR_UNCORE_UPI_PCIID;
6525 		devfn = SPR_UNCORE_UPI0_DEVFN;
6526 	} else if (type_id == UNCORE_SPR_M3UPI) {
6527 		type = &spr_uncore_m3upi;
6528 		device = SPR_UNCORE_M3UPI_PCIID;
6529 		devfn = SPR_UNCORE_M3UPI0_DEVFN;
6530 	} else {
6531 		return;
	}
6532 
6533 	ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
6534 	if (!ctls) {
6535 		type->num_boxes = 0;
6536 		return;
6537 	}
6538 
6539 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
6540 		if (devfn != dev->devfn)
6541 			continue;
6542 
6543 		die = uncore_device_to_die(dev);
6544 		if (die < 0)
6545 			continue;
6546 
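		/*
		 * Pack the box control address in the discovery-table
		 * format: PCI domain, bus number and devfn in the upper
		 * bit fields, the box control register offset in the low
		 * bits.
		 */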
6547 		ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
6548 			    dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
6549 			    devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
6550 			    type->box_ctl;
6551 	}
6552 
6553 	type->box_ctls = ctls;
6554 }
6555 
6556 int spr_uncore_pci_init(void)
6557 {
6558 	/*
6559 	 * The UPI discovery table on some SPR variants is broken, which
6560 	 * prevents the detection of both the UPI and M3UPI uncore PMONs.
6561 	 * Use the pre-defined UPI and M3UPI tables instead.
6562 	 *
6563 	 * The exact location, e.g., the domain and bus number, can only
6564 	 * be retrieved at load time, so update the locations of UPI and
6565 	 * M3UPI here.
6566 	 */
6567 	spr_update_device_location(UNCORE_SPR_UPI);
6568 	spr_update_device_location(UNCORE_SPR_M3UPI);
6569 	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
6570 						UNCORE_SPR_PCI_EXTRA_UNCORES,
6571 						spr_pci_uncores,
6572 						UNCORE_SPR_NUM_UNCORE_TYPES,
6573 						spr_uncores);
6574 	return 0;
6575 }
6576 
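/*
 * The free-running IMC type is only registered when the SNBEP-style
 * PCI bus to socket mapping (built from device 0x3250 below) is
 * available; without it, fall back to the discovery-table types alone.
 */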
6577 void spr_uncore_mmio_init(void)
6578 {
6579 	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6580 
6581 	if (ret) {
6582 		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
6583 							 UNCORE_SPR_NUM_UNCORE_TYPES,
6584 							 spr_uncores);
6585 	} else {
6586 		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6587 							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
6588 							 spr_mmio_uncores,
6589 							 UNCORE_SPR_NUM_UNCORE_TYPES,
6590 							 spr_uncores);
6591 
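		/*
		 * Each free-running IMC PMON presumably spans a pair of the
		 * enumerated IMC boxes, hence the halved box count.
		 */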
6592 		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6593 	}
6594 }
6595 
6596 /* end of SPR uncore support */
6597 
6598 /* GNR uncore support */
6599 
6600 #define UNCORE_GNR_NUM_UNCORE_TYPES	23
6601 #define UNCORE_GNR_TYPE_15		15
6602 #define UNCORE_GNR_B2UPI		18
6603 #define UNCORE_GNR_TYPE_21		21
6604 #define UNCORE_GNR_TYPE_22		22
6605 
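/*
 * Units of these types are skipped when the discovery table is parsed;
 * the list is terminated by UNCORE_IGNORE_END.
 */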
6606 int gnr_uncore_units_ignore[] = {
6607 	UNCORE_SPR_UPI,
6608 	UNCORE_GNR_TYPE_15,
6609 	UNCORE_GNR_B2UPI,
6610 	UNCORE_GNR_TYPE_21,
6611 	UNCORE_GNR_TYPE_22,
6612 	UNCORE_IGNORE_END
6613 };
6614 
6615 static struct intel_uncore_type gnr_uncore_ubox = {
6616 	.name			= "ubox",
6617 	.attr_update		= uncore_alias_groups,
6618 };
6619 
6620 static struct intel_uncore_type gnr_uncore_b2cmi = {
6621 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6622 	.name			= "b2cmi",
6623 };
6624 
6625 static struct intel_uncore_type gnr_uncore_b2cxl = {
6626 	SPR_UNCORE_MMIO_COMMON_FORMAT(),
6627 	.name			= "b2cxl",
6628 };
6629 
6630 static struct intel_uncore_type gnr_uncore_mdf_sbo = {
6631 	.name			= "mdf_sbo",
6632 	.attr_update		= uncore_alias_groups,
6633 };
6634 
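/*
 * Indexed by discovery-table type ID; NULL slots take the generic
 * defaults from the discovery table (or are ignored, see
 * gnr_uncore_units_ignore above).
 */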
6635 static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
6636 	&spr_uncore_chabox,
6637 	&spr_uncore_iio,
6638 	&spr_uncore_irp,
6639 	NULL,
6640 	&spr_uncore_pcu,
6641 	&gnr_uncore_ubox,
6642 	&spr_uncore_imc,
6643 	NULL,
6644 	NULL,
6645 	NULL,
6646 	NULL,
6647 	NULL,
6648 	NULL,
6649 	NULL,
6650 	NULL,
6651 	NULL,
6652 	&gnr_uncore_b2cmi,
6653 	&gnr_uncore_b2cxl,
6654 	NULL,
6655 	NULL,
6656 	&gnr_uncore_mdf_sbo,
6657 	NULL,
6658 	NULL,
6659 };
6660 
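/*
 * Layout per entry: { first counter MSR, counter stride, box stride,
 * counters per box, counter width in bits }; e.g., each IIO box has
 * eight 48-bit BW_IN counters starting at MSR 0x360e, 0x10 apart, with
 * successive boxes 0x80 apart.
 */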
6661 static struct freerunning_counters gnr_iio_freerunning[] = {
6662 	[SPR_IIO_MSR_IOCLK]	= { 0x290e, 0x01, 0x10, 1, 48 },
6663 	[SPR_IIO_MSR_BW_IN]	= { 0x360e, 0x10, 0x80, 8, 48 },
6664 	[SPR_IIO_MSR_BW_OUT]	= { 0x2e0e, 0x10, 0x80, 8, 48 },
6665 };
6666 
6667 void gnr_uncore_cpu_init(void)
6668 {
6669 	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6670 						UNCORE_SPR_MSR_EXTRA_UNCORES,
6671 						spr_msr_uncores,
6672 						UNCORE_GNR_NUM_UNCORE_TYPES,
6673 						gnr_uncores);
6674 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6675 	spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning;
6676 }
6677 
6678 int gnr_uncore_pci_init(void)
6679 {
6680 	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL,
6681 						UNCORE_GNR_NUM_UNCORE_TYPES,
6682 						gnr_uncores);
6683 	return 0;
6684 }
6685 
6686 void gnr_uncore_mmio_init(void)
6687 {
6688 	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL,
6689 						 UNCORE_GNR_NUM_UNCORE_TYPES,
6690 						 gnr_uncores);
6691 }
6692 
6693 /* end of GNR uncore support */
6694