xref: /linux/arch/x86/events/intel/uncore_snbep.c (revision df561f6688fef775baa341a0f5d960becd248b11)
1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4 
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID			0x40
7 #define SNBEP_GIDNIDMAP			0x54
8 
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
15 					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
16 					 SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
20 #define SNBEP_PMON_CTL_RST		(1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
23 #define SNBEP_PMON_CTL_EN		(1 << 22)
24 #define SNBEP_PMON_CTL_INVERT		(1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
27 					 SNBEP_PMON_CTL_UMASK_MASK | \
28 					 SNBEP_PMON_CTL_EDGE_DET | \
29 					 SNBEP_PMON_CTL_INVERT | \
30 					 SNBEP_PMON_CTL_TRESH_MASK)
31 
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
35 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
36 				 SNBEP_PMON_CTL_UMASK_MASK | \
37 				 SNBEP_PMON_CTL_EDGE_DET | \
38 				 SNBEP_PMON_CTL_INVERT | \
39 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40 
41 #define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
43 						 SNBEP_CBO_PMON_CTL_TID_EN)
44 
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
51 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
52 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53 				 SNBEP_PMON_CTL_EDGE_DET | \
54 				 SNBEP_PMON_CTL_INVERT | \
55 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58 
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
60 				(SNBEP_PMON_RAW_EVENT_MASK | \
61 				 SNBEP_PMON_CTL_EV_SEL_EXT)
62 
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL			0xf4
65 #define SNBEP_PCI_PMON_CTL0			0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0			0xa0
68 
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c
81 
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0			0xc16
84 #define SNBEP_U_MSR_PMON_CTL0			0xc10
85 
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09
88 
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0			0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0			0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
94 #define SNBEP_CBO_MSR_OFFSET			0x20
95 
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
100 
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
102 	.event = (e),				\
103 	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
104 	.config_mask = (m),			\
105 	.idx = (i)				\
106 }
107 
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0			0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0			0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
116 
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
119 					 SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
121 					 SNBEP_PMON_CTL_UMASK_MASK | \
122 					 SNBEP_PMON_CTL_EDGE_DET | \
123 					 SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)
128 
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
130 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
131 				 SNBEP_PMON_CTL_UMASK_MASK | \
132 				 SNBEP_PMON_CTL_EDGE_DET | \
133 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
136 						 SNBEP_CBO_PMON_CTL_TID_EN)
137 
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
146 
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
150 				(IVBEP_PMON_RAW_EVENT_MASK | \
151 				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
154 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
155 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156 				 SNBEP_PMON_CTL_EDGE_DET | \
157 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
162 				(IVBEP_PMON_RAW_EVENT_MASK | \
163 				 SNBEP_PMON_CTL_EV_SEL_EXT)
164 
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166 				((1ULL << (n)) - 1)))
167 
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0			0x709
170 #define HSWEP_U_MSR_PMON_CTL0			0x705
171 #define HSWEP_U_MSR_PMON_FILTER			0x707
172 
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704
175 
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179 					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180 					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181 
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0			0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0			0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
187 #define HSWEP_CBO_MSR_OFFSET			0x10
188 
189 
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
198 
199 
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0			0x726
202 #define HSWEP_S0_MSR_PMON_CTL0			0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
204 #define HSWEP_SBOX_MSR_OFFSET			0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
206 						 SNBEP_CBO_PMON_CTL_TID_EN)
207 
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0			0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0			0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
213 
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216 					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217 						SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET			0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222 					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223 					 KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
230 
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
233 #define KNL_UCLK_MSR_PMON_CTL0			0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
237 #define KNL_PMON_FIXED_CTL_EN			0x1
238 
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44
245 
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44
252 
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
256 						 KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
262 				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263 				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265 				 SNBEP_PMON_CTL_EDGE_DET | \
266 				 SNBEP_CBO_PMON_CTL_TID_EN | \
267 				 SNBEP_PMON_CTL_INVERT | \
268 				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271 
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID			0xc0
274 #define SKX_GIDNIDMAP			0xd4
275 
276 /*
277  * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278  * that BIOS programmed. MSR has package scope.
279  * |  Bit  |  Default  |  Description
280  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
281  *                       numbers have been initialized. (RO)
282  * |[62:48]|    ---    | Reserved
283  * |[47:40]|    00h    | BUS_NUM_5 — Return the bus number BIOS assigned
284  *                       CPUBUSNO(5). (RO)
285  * |[39:32]|    00h    | BUS_NUM_4 — Return the bus number BIOS assigned
286  *                       CPUBUSNO(4). (RO)
287  * |[31:24]|    00h    | BUS_NUM_3 — Return the bus number BIOS assigned
288  *                       CPUBUSNO(3). (RO)
289  * |[23:16]|    00h    | BUS_NUM_2 — Return the bus number BIOS assigned
290  *                       CPUBUSNO(2). (RO)
291  * |[15:8] |    00h    | BUS_NUM_1 — Return the bus number BIOS assigned
292  *                       CPUBUSNO(1). (RO)
293  * | [7:0] |    00h    | BUS_NUM_0 — Return the bus number BIOS assigned
294  *                       CPUBUSNO(0). (RO)
295  */
296 #define SKX_MSR_CPU_BUS_NUMBER		0x300
297 #define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
298 #define BUS_NUM_STRIDE			8
299 
300 /* SKX CHA */
301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
314 
315 /* SKX IIO */
316 #define SKX_IIO0_MSR_PMON_CTL0		0xa48
317 #define SKX_IIO0_MSR_PMON_CTR0		0xa41
318 #define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
319 #define SKX_IIO_MSR_OFFSET		0x20
320 
321 #define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
322 #define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
323 #define SKX_PMON_CTL_CH_MASK		(0xff << 4)
324 #define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
325 #define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
326 					 SNBEP_PMON_CTL_UMASK_MASK | \
327 					 SNBEP_PMON_CTL_EDGE_DET | \
328 					 SNBEP_PMON_CTL_INVERT | \
329 					 SKX_PMON_CTL_TRESH_MASK)
330 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
331 					 SKX_PMON_CTL_CH_MASK | \
332 					 SKX_PMON_CTL_FC_MASK)
333 
334 /* SKX IRP */
335 #define SKX_IRP0_MSR_PMON_CTL0		0xa5b
336 #define SKX_IRP0_MSR_PMON_CTR0		0xa59
337 #define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
338 #define SKX_IRP_MSR_OFFSET		0x20
339 
340 /* SKX UPI */
341 #define SKX_UPI_PCI_PMON_CTL0		0x350
342 #define SKX_UPI_PCI_PMON_CTR0		0x318
343 #define SKX_UPI_PCI_PMON_BOX_CTL	0x378
344 #define SKX_UPI_CTL_UMASK_EXT		0xffefff
345 
346 /* SKX M2M */
347 #define SKX_M2M_PCI_PMON_CTL0		0x228
348 #define SKX_M2M_PCI_PMON_CTR0		0x200
349 #define SKX_M2M_PCI_PMON_BOX_CTL	0x258
350 
351 /* SNR Ubox */
352 #define SNR_U_MSR_PMON_CTR0			0x1f98
353 #define SNR_U_MSR_PMON_CTL0			0x1f91
354 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
355 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94
356 
357 /* SNR CHA */
358 #define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
359 #define SNR_CHA_MSR_PMON_CTL0			0x1c01
360 #define SNR_CHA_MSR_PMON_CTR0			0x1c08
361 #define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
362 #define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05
363 
364 
365 /* SNR IIO */
366 #define SNR_IIO_MSR_PMON_CTL0			0x1e08
367 #define SNR_IIO_MSR_PMON_CTR0			0x1e01
368 #define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
369 #define SNR_IIO_MSR_OFFSET			0x10
370 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff
371 
372 /* SNR IRP */
373 #define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
374 #define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
375 #define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
376 #define SNR_IRP_MSR_OFFSET			0x10
377 
378 /* SNR M2PCIE */
379 #define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
380 #define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
381 #define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
382 #define SNR_M2PCIE_MSR_OFFSET			0x10
383 
384 /* SNR PCU */
385 #define SNR_PCU_MSR_PMON_CTL0			0x1ef1
386 #define SNR_PCU_MSR_PMON_CTR0			0x1ef8
387 #define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
388 #define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc
389 
390 /* SNR M2M */
391 #define SNR_M2M_PCI_PMON_CTL0			0x468
392 #define SNR_M2M_PCI_PMON_CTR0			0x440
393 #define SNR_M2M_PCI_PMON_BOX_CTL		0x438
394 #define SNR_M2M_PCI_PMON_UMASK_EXT		0xff
395 
396 /* SNR IMC */
397 #define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
398 #define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
399 #define SNR_IMC_MMIO_PMON_CTL0			0x40
400 #define SNR_IMC_MMIO_PMON_CTR0			0x8
401 #define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
402 #define SNR_IMC_MMIO_OFFSET			0x4000
403 #define SNR_IMC_MMIO_SIZE			0x4000
404 #define SNR_IMC_MMIO_BASE_OFFSET		0xd0
405 #define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
406 #define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
407 #define SNR_IMC_MMIO_MEM0_MASK			0x7FF
408 
409 /* ICX CHA */
410 #define ICX_C34_MSR_PMON_CTR0			0xb68
411 #define ICX_C34_MSR_PMON_CTL0			0xb61
412 #define ICX_C34_MSR_PMON_BOX_CTL		0xb60
413 #define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65
414 
415 /* ICX IIO */
416 #define ICX_IIO_MSR_PMON_CTL0			0xa58
417 #define ICX_IIO_MSR_PMON_CTR0			0xa51
418 #define ICX_IIO_MSR_PMON_BOX_CTL		0xa50
419 
420 /* ICX IRP */
421 #define ICX_IRP0_MSR_PMON_CTL0			0xa4d
422 #define ICX_IRP0_MSR_PMON_CTR0			0xa4b
423 #define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a
424 
425 /* ICX M2PCIE */
426 #define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
427 #define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
428 #define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40
429 
430 /* ICX UPI */
431 #define ICX_UPI_PCI_PMON_CTL0			0x350
432 #define ICX_UPI_PCI_PMON_CTR0			0x320
433 #define ICX_UPI_PCI_PMON_BOX_CTL		0x318
434 #define ICX_UPI_CTL_UMASK_EXT			0xffffff
435 
436 /* ICX M3UPI*/
437 #define ICX_M3UPI_PCI_PMON_CTL0			0xd8
438 #define ICX_M3UPI_PCI_PMON_CTR0			0xa8
439 #define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0
440 
441 /* ICX IMC */
442 #define ICX_NUMBER_IMC_CHN			2
443 #define ICX_IMC_MEM_STRIDE			0x4
444 
445 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
446 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
447 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
448 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
449 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
450 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
451 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
452 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
453 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
454 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
455 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
456 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
457 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
458 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
459 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
460 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
461 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
462 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
463 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
464 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
465 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
466 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
467 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
468 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
469 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
470 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
471 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
472 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
473 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
474 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
475 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
476 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
477 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
478 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
479 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
480 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
481 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
482 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
483 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
484 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
485 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
486 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
487 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
488 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
489 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
490 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
491 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
492 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
493 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
494 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
505 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
506 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
507 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
508 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
509 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
510 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
511 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
512 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
513 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
514 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
515 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
516 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
517 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
518 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
519 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
520 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
521 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
522 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
523 
524 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
525 {
526 	struct pci_dev *pdev = box->pci_dev;
527 	int box_ctl = uncore_pci_box_ctl(box);
528 	u32 config = 0;
529 
530 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
531 		config |= SNBEP_PMON_BOX_CTL_FRZ;
532 		pci_write_config_dword(pdev, box_ctl, config);
533 	}
534 }
535 
536 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
537 {
538 	struct pci_dev *pdev = box->pci_dev;
539 	int box_ctl = uncore_pci_box_ctl(box);
540 	u32 config = 0;
541 
542 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
543 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
544 		pci_write_config_dword(pdev, box_ctl, config);
545 	}
546 }
547 
548 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
549 {
550 	struct pci_dev *pdev = box->pci_dev;
551 	struct hw_perf_event *hwc = &event->hw;
552 
553 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
554 }
555 
556 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
557 {
558 	struct pci_dev *pdev = box->pci_dev;
559 	struct hw_perf_event *hwc = &event->hw;
560 
561 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
562 }
563 
564 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
565 {
566 	struct pci_dev *pdev = box->pci_dev;
567 	struct hw_perf_event *hwc = &event->hw;
568 	u64 count = 0;
569 
570 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
571 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
572 
573 	return count;
574 }
575 
576 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
577 {
578 	struct pci_dev *pdev = box->pci_dev;
579 	int box_ctl = uncore_pci_box_ctl(box);
580 
581 	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
582 }
583 
584 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
585 {
586 	u64 config;
587 	unsigned msr;
588 
589 	msr = uncore_msr_box_ctl(box);
590 	if (msr) {
591 		rdmsrl(msr, config);
592 		config |= SNBEP_PMON_BOX_CTL_FRZ;
593 		wrmsrl(msr, config);
594 	}
595 }
596 
597 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
598 {
599 	u64 config;
600 	unsigned msr;
601 
602 	msr = uncore_msr_box_ctl(box);
603 	if (msr) {
604 		rdmsrl(msr, config);
605 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
606 		wrmsrl(msr, config);
607 	}
608 }
609 
610 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
611 {
612 	struct hw_perf_event *hwc = &event->hw;
613 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
614 
615 	if (reg1->idx != EXTRA_REG_NONE)
616 		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
617 
618 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
619 }
620 
621 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
622 					struct perf_event *event)
623 {
624 	struct hw_perf_event *hwc = &event->hw;
625 
626 	wrmsrl(hwc->config_base, hwc->config);
627 }
628 
629 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
630 {
631 	unsigned msr = uncore_msr_box_ctl(box);
632 
633 	if (msr)
634 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
635 }
636 
637 static struct attribute *snbep_uncore_formats_attr[] = {
638 	&format_attr_event.attr,
639 	&format_attr_umask.attr,
640 	&format_attr_edge.attr,
641 	&format_attr_inv.attr,
642 	&format_attr_thresh8.attr,
643 	NULL,
644 };
645 
646 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
647 	&format_attr_event.attr,
648 	&format_attr_umask.attr,
649 	&format_attr_edge.attr,
650 	&format_attr_inv.attr,
651 	&format_attr_thresh5.attr,
652 	NULL,
653 };
654 
655 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
656 	&format_attr_event.attr,
657 	&format_attr_umask.attr,
658 	&format_attr_edge.attr,
659 	&format_attr_tid_en.attr,
660 	&format_attr_inv.attr,
661 	&format_attr_thresh8.attr,
662 	&format_attr_filter_tid.attr,
663 	&format_attr_filter_nid.attr,
664 	&format_attr_filter_state.attr,
665 	&format_attr_filter_opc.attr,
666 	NULL,
667 };
668 
669 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
670 	&format_attr_event.attr,
671 	&format_attr_occ_sel.attr,
672 	&format_attr_edge.attr,
673 	&format_attr_inv.attr,
674 	&format_attr_thresh5.attr,
675 	&format_attr_occ_invert.attr,
676 	&format_attr_occ_edge.attr,
677 	&format_attr_filter_band0.attr,
678 	&format_attr_filter_band1.attr,
679 	&format_attr_filter_band2.attr,
680 	&format_attr_filter_band3.attr,
681 	NULL,
682 };
683 
684 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
685 	&format_attr_event_ext.attr,
686 	&format_attr_umask.attr,
687 	&format_attr_edge.attr,
688 	&format_attr_inv.attr,
689 	&format_attr_thresh8.attr,
690 	&format_attr_match_rds.attr,
691 	&format_attr_match_rnid30.attr,
692 	&format_attr_match_rnid4.attr,
693 	&format_attr_match_dnid.attr,
694 	&format_attr_match_mc.attr,
695 	&format_attr_match_opc.attr,
696 	&format_attr_match_vnw.attr,
697 	&format_attr_match0.attr,
698 	&format_attr_match1.attr,
699 	&format_attr_mask_rds.attr,
700 	&format_attr_mask_rnid30.attr,
701 	&format_attr_mask_rnid4.attr,
702 	&format_attr_mask_dnid.attr,
703 	&format_attr_mask_mc.attr,
704 	&format_attr_mask_opc.attr,
705 	&format_attr_mask_vnw.attr,
706 	&format_attr_mask0.attr,
707 	&format_attr_mask1.attr,
708 	NULL,
709 };
710 
711 static struct uncore_event_desc snbep_uncore_imc_events[] = {
712 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
713 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
714 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
715 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
716 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
717 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
718 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
719 	{ /* end: all zeroes */ },
720 };
721 
722 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
723 	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
724 	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
725 	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
726 	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
727 	{ /* end: all zeroes */ },
728 };
729 
730 static const struct attribute_group snbep_uncore_format_group = {
731 	.name = "format",
732 	.attrs = snbep_uncore_formats_attr,
733 };
734 
735 static const struct attribute_group snbep_uncore_ubox_format_group = {
736 	.name = "format",
737 	.attrs = snbep_uncore_ubox_formats_attr,
738 };
739 
740 static const struct attribute_group snbep_uncore_cbox_format_group = {
741 	.name = "format",
742 	.attrs = snbep_uncore_cbox_formats_attr,
743 };
744 
745 static const struct attribute_group snbep_uncore_pcu_format_group = {
746 	.name = "format",
747 	.attrs = snbep_uncore_pcu_formats_attr,
748 };
749 
750 static const struct attribute_group snbep_uncore_qpi_format_group = {
751 	.name = "format",
752 	.attrs = snbep_uncore_qpi_formats_attr,
753 };
754 
755 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
756 	.disable_box	= snbep_uncore_msr_disable_box,		\
757 	.enable_box	= snbep_uncore_msr_enable_box,		\
758 	.disable_event	= snbep_uncore_msr_disable_event,	\
759 	.enable_event	= snbep_uncore_msr_enable_event,	\
760 	.read_counter	= uncore_msr_read_counter
761 
762 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
763 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
764 	.init_box	= snbep_uncore_msr_init_box		\
765 
766 static struct intel_uncore_ops snbep_uncore_msr_ops = {
767 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
768 };
769 
770 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
771 	.init_box	= snbep_uncore_pci_init_box,		\
772 	.disable_box	= snbep_uncore_pci_disable_box,		\
773 	.enable_box	= snbep_uncore_pci_enable_box,		\
774 	.disable_event	= snbep_uncore_pci_disable_event,	\
775 	.read_counter	= snbep_uncore_pci_read_counter
776 
777 static struct intel_uncore_ops snbep_uncore_pci_ops = {
778 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
779 	.enable_event	= snbep_uncore_pci_enable_event,	\
780 };
781 
782 static struct event_constraint snbep_uncore_cbox_constraints[] = {
783 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
784 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
785 	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
786 	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
787 	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
788 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
789 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
790 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
791 	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
792 	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
793 	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
794 	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
795 	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
796 	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
797 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
798 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
799 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
800 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
801 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
802 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
803 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
804 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
805 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
806 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
807 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
808 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
809 	EVENT_CONSTRAINT_END
810 };
811 
812 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
813 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
814 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
815 	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
816 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
817 	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
818 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
819 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
820 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
821 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
822 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
823 	EVENT_CONSTRAINT_END
824 };
825 
826 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
827 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
828 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
829 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
830 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
831 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
832 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
833 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
834 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
835 	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
836 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
837 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
838 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
839 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
840 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
841 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
842 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
843 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
844 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
845 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
846 	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
847 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
848 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
849 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
850 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
851 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
852 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
853 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
854 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
855 	EVENT_CONSTRAINT_END
856 };
857 
858 static struct intel_uncore_type snbep_uncore_ubox = {
859 	.name		= "ubox",
860 	.num_counters   = 2,
861 	.num_boxes	= 1,
862 	.perf_ctr_bits	= 44,
863 	.fixed_ctr_bits	= 48,
864 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
865 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
866 	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
867 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
868 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
869 	.ops		= &snbep_uncore_msr_ops,
870 	.format_group	= &snbep_uncore_ubox_format_group,
871 };
872 
873 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
874 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
875 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
876 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
877 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
878 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
879 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
880 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
881 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
882 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
883 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
884 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
885 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
886 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
887 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
888 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
889 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
890 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
891 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
892 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
893 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
894 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
895 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
896 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
897 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
898 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
899 	EVENT_EXTRA_END
900 };
901 
902 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
903 {
904 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
905 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
906 	int i;
907 
908 	if (uncore_box_is_fake(box))
909 		return;
910 
911 	for (i = 0; i < 5; i++) {
912 		if (reg1->alloc & (0x1 << i))
913 			atomic_sub(1 << (i * 6), &er->ref);
914 	}
915 	reg1->alloc = 0;
916 }
917 
918 static struct event_constraint *
919 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
920 			    u64 (*cbox_filter_mask)(int fields))
921 {
922 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
923 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
924 	int i, alloc = 0;
925 	unsigned long flags;
926 	u64 mask;
927 
928 	if (reg1->idx == EXTRA_REG_NONE)
929 		return NULL;
930 
931 	raw_spin_lock_irqsave(&er->lock, flags);
932 	for (i = 0; i < 5; i++) {
933 		if (!(reg1->idx & (0x1 << i)))
934 			continue;
935 		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
936 			continue;
937 
938 		mask = cbox_filter_mask(0x1 << i);
939 		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
940 		    !((reg1->config ^ er->config) & mask)) {
941 			atomic_add(1 << (i * 6), &er->ref);
942 			er->config &= ~mask;
943 			er->config |= reg1->config & mask;
944 			alloc |= (0x1 << i);
945 		} else {
946 			break;
947 		}
948 	}
949 	raw_spin_unlock_irqrestore(&er->lock, flags);
950 	if (i < 5)
951 		goto fail;
952 
953 	if (!uncore_box_is_fake(box))
954 		reg1->alloc |= alloc;
955 
956 	return NULL;
957 fail:
958 	for (; i >= 0; i--) {
959 		if (alloc & (0x1 << i))
960 			atomic_sub(1 << (i * 6), &er->ref);
961 	}
962 	return &uncore_constraint_empty;
963 }
964 
965 static u64 snbep_cbox_filter_mask(int fields)
966 {
967 	u64 mask = 0;
968 
969 	if (fields & 0x1)
970 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
971 	if (fields & 0x2)
972 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
973 	if (fields & 0x4)
974 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
975 	if (fields & 0x8)
976 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
977 
978 	return mask;
979 }
980 
981 static struct event_constraint *
982 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
983 {
984 	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
985 }
986 
987 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
988 {
989 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
990 	struct extra_reg *er;
991 	int idx = 0;
992 
993 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
994 		if (er->event != (event->hw.config & er->config_mask))
995 			continue;
996 		idx |= er->idx;
997 	}
998 
999 	if (idx) {
1000 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1001 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1002 		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1003 		reg1->idx = idx;
1004 	}
1005 	return 0;
1006 }
1007 
1008 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1009 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1010 	.hw_config		= snbep_cbox_hw_config,
1011 	.get_constraint		= snbep_cbox_get_constraint,
1012 	.put_constraint		= snbep_cbox_put_constraint,
1013 };
1014 
1015 static struct intel_uncore_type snbep_uncore_cbox = {
1016 	.name			= "cbox",
1017 	.num_counters		= 4,
1018 	.num_boxes		= 8,
1019 	.perf_ctr_bits		= 44,
1020 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1021 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1022 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1023 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1024 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1025 	.num_shared_regs	= 1,
1026 	.constraints		= snbep_uncore_cbox_constraints,
1027 	.ops			= &snbep_uncore_cbox_ops,
1028 	.format_group		= &snbep_uncore_cbox_format_group,
1029 };
1030 
1031 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1032 {
1033 	struct hw_perf_event *hwc = &event->hw;
1034 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1035 	u64 config = reg1->config;
1036 
1037 	if (new_idx > reg1->idx)
1038 		config <<= 8 * (new_idx - reg1->idx);
1039 	else
1040 		config >>= 8 * (reg1->idx - new_idx);
1041 
1042 	if (modify) {
1043 		hwc->config += new_idx - reg1->idx;
1044 		reg1->config = config;
1045 		reg1->idx = new_idx;
1046 	}
1047 	return config;
1048 }
1049 
1050 static struct event_constraint *
1051 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1052 {
1053 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1054 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1055 	unsigned long flags;
1056 	int idx = reg1->idx;
1057 	u64 mask, config1 = reg1->config;
1058 	bool ok = false;
1059 
1060 	if (reg1->idx == EXTRA_REG_NONE ||
1061 	    (!uncore_box_is_fake(box) && reg1->alloc))
1062 		return NULL;
1063 again:
1064 	mask = 0xffULL << (idx * 8);
1065 	raw_spin_lock_irqsave(&er->lock, flags);
1066 	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1067 	    !((config1 ^ er->config) & mask)) {
1068 		atomic_add(1 << (idx * 8), &er->ref);
1069 		er->config &= ~mask;
1070 		er->config |= config1 & mask;
1071 		ok = true;
1072 	}
1073 	raw_spin_unlock_irqrestore(&er->lock, flags);
1074 
1075 	if (!ok) {
1076 		idx = (idx + 1) % 4;
1077 		if (idx != reg1->idx) {
1078 			config1 = snbep_pcu_alter_er(event, idx, false);
1079 			goto again;
1080 		}
1081 		return &uncore_constraint_empty;
1082 	}
1083 
1084 	if (!uncore_box_is_fake(box)) {
1085 		if (idx != reg1->idx)
1086 			snbep_pcu_alter_er(event, idx, true);
1087 		reg1->alloc = 1;
1088 	}
1089 	return NULL;
1090 }
1091 
1092 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1093 {
1094 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1095 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1096 
1097 	if (uncore_box_is_fake(box) || !reg1->alloc)
1098 		return;
1099 
1100 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
1101 	reg1->alloc = 0;
1102 }
1103 
1104 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1105 {
1106 	struct hw_perf_event *hwc = &event->hw;
1107 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1108 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1109 
1110 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
1111 		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1112 		reg1->idx = ev_sel - 0xb;
1113 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1114 	}
1115 	return 0;
1116 }
1117 
1118 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1119 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1120 	.hw_config		= snbep_pcu_hw_config,
1121 	.get_constraint		= snbep_pcu_get_constraint,
1122 	.put_constraint		= snbep_pcu_put_constraint,
1123 };
1124 
1125 static struct intel_uncore_type snbep_uncore_pcu = {
1126 	.name			= "pcu",
1127 	.num_counters		= 4,
1128 	.num_boxes		= 1,
1129 	.perf_ctr_bits		= 48,
1130 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1131 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1132 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1133 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1134 	.num_shared_regs	= 1,
1135 	.ops			= &snbep_uncore_pcu_ops,
1136 	.format_group		= &snbep_uncore_pcu_format_group,
1137 };
1138 
1139 static struct intel_uncore_type *snbep_msr_uncores[] = {
1140 	&snbep_uncore_ubox,
1141 	&snbep_uncore_cbox,
1142 	&snbep_uncore_pcu,
1143 	NULL,
1144 };
1145 
1146 void snbep_uncore_cpu_init(void)
1147 {
1148 	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1149 		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1150 	uncore_msr_uncores = snbep_msr_uncores;
1151 }
1152 
1153 enum {
1154 	SNBEP_PCI_QPI_PORT0_FILTER,
1155 	SNBEP_PCI_QPI_PORT1_FILTER,
1156 	BDX_PCI_QPI_PORT2_FILTER,
1157 	HSWEP_PCI_PCU_3,
1158 };
1159 
1160 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1161 {
1162 	struct hw_perf_event *hwc = &event->hw;
1163 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1164 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1165 
1166 	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1167 		reg1->idx = 0;
1168 		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1169 		reg1->config = event->attr.config1;
1170 		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1171 		reg2->config = event->attr.config2;
1172 	}
1173 	return 0;
1174 }
1175 
1176 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1177 {
1178 	struct pci_dev *pdev = box->pci_dev;
1179 	struct hw_perf_event *hwc = &event->hw;
1180 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1181 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1182 
1183 	if (reg1->idx != EXTRA_REG_NONE) {
1184 		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1185 		int die = box->dieid;
1186 		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1187 
1188 		if (filter_pdev) {
1189 			pci_write_config_dword(filter_pdev, reg1->reg,
1190 						(u32)reg1->config);
1191 			pci_write_config_dword(filter_pdev, reg1->reg + 4,
1192 						(u32)(reg1->config >> 32));
1193 			pci_write_config_dword(filter_pdev, reg2->reg,
1194 						(u32)reg2->config);
1195 			pci_write_config_dword(filter_pdev, reg2->reg + 4,
1196 						(u32)(reg2->config >> 32));
1197 		}
1198 	}
1199 
1200 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1201 }
1202 
1203 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1204 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1205 	.enable_event		= snbep_qpi_enable_event,
1206 	.hw_config		= snbep_qpi_hw_config,
1207 	.get_constraint		= uncore_get_constraint,
1208 	.put_constraint		= uncore_put_constraint,
1209 };
1210 
1211 #define SNBEP_UNCORE_PCI_COMMON_INIT()				\
1212 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1213 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1214 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
1215 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1216 	.ops		= &snbep_uncore_pci_ops,		\
1217 	.format_group	= &snbep_uncore_format_group
1218 
1219 static struct intel_uncore_type snbep_uncore_ha = {
1220 	.name		= "ha",
1221 	.num_counters   = 4,
1222 	.num_boxes	= 1,
1223 	.perf_ctr_bits	= 48,
1224 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1225 };
1226 
1227 static struct intel_uncore_type snbep_uncore_imc = {
1228 	.name		= "imc",
1229 	.num_counters   = 4,
1230 	.num_boxes	= 4,
1231 	.perf_ctr_bits	= 48,
1232 	.fixed_ctr_bits	= 48,
1233 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1234 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1235 	.event_descs	= snbep_uncore_imc_events,
1236 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1237 };
1238 
1239 static struct intel_uncore_type snbep_uncore_qpi = {
1240 	.name			= "qpi",
1241 	.num_counters		= 4,
1242 	.num_boxes		= 2,
1243 	.perf_ctr_bits		= 48,
1244 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1245 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1246 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1247 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1248 	.num_shared_regs	= 1,
1249 	.ops			= &snbep_uncore_qpi_ops,
1250 	.event_descs		= snbep_uncore_qpi_events,
1251 	.format_group		= &snbep_uncore_qpi_format_group,
1252 };
1253 
1254 
1255 static struct intel_uncore_type snbep_uncore_r2pcie = {
1256 	.name		= "r2pcie",
1257 	.num_counters   = 4,
1258 	.num_boxes	= 1,
1259 	.perf_ctr_bits	= 44,
1260 	.constraints	= snbep_uncore_r2pcie_constraints,
1261 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1262 };
1263 
1264 static struct intel_uncore_type snbep_uncore_r3qpi = {
1265 	.name		= "r3qpi",
1266 	.num_counters   = 3,
1267 	.num_boxes	= 2,
1268 	.perf_ctr_bits	= 44,
1269 	.constraints	= snbep_uncore_r3qpi_constraints,
1270 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1271 };
1272 
1273 enum {
1274 	SNBEP_PCI_UNCORE_HA,
1275 	SNBEP_PCI_UNCORE_IMC,
1276 	SNBEP_PCI_UNCORE_QPI,
1277 	SNBEP_PCI_UNCORE_R2PCIE,
1278 	SNBEP_PCI_UNCORE_R3QPI,
1279 };
1280 
1281 static struct intel_uncore_type *snbep_pci_uncores[] = {
1282 	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
1283 	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
1284 	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
1285 	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
1286 	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
1287 	NULL,
1288 };
1289 
1290 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1291 	{ /* Home Agent */
1292 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1293 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1294 	},
1295 	{ /* MC Channel 0 */
1296 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1297 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1298 	},
1299 	{ /* MC Channel 1 */
1300 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1301 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1302 	},
1303 	{ /* MC Channel 2 */
1304 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1305 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1306 	},
1307 	{ /* MC Channel 3 */
1308 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1309 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1310 	},
1311 	{ /* QPI Port 0 */
1312 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1313 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1314 	},
1315 	{ /* QPI Port 1 */
1316 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1317 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1318 	},
1319 	{ /* R2PCIe */
1320 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1321 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1322 	},
1323 	{ /* R3QPI Link 0 */
1324 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1325 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1326 	},
1327 	{ /* R3QPI Link 1 */
1328 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1329 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1330 	},
1331 	{ /* QPI Port 0 filter  */
1332 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1333 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1334 						   SNBEP_PCI_QPI_PORT0_FILTER),
1335 	},
1336 	{ /* QPI Port 0 filter  */
1337 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1338 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1339 						   SNBEP_PCI_QPI_PORT1_FILTER),
1340 	},
1341 	{ /* end: all zeroes */ }
1342 };
1343 
1344 static struct pci_driver snbep_uncore_pci_driver = {
1345 	.name		= "snbep_uncore",
1346 	.id_table	= snbep_uncore_pci_ids,
1347 };
1348 
1349 #define NODE_ID_MASK	0x7
1350 
1351 /*
1352  * build pci bus to socket mapping
1353  */
1354 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1355 {
1356 	struct pci_dev *ubox_dev = NULL;
1357 	int i, bus, nodeid, segment;
1358 	struct pci2phy_map *map;
1359 	int err = 0;
1360 	u32 config = 0;
1361 
1362 	while (1) {
1363 		/* find the UBOX device */
1364 		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1365 		if (!ubox_dev)
1366 			break;
1367 		bus = ubox_dev->bus->number;
1368 		/* get the Node ID of the local register */
1369 		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1370 		if (err)
1371 			break;
1372 		nodeid = config & NODE_ID_MASK;
1373 		/* get the Node ID mapping */
1374 		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1375 		if (err)
1376 			break;
1377 
1378 		segment = pci_domain_nr(ubox_dev->bus);
1379 		raw_spin_lock(&pci2phy_map_lock);
1380 		map = __find_pci2phy_map(segment);
1381 		if (!map) {
1382 			raw_spin_unlock(&pci2phy_map_lock);
1383 			err = -ENOMEM;
1384 			break;
1385 		}
1386 
1387 		/*
1388 		 * every three bits in the Node ID mapping register maps
1389 		 * to a particular node.
1390 		 */
1391 		for (i = 0; i < 8; i++) {
1392 			if (nodeid == ((config >> (3 * i)) & 0x7)) {
1393 				map->pbus_to_physid[bus] = i;
1394 				break;
1395 			}
1396 		}
1397 		raw_spin_unlock(&pci2phy_map_lock);
1398 	}
1399 
1400 	if (!err) {
1401 		/*
1402 		 * For PCI bus with no UBOX device, find the next bus
1403 		 * that has UBOX device and use its mapping.
1404 		 */
1405 		raw_spin_lock(&pci2phy_map_lock);
1406 		list_for_each_entry(map, &pci2phy_map_head, list) {
1407 			i = -1;
1408 			if (reverse) {
1409 				for (bus = 255; bus >= 0; bus--) {
1410 					if (map->pbus_to_physid[bus] >= 0)
1411 						i = map->pbus_to_physid[bus];
1412 					else
1413 						map->pbus_to_physid[bus] = i;
1414 				}
1415 			} else {
1416 				for (bus = 0; bus <= 255; bus++) {
1417 					if (map->pbus_to_physid[bus] >= 0)
1418 						i = map->pbus_to_physid[bus];
1419 					else
1420 						map->pbus_to_physid[bus] = i;
1421 				}
1422 			}
1423 		}
1424 		raw_spin_unlock(&pci2phy_map_lock);
1425 	}
1426 
1427 	pci_dev_put(ubox_dev);
1428 
1429 	return err ? pcibios_err_to_errno(err) : 0;
1430 }
1431 
1432 int snbep_uncore_pci_init(void)
1433 {
1434 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1435 	if (ret)
1436 		return ret;
1437 	uncore_pci_uncores = snbep_pci_uncores;
1438 	uncore_pci_driver = &snbep_uncore_pci_driver;
1439 	return 0;
1440 }
1441 /* end of Sandy Bridge-EP uncore support */
1442 
1443 /* IvyTown uncore support */
1444 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1445 {
1446 	unsigned msr = uncore_msr_box_ctl(box);
1447 	if (msr)
1448 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1449 }
1450 
1451 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1452 {
1453 	struct pci_dev *pdev = box->pci_dev;
1454 
1455 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1456 }
1457 
1458 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
1459 	.init_box	= ivbep_uncore_msr_init_box,		\
1460 	.disable_box	= snbep_uncore_msr_disable_box,		\
1461 	.enable_box	= snbep_uncore_msr_enable_box,		\
1462 	.disable_event	= snbep_uncore_msr_disable_event,	\
1463 	.enable_event	= snbep_uncore_msr_enable_event,	\
1464 	.read_counter	= uncore_msr_read_counter
1465 
1466 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1467 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1468 };
1469 
1470 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1471 	.init_box	= ivbep_uncore_pci_init_box,
1472 	.disable_box	= snbep_uncore_pci_disable_box,
1473 	.enable_box	= snbep_uncore_pci_enable_box,
1474 	.disable_event	= snbep_uncore_pci_disable_event,
1475 	.enable_event	= snbep_uncore_pci_enable_event,
1476 	.read_counter	= snbep_uncore_pci_read_counter,
1477 };
1478 
1479 #define IVBEP_UNCORE_PCI_COMMON_INIT()				\
1480 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1481 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1482 	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
1483 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1484 	.ops		= &ivbep_uncore_pci_ops,			\
1485 	.format_group	= &ivbep_uncore_format_group
1486 
1487 static struct attribute *ivbep_uncore_formats_attr[] = {
1488 	&format_attr_event.attr,
1489 	&format_attr_umask.attr,
1490 	&format_attr_edge.attr,
1491 	&format_attr_inv.attr,
1492 	&format_attr_thresh8.attr,
1493 	NULL,
1494 };
1495 
1496 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1497 	&format_attr_event.attr,
1498 	&format_attr_umask.attr,
1499 	&format_attr_edge.attr,
1500 	&format_attr_inv.attr,
1501 	&format_attr_thresh5.attr,
1502 	NULL,
1503 };
1504 
1505 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1506 	&format_attr_event.attr,
1507 	&format_attr_umask.attr,
1508 	&format_attr_edge.attr,
1509 	&format_attr_tid_en.attr,
1510 	&format_attr_thresh8.attr,
1511 	&format_attr_filter_tid.attr,
1512 	&format_attr_filter_link.attr,
1513 	&format_attr_filter_state2.attr,
1514 	&format_attr_filter_nid2.attr,
1515 	&format_attr_filter_opc2.attr,
1516 	&format_attr_filter_nc.attr,
1517 	&format_attr_filter_c6.attr,
1518 	&format_attr_filter_isoc.attr,
1519 	NULL,
1520 };
1521 
1522 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1523 	&format_attr_event.attr,
1524 	&format_attr_occ_sel.attr,
1525 	&format_attr_edge.attr,
1526 	&format_attr_thresh5.attr,
1527 	&format_attr_occ_invert.attr,
1528 	&format_attr_occ_edge.attr,
1529 	&format_attr_filter_band0.attr,
1530 	&format_attr_filter_band1.attr,
1531 	&format_attr_filter_band2.attr,
1532 	&format_attr_filter_band3.attr,
1533 	NULL,
1534 };
1535 
1536 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1537 	&format_attr_event_ext.attr,
1538 	&format_attr_umask.attr,
1539 	&format_attr_edge.attr,
1540 	&format_attr_thresh8.attr,
1541 	&format_attr_match_rds.attr,
1542 	&format_attr_match_rnid30.attr,
1543 	&format_attr_match_rnid4.attr,
1544 	&format_attr_match_dnid.attr,
1545 	&format_attr_match_mc.attr,
1546 	&format_attr_match_opc.attr,
1547 	&format_attr_match_vnw.attr,
1548 	&format_attr_match0.attr,
1549 	&format_attr_match1.attr,
1550 	&format_attr_mask_rds.attr,
1551 	&format_attr_mask_rnid30.attr,
1552 	&format_attr_mask_rnid4.attr,
1553 	&format_attr_mask_dnid.attr,
1554 	&format_attr_mask_mc.attr,
1555 	&format_attr_mask_opc.attr,
1556 	&format_attr_mask_vnw.attr,
1557 	&format_attr_mask0.attr,
1558 	&format_attr_mask1.attr,
1559 	NULL,
1560 };
1561 
1562 static const struct attribute_group ivbep_uncore_format_group = {
1563 	.name = "format",
1564 	.attrs = ivbep_uncore_formats_attr,
1565 };
1566 
1567 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1568 	.name = "format",
1569 	.attrs = ivbep_uncore_ubox_formats_attr,
1570 };
1571 
1572 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1573 	.name = "format",
1574 	.attrs = ivbep_uncore_cbox_formats_attr,
1575 };
1576 
1577 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1578 	.name = "format",
1579 	.attrs = ivbep_uncore_pcu_formats_attr,
1580 };
1581 
1582 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1583 	.name = "format",
1584 	.attrs = ivbep_uncore_qpi_formats_attr,
1585 };
1586 
1587 static struct intel_uncore_type ivbep_uncore_ubox = {
1588 	.name		= "ubox",
1589 	.num_counters   = 2,
1590 	.num_boxes	= 1,
1591 	.perf_ctr_bits	= 44,
1592 	.fixed_ctr_bits	= 48,
1593 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1594 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1595 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1596 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1597 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1598 	.ops		= &ivbep_uncore_msr_ops,
1599 	.format_group	= &ivbep_uncore_ubox_format_group,
1600 };
1601 
1602 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1603 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1604 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1605 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1606 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1607 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1608 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1609 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1610 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1611 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1612 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1613 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1614 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1615 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1616 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1617 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1618 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1619 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1620 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1621 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1622 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1623 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1624 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1625 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1626 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1627 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1628 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1629 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1630 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1631 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1632 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1633 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1634 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1635 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1636 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1637 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1638 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1639 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1640 	EVENT_EXTRA_END
1641 };
1642 
1643 static u64 ivbep_cbox_filter_mask(int fields)
1644 {
1645 	u64 mask = 0;
1646 
1647 	if (fields & 0x1)
1648 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1649 	if (fields & 0x2)
1650 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1651 	if (fields & 0x4)
1652 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1653 	if (fields & 0x8)
1654 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1655 	if (fields & 0x10) {
1656 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1657 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1658 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1659 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1660 	}
1661 
1662 	return mask;
1663 }
1664 
1665 static struct event_constraint *
1666 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1667 {
1668 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1669 }
1670 
1671 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1672 {
1673 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1674 	struct extra_reg *er;
1675 	int idx = 0;
1676 
1677 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1678 		if (er->event != (event->hw.config & er->config_mask))
1679 			continue;
1680 		idx |= er->idx;
1681 	}
1682 
1683 	if (idx) {
1684 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1685 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1686 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1687 		reg1->idx = idx;
1688 	}
1689 	return 0;
1690 }
1691 
1692 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1693 {
1694 	struct hw_perf_event *hwc = &event->hw;
1695 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1696 
1697 	if (reg1->idx != EXTRA_REG_NONE) {
1698 		u64 filter = uncore_shared_reg_config(box, 0);
1699 		wrmsrl(reg1->reg, filter & 0xffffffff);
1700 		wrmsrl(reg1->reg + 6, filter >> 32);
1701 	}
1702 
1703 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1704 }
1705 
1706 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1707 	.init_box		= ivbep_uncore_msr_init_box,
1708 	.disable_box		= snbep_uncore_msr_disable_box,
1709 	.enable_box		= snbep_uncore_msr_enable_box,
1710 	.disable_event		= snbep_uncore_msr_disable_event,
1711 	.enable_event		= ivbep_cbox_enable_event,
1712 	.read_counter		= uncore_msr_read_counter,
1713 	.hw_config		= ivbep_cbox_hw_config,
1714 	.get_constraint		= ivbep_cbox_get_constraint,
1715 	.put_constraint		= snbep_cbox_put_constraint,
1716 };
1717 
1718 static struct intel_uncore_type ivbep_uncore_cbox = {
1719 	.name			= "cbox",
1720 	.num_counters		= 4,
1721 	.num_boxes		= 15,
1722 	.perf_ctr_bits		= 44,
1723 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1724 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1725 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1726 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1727 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1728 	.num_shared_regs	= 1,
1729 	.constraints		= snbep_uncore_cbox_constraints,
1730 	.ops			= &ivbep_uncore_cbox_ops,
1731 	.format_group		= &ivbep_uncore_cbox_format_group,
1732 };
1733 
1734 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1735 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1736 	.hw_config		= snbep_pcu_hw_config,
1737 	.get_constraint		= snbep_pcu_get_constraint,
1738 	.put_constraint		= snbep_pcu_put_constraint,
1739 };
1740 
1741 static struct intel_uncore_type ivbep_uncore_pcu = {
1742 	.name			= "pcu",
1743 	.num_counters		= 4,
1744 	.num_boxes		= 1,
1745 	.perf_ctr_bits		= 48,
1746 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1747 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1748 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1749 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1750 	.num_shared_regs	= 1,
1751 	.ops			= &ivbep_uncore_pcu_ops,
1752 	.format_group		= &ivbep_uncore_pcu_format_group,
1753 };
1754 
1755 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1756 	&ivbep_uncore_ubox,
1757 	&ivbep_uncore_cbox,
1758 	&ivbep_uncore_pcu,
1759 	NULL,
1760 };
1761 
1762 void ivbep_uncore_cpu_init(void)
1763 {
1764 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1765 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1766 	uncore_msr_uncores = ivbep_msr_uncores;
1767 }
1768 
1769 static struct intel_uncore_type ivbep_uncore_ha = {
1770 	.name		= "ha",
1771 	.num_counters   = 4,
1772 	.num_boxes	= 2,
1773 	.perf_ctr_bits	= 48,
1774 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1775 };
1776 
1777 static struct intel_uncore_type ivbep_uncore_imc = {
1778 	.name		= "imc",
1779 	.num_counters   = 4,
1780 	.num_boxes	= 8,
1781 	.perf_ctr_bits	= 48,
1782 	.fixed_ctr_bits	= 48,
1783 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1784 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1785 	.event_descs	= snbep_uncore_imc_events,
1786 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1787 };
1788 
1789 /* registers in IRP boxes are not properly aligned */
1790 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1791 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1792 
1793 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1794 {
1795 	struct pci_dev *pdev = box->pci_dev;
1796 	struct hw_perf_event *hwc = &event->hw;
1797 
1798 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1799 			       hwc->config | SNBEP_PMON_CTL_EN);
1800 }
1801 
1802 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1803 {
1804 	struct pci_dev *pdev = box->pci_dev;
1805 	struct hw_perf_event *hwc = &event->hw;
1806 
1807 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1808 }
1809 
1810 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1811 {
1812 	struct pci_dev *pdev = box->pci_dev;
1813 	struct hw_perf_event *hwc = &event->hw;
1814 	u64 count = 0;
1815 
1816 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1817 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1818 
1819 	return count;
1820 }
1821 
1822 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1823 	.init_box	= ivbep_uncore_pci_init_box,
1824 	.disable_box	= snbep_uncore_pci_disable_box,
1825 	.enable_box	= snbep_uncore_pci_enable_box,
1826 	.disable_event	= ivbep_uncore_irp_disable_event,
1827 	.enable_event	= ivbep_uncore_irp_enable_event,
1828 	.read_counter	= ivbep_uncore_irp_read_counter,
1829 };
1830 
1831 static struct intel_uncore_type ivbep_uncore_irp = {
1832 	.name			= "irp",
1833 	.num_counters		= 4,
1834 	.num_boxes		= 1,
1835 	.perf_ctr_bits		= 48,
1836 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1837 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1838 	.ops			= &ivbep_uncore_irp_ops,
1839 	.format_group		= &ivbep_uncore_format_group,
1840 };
1841 
1842 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1843 	.init_box	= ivbep_uncore_pci_init_box,
1844 	.disable_box	= snbep_uncore_pci_disable_box,
1845 	.enable_box	= snbep_uncore_pci_enable_box,
1846 	.disable_event	= snbep_uncore_pci_disable_event,
1847 	.enable_event	= snbep_qpi_enable_event,
1848 	.read_counter	= snbep_uncore_pci_read_counter,
1849 	.hw_config	= snbep_qpi_hw_config,
1850 	.get_constraint	= uncore_get_constraint,
1851 	.put_constraint	= uncore_put_constraint,
1852 };
1853 
1854 static struct intel_uncore_type ivbep_uncore_qpi = {
1855 	.name			= "qpi",
1856 	.num_counters		= 4,
1857 	.num_boxes		= 3,
1858 	.perf_ctr_bits		= 48,
1859 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1860 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1861 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1862 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1863 	.num_shared_regs	= 1,
1864 	.ops			= &ivbep_uncore_qpi_ops,
1865 	.format_group		= &ivbep_uncore_qpi_format_group,
1866 };
1867 
1868 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1869 	.name		= "r2pcie",
1870 	.num_counters   = 4,
1871 	.num_boxes	= 1,
1872 	.perf_ctr_bits	= 44,
1873 	.constraints	= snbep_uncore_r2pcie_constraints,
1874 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1875 };
1876 
1877 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1878 	.name		= "r3qpi",
1879 	.num_counters   = 3,
1880 	.num_boxes	= 2,
1881 	.perf_ctr_bits	= 44,
1882 	.constraints	= snbep_uncore_r3qpi_constraints,
1883 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1884 };
1885 
1886 enum {
1887 	IVBEP_PCI_UNCORE_HA,
1888 	IVBEP_PCI_UNCORE_IMC,
1889 	IVBEP_PCI_UNCORE_IRP,
1890 	IVBEP_PCI_UNCORE_QPI,
1891 	IVBEP_PCI_UNCORE_R2PCIE,
1892 	IVBEP_PCI_UNCORE_R3QPI,
1893 };
1894 
1895 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1896 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1897 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1898 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1899 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1900 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1901 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1902 	NULL,
1903 };
1904 
1905 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1906 	{ /* Home Agent 0 */
1907 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1908 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1909 	},
1910 	{ /* Home Agent 1 */
1911 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1912 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1913 	},
1914 	{ /* MC0 Channel 0 */
1915 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1916 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1917 	},
1918 	{ /* MC0 Channel 1 */
1919 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1920 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1921 	},
1922 	{ /* MC0 Channel 3 */
1923 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1924 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1925 	},
1926 	{ /* MC0 Channel 4 */
1927 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1928 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1929 	},
1930 	{ /* MC1 Channel 0 */
1931 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1932 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1933 	},
1934 	{ /* MC1 Channel 1 */
1935 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1936 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1937 	},
1938 	{ /* MC1 Channel 3 */
1939 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1940 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1941 	},
1942 	{ /* MC1 Channel 4 */
1943 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1944 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1945 	},
1946 	{ /* IRP */
1947 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1948 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1949 	},
1950 	{ /* QPI0 Port 0 */
1951 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1952 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1953 	},
1954 	{ /* QPI0 Port 1 */
1955 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1956 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
1957 	},
1958 	{ /* QPI1 Port 2 */
1959 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1960 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
1961 	},
1962 	{ /* R2PCIe */
1963 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1964 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
1965 	},
1966 	{ /* R3QPI0 Link 0 */
1967 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1968 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
1969 	},
1970 	{ /* R3QPI0 Link 1 */
1971 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1972 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
1973 	},
1974 	{ /* R3QPI1 Link 2 */
1975 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1976 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
1977 	},
1978 	{ /* QPI Port 0 filter  */
1979 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1980 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1981 						   SNBEP_PCI_QPI_PORT0_FILTER),
1982 	},
1983 	{ /* QPI Port 0 filter  */
1984 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1985 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1986 						   SNBEP_PCI_QPI_PORT1_FILTER),
1987 	},
1988 	{ /* end: all zeroes */ }
1989 };
1990 
1991 static struct pci_driver ivbep_uncore_pci_driver = {
1992 	.name		= "ivbep_uncore",
1993 	.id_table	= ivbep_uncore_pci_ids,
1994 };
1995 
1996 int ivbep_uncore_pci_init(void)
1997 {
1998 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1999 	if (ret)
2000 		return ret;
2001 	uncore_pci_uncores = ivbep_pci_uncores;
2002 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2003 	return 0;
2004 }
2005 /* end of IvyTown uncore support */
2006 
2007 /* KNL uncore support */
2008 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2009 	&format_attr_event.attr,
2010 	&format_attr_umask.attr,
2011 	&format_attr_edge.attr,
2012 	&format_attr_tid_en.attr,
2013 	&format_attr_inv.attr,
2014 	&format_attr_thresh5.attr,
2015 	NULL,
2016 };
2017 
2018 static const struct attribute_group knl_uncore_ubox_format_group = {
2019 	.name = "format",
2020 	.attrs = knl_uncore_ubox_formats_attr,
2021 };
2022 
2023 static struct intel_uncore_type knl_uncore_ubox = {
2024 	.name			= "ubox",
2025 	.num_counters		= 2,
2026 	.num_boxes		= 1,
2027 	.perf_ctr_bits		= 48,
2028 	.fixed_ctr_bits		= 48,
2029 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2030 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2031 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2032 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2033 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2034 	.ops			= &snbep_uncore_msr_ops,
2035 	.format_group		= &knl_uncore_ubox_format_group,
2036 };
2037 
2038 static struct attribute *knl_uncore_cha_formats_attr[] = {
2039 	&format_attr_event.attr,
2040 	&format_attr_umask.attr,
2041 	&format_attr_qor.attr,
2042 	&format_attr_edge.attr,
2043 	&format_attr_tid_en.attr,
2044 	&format_attr_inv.attr,
2045 	&format_attr_thresh8.attr,
2046 	&format_attr_filter_tid4.attr,
2047 	&format_attr_filter_link3.attr,
2048 	&format_attr_filter_state4.attr,
2049 	&format_attr_filter_local.attr,
2050 	&format_attr_filter_all_op.attr,
2051 	&format_attr_filter_nnm.attr,
2052 	&format_attr_filter_opc3.attr,
2053 	&format_attr_filter_nc.attr,
2054 	&format_attr_filter_isoc.attr,
2055 	NULL,
2056 };
2057 
2058 static const struct attribute_group knl_uncore_cha_format_group = {
2059 	.name = "format",
2060 	.attrs = knl_uncore_cha_formats_attr,
2061 };
2062 
2063 static struct event_constraint knl_uncore_cha_constraints[] = {
2064 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2065 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2066 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2067 	EVENT_CONSTRAINT_END
2068 };
2069 
2070 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2071 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2072 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2073 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2074 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2075 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2076 	EVENT_EXTRA_END
2077 };
2078 
2079 static u64 knl_cha_filter_mask(int fields)
2080 {
2081 	u64 mask = 0;
2082 
2083 	if (fields & 0x1)
2084 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2085 	if (fields & 0x2)
2086 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2087 	if (fields & 0x4)
2088 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2089 	return mask;
2090 }
2091 
2092 static struct event_constraint *
2093 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2094 {
2095 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2096 }
2097 
2098 static int knl_cha_hw_config(struct intel_uncore_box *box,
2099 			     struct perf_event *event)
2100 {
2101 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2102 	struct extra_reg *er;
2103 	int idx = 0;
2104 
2105 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2106 		if (er->event != (event->hw.config & er->config_mask))
2107 			continue;
2108 		idx |= er->idx;
2109 	}
2110 
2111 	if (idx) {
2112 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2113 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2114 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2115 
2116 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2117 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2118 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2119 		reg1->idx = idx;
2120 	}
2121 	return 0;
2122 }
2123 
2124 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2125 				    struct perf_event *event);
2126 
2127 static struct intel_uncore_ops knl_uncore_cha_ops = {
2128 	.init_box		= snbep_uncore_msr_init_box,
2129 	.disable_box		= snbep_uncore_msr_disable_box,
2130 	.enable_box		= snbep_uncore_msr_enable_box,
2131 	.disable_event		= snbep_uncore_msr_disable_event,
2132 	.enable_event		= hswep_cbox_enable_event,
2133 	.read_counter		= uncore_msr_read_counter,
2134 	.hw_config		= knl_cha_hw_config,
2135 	.get_constraint		= knl_cha_get_constraint,
2136 	.put_constraint		= snbep_cbox_put_constraint,
2137 };
2138 
2139 static struct intel_uncore_type knl_uncore_cha = {
2140 	.name			= "cha",
2141 	.num_counters		= 4,
2142 	.num_boxes		= 38,
2143 	.perf_ctr_bits		= 48,
2144 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2145 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2146 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2147 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2148 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2149 	.num_shared_regs	= 1,
2150 	.constraints		= knl_uncore_cha_constraints,
2151 	.ops			= &knl_uncore_cha_ops,
2152 	.format_group		= &knl_uncore_cha_format_group,
2153 };
2154 
2155 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2156 	&format_attr_event2.attr,
2157 	&format_attr_use_occ_ctr.attr,
2158 	&format_attr_occ_sel.attr,
2159 	&format_attr_edge.attr,
2160 	&format_attr_tid_en.attr,
2161 	&format_attr_inv.attr,
2162 	&format_attr_thresh6.attr,
2163 	&format_attr_occ_invert.attr,
2164 	&format_attr_occ_edge_det.attr,
2165 	NULL,
2166 };
2167 
2168 static const struct attribute_group knl_uncore_pcu_format_group = {
2169 	.name = "format",
2170 	.attrs = knl_uncore_pcu_formats_attr,
2171 };
2172 
2173 static struct intel_uncore_type knl_uncore_pcu = {
2174 	.name			= "pcu",
2175 	.num_counters		= 4,
2176 	.num_boxes		= 1,
2177 	.perf_ctr_bits		= 48,
2178 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2179 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2180 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2181 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2182 	.ops			= &snbep_uncore_msr_ops,
2183 	.format_group		= &knl_uncore_pcu_format_group,
2184 };
2185 
2186 static struct intel_uncore_type *knl_msr_uncores[] = {
2187 	&knl_uncore_ubox,
2188 	&knl_uncore_cha,
2189 	&knl_uncore_pcu,
2190 	NULL,
2191 };
2192 
2193 void knl_uncore_cpu_init(void)
2194 {
2195 	uncore_msr_uncores = knl_msr_uncores;
2196 }
2197 
2198 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2199 {
2200 	struct pci_dev *pdev = box->pci_dev;
2201 	int box_ctl = uncore_pci_box_ctl(box);
2202 
2203 	pci_write_config_dword(pdev, box_ctl, 0);
2204 }
2205 
2206 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2207 					struct perf_event *event)
2208 {
2209 	struct pci_dev *pdev = box->pci_dev;
2210 	struct hw_perf_event *hwc = &event->hw;
2211 
2212 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2213 							== UNCORE_FIXED_EVENT)
2214 		pci_write_config_dword(pdev, hwc->config_base,
2215 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2216 	else
2217 		pci_write_config_dword(pdev, hwc->config_base,
2218 				       hwc->config | SNBEP_PMON_CTL_EN);
2219 }
2220 
2221 static struct intel_uncore_ops knl_uncore_imc_ops = {
2222 	.init_box	= snbep_uncore_pci_init_box,
2223 	.disable_box	= snbep_uncore_pci_disable_box,
2224 	.enable_box	= knl_uncore_imc_enable_box,
2225 	.read_counter	= snbep_uncore_pci_read_counter,
2226 	.enable_event	= knl_uncore_imc_enable_event,
2227 	.disable_event	= snbep_uncore_pci_disable_event,
2228 };
2229 
2230 static struct intel_uncore_type knl_uncore_imc_uclk = {
2231 	.name			= "imc_uclk",
2232 	.num_counters		= 4,
2233 	.num_boxes		= 2,
2234 	.perf_ctr_bits		= 48,
2235 	.fixed_ctr_bits		= 48,
2236 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2237 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2238 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2239 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2240 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2241 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2242 	.ops			= &knl_uncore_imc_ops,
2243 	.format_group		= &snbep_uncore_format_group,
2244 };
2245 
2246 static struct intel_uncore_type knl_uncore_imc_dclk = {
2247 	.name			= "imc",
2248 	.num_counters		= 4,
2249 	.num_boxes		= 6,
2250 	.perf_ctr_bits		= 48,
2251 	.fixed_ctr_bits		= 48,
2252 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2253 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2254 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2255 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2256 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2257 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2258 	.ops			= &knl_uncore_imc_ops,
2259 	.format_group		= &snbep_uncore_format_group,
2260 };
2261 
2262 static struct intel_uncore_type knl_uncore_edc_uclk = {
2263 	.name			= "edc_uclk",
2264 	.num_counters		= 4,
2265 	.num_boxes		= 8,
2266 	.perf_ctr_bits		= 48,
2267 	.fixed_ctr_bits		= 48,
2268 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2269 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2270 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2271 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2272 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2273 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2274 	.ops			= &knl_uncore_imc_ops,
2275 	.format_group		= &snbep_uncore_format_group,
2276 };
2277 
2278 static struct intel_uncore_type knl_uncore_edc_eclk = {
2279 	.name			= "edc_eclk",
2280 	.num_counters		= 4,
2281 	.num_boxes		= 8,
2282 	.perf_ctr_bits		= 48,
2283 	.fixed_ctr_bits		= 48,
2284 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2285 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2286 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2287 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2288 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2289 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2290 	.ops			= &knl_uncore_imc_ops,
2291 	.format_group		= &snbep_uncore_format_group,
2292 };
2293 
2294 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2295 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2296 	EVENT_CONSTRAINT_END
2297 };
2298 
2299 static struct intel_uncore_type knl_uncore_m2pcie = {
2300 	.name		= "m2pcie",
2301 	.num_counters   = 4,
2302 	.num_boxes	= 1,
2303 	.perf_ctr_bits	= 48,
2304 	.constraints	= knl_uncore_m2pcie_constraints,
2305 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2306 };
2307 
2308 static struct attribute *knl_uncore_irp_formats_attr[] = {
2309 	&format_attr_event.attr,
2310 	&format_attr_umask.attr,
2311 	&format_attr_qor.attr,
2312 	&format_attr_edge.attr,
2313 	&format_attr_inv.attr,
2314 	&format_attr_thresh8.attr,
2315 	NULL,
2316 };
2317 
2318 static const struct attribute_group knl_uncore_irp_format_group = {
2319 	.name = "format",
2320 	.attrs = knl_uncore_irp_formats_attr,
2321 };
2322 
2323 static struct intel_uncore_type knl_uncore_irp = {
2324 	.name			= "irp",
2325 	.num_counters		= 2,
2326 	.num_boxes		= 1,
2327 	.perf_ctr_bits		= 48,
2328 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2329 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2330 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2331 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2332 	.ops			= &snbep_uncore_pci_ops,
2333 	.format_group		= &knl_uncore_irp_format_group,
2334 };
2335 
2336 enum {
2337 	KNL_PCI_UNCORE_MC_UCLK,
2338 	KNL_PCI_UNCORE_MC_DCLK,
2339 	KNL_PCI_UNCORE_EDC_UCLK,
2340 	KNL_PCI_UNCORE_EDC_ECLK,
2341 	KNL_PCI_UNCORE_M2PCIE,
2342 	KNL_PCI_UNCORE_IRP,
2343 };
2344 
2345 static struct intel_uncore_type *knl_pci_uncores[] = {
2346 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2347 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2348 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2349 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2350 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2351 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2352 	NULL,
2353 };
2354 
2355 /*
2356  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2357  * device type. prior to KNL, each instance of a PMU device type had a unique
2358  * device ID.
2359  *
2360  *	PCI Device ID	Uncore PMU Devices
2361  *	----------------------------------
2362  *	0x7841		MC0 UClk, MC1 UClk
2363  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2364  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2365  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2366  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2367  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2368  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2369  *	0x7817		M2PCIe
2370  *	0x7814		IRP
2371 */
2372 
2373 static const struct pci_device_id knl_uncore_pci_ids[] = {
2374 	{ /* MC0 UClk */
2375 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2376 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2377 	},
2378 	{ /* MC1 UClk */
2379 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2380 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2381 	},
2382 	{ /* MC0 DClk CH 0 */
2383 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2384 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2385 	},
2386 	{ /* MC0 DClk CH 1 */
2387 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2388 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2389 	},
2390 	{ /* MC0 DClk CH 2 */
2391 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2392 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2393 	},
2394 	{ /* MC1 DClk CH 0 */
2395 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2396 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2397 	},
2398 	{ /* MC1 DClk CH 1 */
2399 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2400 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2401 	},
2402 	{ /* MC1 DClk CH 2 */
2403 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2404 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2405 	},
2406 	{ /* EDC0 UClk */
2407 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2408 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2409 	},
2410 	{ /* EDC1 UClk */
2411 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2412 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2413 	},
2414 	{ /* EDC2 UClk */
2415 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2416 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2417 	},
2418 	{ /* EDC3 UClk */
2419 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2420 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2421 	},
2422 	{ /* EDC4 UClk */
2423 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2424 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2425 	},
2426 	{ /* EDC5 UClk */
2427 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2428 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2429 	},
2430 	{ /* EDC6 UClk */
2431 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2432 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2433 	},
2434 	{ /* EDC7 UClk */
2435 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2436 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2437 	},
2438 	{ /* EDC0 EClk */
2439 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2440 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2441 	},
2442 	{ /* EDC1 EClk */
2443 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2444 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2445 	},
2446 	{ /* EDC2 EClk */
2447 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2448 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2449 	},
2450 	{ /* EDC3 EClk */
2451 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2452 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2453 	},
2454 	{ /* EDC4 EClk */
2455 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2456 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2457 	},
2458 	{ /* EDC5 EClk */
2459 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2460 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2461 	},
2462 	{ /* EDC6 EClk */
2463 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2464 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2465 	},
2466 	{ /* EDC7 EClk */
2467 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2468 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2469 	},
2470 	{ /* M2PCIe */
2471 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2472 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2473 	},
2474 	{ /* IRP */
2475 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2476 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2477 	},
2478 	{ /* end: all zeroes */ }
2479 };
2480 
2481 static struct pci_driver knl_uncore_pci_driver = {
2482 	.name		= "knl_uncore",
2483 	.id_table	= knl_uncore_pci_ids,
2484 };
2485 
2486 int knl_uncore_pci_init(void)
2487 {
2488 	int ret;
2489 
2490 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2491 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2492 	if (ret)
2493 		return ret;
2494 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2495 	if (ret)
2496 		return ret;
2497 	uncore_pci_uncores = knl_pci_uncores;
2498 	uncore_pci_driver = &knl_uncore_pci_driver;
2499 	return 0;
2500 }
2501 
2502 /* end of KNL uncore support */
2503 
2504 /* Haswell-EP uncore support */
2505 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2506 	&format_attr_event.attr,
2507 	&format_attr_umask.attr,
2508 	&format_attr_edge.attr,
2509 	&format_attr_inv.attr,
2510 	&format_attr_thresh5.attr,
2511 	&format_attr_filter_tid2.attr,
2512 	&format_attr_filter_cid.attr,
2513 	NULL,
2514 };
2515 
2516 static const struct attribute_group hswep_uncore_ubox_format_group = {
2517 	.name = "format",
2518 	.attrs = hswep_uncore_ubox_formats_attr,
2519 };
2520 
2521 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2522 {
2523 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2524 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2525 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2526 	reg1->idx = 0;
2527 	return 0;
2528 }
2529 
2530 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2531 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2532 	.hw_config		= hswep_ubox_hw_config,
2533 	.get_constraint		= uncore_get_constraint,
2534 	.put_constraint		= uncore_put_constraint,
2535 };
2536 
2537 static struct intel_uncore_type hswep_uncore_ubox = {
2538 	.name			= "ubox",
2539 	.num_counters		= 2,
2540 	.num_boxes		= 1,
2541 	.perf_ctr_bits		= 44,
2542 	.fixed_ctr_bits		= 48,
2543 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2544 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2545 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2546 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2547 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2548 	.num_shared_regs	= 1,
2549 	.ops			= &hswep_uncore_ubox_ops,
2550 	.format_group		= &hswep_uncore_ubox_format_group,
2551 };
2552 
2553 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2554 	&format_attr_event.attr,
2555 	&format_attr_umask.attr,
2556 	&format_attr_edge.attr,
2557 	&format_attr_tid_en.attr,
2558 	&format_attr_thresh8.attr,
2559 	&format_attr_filter_tid3.attr,
2560 	&format_attr_filter_link2.attr,
2561 	&format_attr_filter_state3.attr,
2562 	&format_attr_filter_nid2.attr,
2563 	&format_attr_filter_opc2.attr,
2564 	&format_attr_filter_nc.attr,
2565 	&format_attr_filter_c6.attr,
2566 	&format_attr_filter_isoc.attr,
2567 	NULL,
2568 };
2569 
2570 static const struct attribute_group hswep_uncore_cbox_format_group = {
2571 	.name = "format",
2572 	.attrs = hswep_uncore_cbox_formats_attr,
2573 };
2574 
2575 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2576 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2577 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2578 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2579 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2580 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2581 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2582 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2583 	EVENT_CONSTRAINT_END
2584 };
2585 
2586 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2587 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2588 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2589 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2590 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2591 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2592 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2593 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2594 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2595 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2596 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2597 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2598 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2599 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2600 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2601 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2602 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2603 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2604 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2605 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2606 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2607 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2608 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2609 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2610 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2611 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2612 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2613 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2614 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2615 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2616 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2617 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2618 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2619 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2620 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2621 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2622 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2623 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2624 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2625 	EVENT_EXTRA_END
2626 };
2627 
2628 static u64 hswep_cbox_filter_mask(int fields)
2629 {
2630 	u64 mask = 0;
2631 	if (fields & 0x1)
2632 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2633 	if (fields & 0x2)
2634 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2635 	if (fields & 0x4)
2636 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2637 	if (fields & 0x8)
2638 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2639 	if (fields & 0x10) {
2640 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2641 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2642 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2643 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2644 	}
2645 	return mask;
2646 }
2647 
2648 static struct event_constraint *
2649 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2650 {
2651 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2652 }
2653 
2654 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2655 {
2656 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2657 	struct extra_reg *er;
2658 	int idx = 0;
2659 
2660 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2661 		if (er->event != (event->hw.config & er->config_mask))
2662 			continue;
2663 		idx |= er->idx;
2664 	}
2665 
2666 	if (idx) {
2667 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2668 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2669 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2670 		reg1->idx = idx;
2671 	}
2672 	return 0;
2673 }
2674 
2675 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2676 				  struct perf_event *event)
2677 {
2678 	struct hw_perf_event *hwc = &event->hw;
2679 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2680 
2681 	if (reg1->idx != EXTRA_REG_NONE) {
2682 		u64 filter = uncore_shared_reg_config(box, 0);
2683 		wrmsrl(reg1->reg, filter & 0xffffffff);
2684 		wrmsrl(reg1->reg + 1, filter >> 32);
2685 	}
2686 
2687 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2688 }
2689 
2690 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2691 	.init_box		= snbep_uncore_msr_init_box,
2692 	.disable_box		= snbep_uncore_msr_disable_box,
2693 	.enable_box		= snbep_uncore_msr_enable_box,
2694 	.disable_event		= snbep_uncore_msr_disable_event,
2695 	.enable_event		= hswep_cbox_enable_event,
2696 	.read_counter		= uncore_msr_read_counter,
2697 	.hw_config		= hswep_cbox_hw_config,
2698 	.get_constraint		= hswep_cbox_get_constraint,
2699 	.put_constraint		= snbep_cbox_put_constraint,
2700 };
2701 
2702 static struct intel_uncore_type hswep_uncore_cbox = {
2703 	.name			= "cbox",
2704 	.num_counters		= 4,
2705 	.num_boxes		= 18,
2706 	.perf_ctr_bits		= 48,
2707 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2708 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2709 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2710 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2711 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2712 	.num_shared_regs	= 1,
2713 	.constraints		= hswep_uncore_cbox_constraints,
2714 	.ops			= &hswep_uncore_cbox_ops,
2715 	.format_group		= &hswep_uncore_cbox_format_group,
2716 };
2717 
2718 /*
2719  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2720  */
2721 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2722 {
2723 	unsigned msr = uncore_msr_box_ctl(box);
2724 
2725 	if (msr) {
2726 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2727 		u64 flags = 0;
2728 		int i;
2729 
2730 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2731 			flags |= (1ULL << i);
2732 			wrmsrl(msr, flags);
2733 		}
2734 	}
2735 }
2736 
2737 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2738 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2739 	.init_box		= hswep_uncore_sbox_msr_init_box
2740 };
2741 
2742 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2743 	&format_attr_event.attr,
2744 	&format_attr_umask.attr,
2745 	&format_attr_edge.attr,
2746 	&format_attr_tid_en.attr,
2747 	&format_attr_inv.attr,
2748 	&format_attr_thresh8.attr,
2749 	NULL,
2750 };
2751 
2752 static const struct attribute_group hswep_uncore_sbox_format_group = {
2753 	.name = "format",
2754 	.attrs = hswep_uncore_sbox_formats_attr,
2755 };
2756 
2757 static struct intel_uncore_type hswep_uncore_sbox = {
2758 	.name			= "sbox",
2759 	.num_counters		= 4,
2760 	.num_boxes		= 4,
2761 	.perf_ctr_bits		= 44,
2762 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2763 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2764 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2765 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2766 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2767 	.ops			= &hswep_uncore_sbox_msr_ops,
2768 	.format_group		= &hswep_uncore_sbox_format_group,
2769 };
2770 
2771 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2772 {
2773 	struct hw_perf_event *hwc = &event->hw;
2774 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2775 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2776 
2777 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2778 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2779 		reg1->idx = ev_sel - 0xb;
2780 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2781 	}
2782 	return 0;
2783 }
2784 
2785 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2786 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2787 	.hw_config		= hswep_pcu_hw_config,
2788 	.get_constraint		= snbep_pcu_get_constraint,
2789 	.put_constraint		= snbep_pcu_put_constraint,
2790 };
2791 
2792 static struct intel_uncore_type hswep_uncore_pcu = {
2793 	.name			= "pcu",
2794 	.num_counters		= 4,
2795 	.num_boxes		= 1,
2796 	.perf_ctr_bits		= 48,
2797 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2798 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2799 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2800 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2801 	.num_shared_regs	= 1,
2802 	.ops			= &hswep_uncore_pcu_ops,
2803 	.format_group		= &snbep_uncore_pcu_format_group,
2804 };
2805 
2806 static struct intel_uncore_type *hswep_msr_uncores[] = {
2807 	&hswep_uncore_ubox,
2808 	&hswep_uncore_cbox,
2809 	&hswep_uncore_sbox,
2810 	&hswep_uncore_pcu,
2811 	NULL,
2812 };
2813 
2814 void hswep_uncore_cpu_init(void)
2815 {
2816 	int pkg = boot_cpu_data.logical_proc_id;
2817 
2818 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2819 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2820 
2821 	/* Detect 6-8 core systems with only two SBOXes */
2822 	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
2823 		u32 capid4;
2824 
2825 		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
2826 				      0x94, &capid4);
2827 		if (((capid4 >> 6) & 0x3) == 0)
2828 			hswep_uncore_sbox.num_boxes = 2;
2829 	}
2830 
2831 	uncore_msr_uncores = hswep_msr_uncores;
2832 }
2833 
2834 static struct intel_uncore_type hswep_uncore_ha = {
2835 	.name		= "ha",
2836 	.num_counters   = 4,
2837 	.num_boxes	= 2,
2838 	.perf_ctr_bits	= 48,
2839 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2840 };
2841 
2842 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2843 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2844 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2845 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2846 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2847 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2848 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2849 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2850 	{ /* end: all zeroes */ },
2851 };
2852 
2853 static struct intel_uncore_type hswep_uncore_imc = {
2854 	.name		= "imc",
2855 	.num_counters   = 4,
2856 	.num_boxes	= 8,
2857 	.perf_ctr_bits	= 48,
2858 	.fixed_ctr_bits	= 48,
2859 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2860 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2861 	.event_descs	= hswep_uncore_imc_events,
2862 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2863 };
2864 
2865 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2866 
2867 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2868 {
2869 	struct pci_dev *pdev = box->pci_dev;
2870 	struct hw_perf_event *hwc = &event->hw;
2871 	u64 count = 0;
2872 
2873 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2874 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2875 
2876 	return count;
2877 }
2878 
2879 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2880 	.init_box	= snbep_uncore_pci_init_box,
2881 	.disable_box	= snbep_uncore_pci_disable_box,
2882 	.enable_box	= snbep_uncore_pci_enable_box,
2883 	.disable_event	= ivbep_uncore_irp_disable_event,
2884 	.enable_event	= ivbep_uncore_irp_enable_event,
2885 	.read_counter	= hswep_uncore_irp_read_counter,
2886 };
2887 
2888 static struct intel_uncore_type hswep_uncore_irp = {
2889 	.name			= "irp",
2890 	.num_counters		= 4,
2891 	.num_boxes		= 1,
2892 	.perf_ctr_bits		= 48,
2893 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2894 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2895 	.ops			= &hswep_uncore_irp_ops,
2896 	.format_group		= &snbep_uncore_format_group,
2897 };
2898 
2899 static struct intel_uncore_type hswep_uncore_qpi = {
2900 	.name			= "qpi",
2901 	.num_counters		= 4,
2902 	.num_boxes		= 3,
2903 	.perf_ctr_bits		= 48,
2904 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2905 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2906 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2907 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2908 	.num_shared_regs	= 1,
2909 	.ops			= &snbep_uncore_qpi_ops,
2910 	.format_group		= &snbep_uncore_qpi_format_group,
2911 };
2912 
2913 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2914 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2915 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2916 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2917 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2918 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2919 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2920 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2921 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2922 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2923 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2924 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2925 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2926 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2927 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2928 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2929 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2930 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2931 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2932 	EVENT_CONSTRAINT_END
2933 };
2934 
2935 static struct intel_uncore_type hswep_uncore_r2pcie = {
2936 	.name		= "r2pcie",
2937 	.num_counters   = 4,
2938 	.num_boxes	= 1,
2939 	.perf_ctr_bits	= 48,
2940 	.constraints	= hswep_uncore_r2pcie_constraints,
2941 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2942 };
2943 
2944 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2945 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2946 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2947 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2948 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2949 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2950 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2951 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2952 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2953 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
2954 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2955 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
2956 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
2957 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
2958 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
2959 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
2960 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
2961 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2962 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
2963 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2964 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2965 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2966 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2967 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2968 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
2969 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
2970 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
2971 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2972 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2973 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2974 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
2975 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
2976 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2977 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
2978 	EVENT_CONSTRAINT_END
2979 };
2980 
2981 static struct intel_uncore_type hswep_uncore_r3qpi = {
2982 	.name		= "r3qpi",
2983 	.num_counters   = 3,
2984 	.num_boxes	= 3,
2985 	.perf_ctr_bits	= 44,
2986 	.constraints	= hswep_uncore_r3qpi_constraints,
2987 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2988 };
2989 
2990 enum {
2991 	HSWEP_PCI_UNCORE_HA,
2992 	HSWEP_PCI_UNCORE_IMC,
2993 	HSWEP_PCI_UNCORE_IRP,
2994 	HSWEP_PCI_UNCORE_QPI,
2995 	HSWEP_PCI_UNCORE_R2PCIE,
2996 	HSWEP_PCI_UNCORE_R3QPI,
2997 };
2998 
2999 static struct intel_uncore_type *hswep_pci_uncores[] = {
3000 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3001 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3002 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3003 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3004 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3005 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3006 	NULL,
3007 };
3008 
3009 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3010 	{ /* Home Agent 0 */
3011 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3012 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3013 	},
3014 	{ /* Home Agent 1 */
3015 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3016 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3017 	},
3018 	{ /* MC0 Channel 0 */
3019 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3020 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3021 	},
3022 	{ /* MC0 Channel 1 */
3023 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3024 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3025 	},
3026 	{ /* MC0 Channel 2 */
3027 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3028 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3029 	},
3030 	{ /* MC0 Channel 3 */
3031 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3032 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3033 	},
3034 	{ /* MC1 Channel 0 */
3035 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3036 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3037 	},
3038 	{ /* MC1 Channel 1 */
3039 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3040 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3041 	},
3042 	{ /* MC1 Channel 2 */
3043 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3044 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3045 	},
3046 	{ /* MC1 Channel 3 */
3047 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3048 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3049 	},
3050 	{ /* IRP */
3051 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3052 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3053 	},
3054 	{ /* QPI0 Port 0 */
3055 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3056 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3057 	},
3058 	{ /* QPI0 Port 1 */
3059 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3060 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3061 	},
3062 	{ /* QPI1 Port 2 */
3063 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3064 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3065 	},
3066 	{ /* R2PCIe */
3067 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3068 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3069 	},
3070 	{ /* R3QPI0 Link 0 */
3071 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3072 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3073 	},
3074 	{ /* R3QPI0 Link 1 */
3075 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3076 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3077 	},
3078 	{ /* R3QPI1 Link 2 */
3079 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3080 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3081 	},
3082 	{ /* QPI Port 0 filter  */
3083 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3084 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3085 						   SNBEP_PCI_QPI_PORT0_FILTER),
3086 	},
3087 	{ /* QPI Port 1 filter  */
3088 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3089 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3090 						   SNBEP_PCI_QPI_PORT1_FILTER),
3091 	},
3092 	{ /* PCU.3 (for Capability registers) */
3093 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
3094 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3095 						   HSWEP_PCI_PCU_3),
3096 	},
3097 	{ /* end: all zeroes */ }
3098 };
3099 
3100 static struct pci_driver hswep_uncore_pci_driver = {
3101 	.name		= "hswep_uncore",
3102 	.id_table	= hswep_uncore_pci_ids,
3103 };
3104 
3105 int hswep_uncore_pci_init(void)
3106 {
3107 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3108 	if (ret)
3109 		return ret;
3110 	uncore_pci_uncores = hswep_pci_uncores;
3111 	uncore_pci_driver = &hswep_uncore_pci_driver;
3112 	return 0;
3113 }
3114 /* end of Haswell-EP uncore support */
3115 
3116 /* BDX uncore support */
3117 
3118 static struct intel_uncore_type bdx_uncore_ubox = {
3119 	.name			= "ubox",
3120 	.num_counters		= 2,
3121 	.num_boxes		= 1,
3122 	.perf_ctr_bits		= 48,
3123 	.fixed_ctr_bits		= 48,
3124 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3125 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3126 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3127 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3128 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3129 	.num_shared_regs	= 1,
3130 	.ops			= &ivbep_uncore_msr_ops,
3131 	.format_group		= &ivbep_uncore_ubox_format_group,
3132 };
3133 
3134 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3135 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3136 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3137 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3138 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3139 	EVENT_CONSTRAINT_END
3140 };
3141 
3142 static struct intel_uncore_type bdx_uncore_cbox = {
3143 	.name			= "cbox",
3144 	.num_counters		= 4,
3145 	.num_boxes		= 24,
3146 	.perf_ctr_bits		= 48,
3147 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3148 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3149 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3150 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3151 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3152 	.num_shared_regs	= 1,
3153 	.constraints		= bdx_uncore_cbox_constraints,
3154 	.ops			= &hswep_uncore_cbox_ops,
3155 	.format_group		= &hswep_uncore_cbox_format_group,
3156 };
3157 
3158 static struct intel_uncore_type bdx_uncore_sbox = {
3159 	.name			= "sbox",
3160 	.num_counters		= 4,
3161 	.num_boxes		= 4,
3162 	.perf_ctr_bits		= 48,
3163 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3164 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3165 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3166 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3167 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3168 	.ops			= &hswep_uncore_sbox_msr_ops,
3169 	.format_group		= &hswep_uncore_sbox_format_group,
3170 };
3171 
3172 #define BDX_MSR_UNCORE_SBOX	3
3173 
3174 static struct intel_uncore_type *bdx_msr_uncores[] = {
3175 	&bdx_uncore_ubox,
3176 	&bdx_uncore_cbox,
3177 	&hswep_uncore_pcu,
3178 	&bdx_uncore_sbox,
3179 	NULL,
3180 };
3181 
3182 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3183 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3184 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3185 	EVENT_CONSTRAINT_END
3186 };
3187 
3188 void bdx_uncore_cpu_init(void)
3189 {
3190 	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3191 
3192 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3193 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3194 	uncore_msr_uncores = bdx_msr_uncores;
3195 
3196 	/* BDX-DE doesn't have SBOX */
3197 	if (boot_cpu_data.x86_model == 86) {
3198 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3199 	/* Detect systems with no SBOXes */
3200 	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3201 		struct pci_dev *pdev;
3202 		u32 capid4;
3203 
3204 		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3205 		pci_read_config_dword(pdev, 0x94, &capid4);
3206 		if (((capid4 >> 6) & 0x3) == 0)
3207 			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3208 	}
3209 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3210 }
3211 
3212 static struct intel_uncore_type bdx_uncore_ha = {
3213 	.name		= "ha",
3214 	.num_counters   = 4,
3215 	.num_boxes	= 2,
3216 	.perf_ctr_bits	= 48,
3217 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3218 };
3219 
3220 static struct intel_uncore_type bdx_uncore_imc = {
3221 	.name		= "imc",
3222 	.num_counters   = 4,
3223 	.num_boxes	= 8,
3224 	.perf_ctr_bits	= 48,
3225 	.fixed_ctr_bits	= 48,
3226 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3227 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3228 	.event_descs	= hswep_uncore_imc_events,
3229 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3230 };
3231 
3232 static struct intel_uncore_type bdx_uncore_irp = {
3233 	.name			= "irp",
3234 	.num_counters		= 4,
3235 	.num_boxes		= 1,
3236 	.perf_ctr_bits		= 48,
3237 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3238 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3239 	.ops			= &hswep_uncore_irp_ops,
3240 	.format_group		= &snbep_uncore_format_group,
3241 };
3242 
3243 static struct intel_uncore_type bdx_uncore_qpi = {
3244 	.name			= "qpi",
3245 	.num_counters		= 4,
3246 	.num_boxes		= 3,
3247 	.perf_ctr_bits		= 48,
3248 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3249 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3250 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3251 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3252 	.num_shared_regs	= 1,
3253 	.ops			= &snbep_uncore_qpi_ops,
3254 	.format_group		= &snbep_uncore_qpi_format_group,
3255 };
3256 
3257 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3258 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3259 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3260 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3261 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3262 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3263 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3264 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3265 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3266 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3267 	EVENT_CONSTRAINT_END
3268 };
3269 
3270 static struct intel_uncore_type bdx_uncore_r2pcie = {
3271 	.name		= "r2pcie",
3272 	.num_counters   = 4,
3273 	.num_boxes	= 1,
3274 	.perf_ctr_bits	= 48,
3275 	.constraints	= bdx_uncore_r2pcie_constraints,
3276 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3277 };
3278 
3279 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3280 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3281 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3282 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3283 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3284 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3285 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3286 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3287 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3288 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3289 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3290 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3291 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3292 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3293 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3294 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3295 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3296 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3297 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3298 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3299 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3300 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3301 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3302 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3303 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3304 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3305 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3306 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3307 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3308 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3309 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3310 	EVENT_CONSTRAINT_END
3311 };
3312 
3313 static struct intel_uncore_type bdx_uncore_r3qpi = {
3314 	.name		= "r3qpi",
3315 	.num_counters   = 3,
3316 	.num_boxes	= 3,
3317 	.perf_ctr_bits	= 48,
3318 	.constraints	= bdx_uncore_r3qpi_constraints,
3319 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3320 };
3321 
3322 enum {
3323 	BDX_PCI_UNCORE_HA,
3324 	BDX_PCI_UNCORE_IMC,
3325 	BDX_PCI_UNCORE_IRP,
3326 	BDX_PCI_UNCORE_QPI,
3327 	BDX_PCI_UNCORE_R2PCIE,
3328 	BDX_PCI_UNCORE_R3QPI,
3329 };
3330 
3331 static struct intel_uncore_type *bdx_pci_uncores[] = {
3332 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3333 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3334 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3335 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3336 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3337 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3338 	NULL,
3339 };
3340 
3341 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3342 	{ /* Home Agent 0 */
3343 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3344 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3345 	},
3346 	{ /* Home Agent 1 */
3347 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3348 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3349 	},
3350 	{ /* MC0 Channel 0 */
3351 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3352 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3353 	},
3354 	{ /* MC0 Channel 1 */
3355 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3356 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3357 	},
3358 	{ /* MC0 Channel 2 */
3359 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3360 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3361 	},
3362 	{ /* MC0 Channel 3 */
3363 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3364 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3365 	},
3366 	{ /* MC1 Channel 0 */
3367 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3368 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3369 	},
3370 	{ /* MC1 Channel 1 */
3371 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3372 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3373 	},
3374 	{ /* MC1 Channel 2 */
3375 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3376 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3377 	},
3378 	{ /* MC1 Channel 3 */
3379 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3380 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3381 	},
3382 	{ /* IRP */
3383 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3384 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3385 	},
3386 	{ /* QPI0 Port 0 */
3387 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3388 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3389 	},
3390 	{ /* QPI0 Port 1 */
3391 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3392 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3393 	},
3394 	{ /* QPI1 Port 2 */
3395 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3396 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3397 	},
3398 	{ /* R2PCIe */
3399 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3400 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3401 	},
3402 	{ /* R3QPI0 Link 0 */
3403 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3404 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3405 	},
3406 	{ /* R3QPI0 Link 1 */
3407 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3408 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3409 	},
3410 	{ /* R3QPI1 Link 2 */
3411 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3412 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3413 	},
3414 	{ /* QPI Port 0 filter  */
3415 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3416 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3417 						   SNBEP_PCI_QPI_PORT0_FILTER),
3418 	},
3419 	{ /* QPI Port 1 filter  */
3420 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3421 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3422 						   SNBEP_PCI_QPI_PORT1_FILTER),
3423 	},
3424 	{ /* QPI Port 2 filter  */
3425 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3426 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3427 						   BDX_PCI_QPI_PORT2_FILTER),
3428 	},
3429 	{ /* PCU.3 (for Capability registers) */
3430 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
3431 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3432 						   HSWEP_PCI_PCU_3),
3433 	},
3434 	{ /* end: all zeroes */ }
3435 };
3436 
3437 static struct pci_driver bdx_uncore_pci_driver = {
3438 	.name		= "bdx_uncore",
3439 	.id_table	= bdx_uncore_pci_ids,
3440 };
3441 
3442 int bdx_uncore_pci_init(void)
3443 {
3444 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3445 
3446 	if (ret)
3447 		return ret;
3448 	uncore_pci_uncores = bdx_pci_uncores;
3449 	uncore_pci_driver = &bdx_uncore_pci_driver;
3450 	return 0;
3451 }
3452 
3453 /* end of BDX uncore support */
3454 
3455 /* SKX uncore support */
3456 
3457 static struct intel_uncore_type skx_uncore_ubox = {
3458 	.name			= "ubox",
3459 	.num_counters		= 2,
3460 	.num_boxes		= 1,
3461 	.perf_ctr_bits		= 48,
3462 	.fixed_ctr_bits		= 48,
3463 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3464 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3465 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3466 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3467 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3468 	.ops			= &ivbep_uncore_msr_ops,
3469 	.format_group		= &ivbep_uncore_ubox_format_group,
3470 };
3471 
3472 static struct attribute *skx_uncore_cha_formats_attr[] = {
3473 	&format_attr_event.attr,
3474 	&format_attr_umask.attr,
3475 	&format_attr_edge.attr,
3476 	&format_attr_tid_en.attr,
3477 	&format_attr_inv.attr,
3478 	&format_attr_thresh8.attr,
3479 	&format_attr_filter_tid4.attr,
3480 	&format_attr_filter_state5.attr,
3481 	&format_attr_filter_rem.attr,
3482 	&format_attr_filter_loc.attr,
3483 	&format_attr_filter_nm.attr,
3484 	&format_attr_filter_all_op.attr,
3485 	&format_attr_filter_not_nm.attr,
3486 	&format_attr_filter_opc_0.attr,
3487 	&format_attr_filter_opc_1.attr,
3488 	&format_attr_filter_nc.attr,
3489 	&format_attr_filter_isoc.attr,
3490 	NULL,
3491 };
3492 
3493 static const struct attribute_group skx_uncore_chabox_format_group = {
3494 	.name = "format",
3495 	.attrs = skx_uncore_cha_formats_attr,
3496 };
3497 
3498 static struct event_constraint skx_uncore_chabox_constraints[] = {
3499 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3500 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3501 	EVENT_CONSTRAINT_END
3502 };
3503 
3504 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3505 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3506 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3507 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3508 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3509 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3510 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3511 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3512 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3513 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3514 	EVENT_EXTRA_END
3515 };
3516 
3517 static u64 skx_cha_filter_mask(int fields)
3518 {
3519 	u64 mask = 0;
3520 
3521 	if (fields & 0x1)
3522 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3523 	if (fields & 0x2)
3524 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3525 	if (fields & 0x4)
3526 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3527 	if (fields & 0x8) {
3528 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3529 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3530 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3531 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3532 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3533 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3534 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3535 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3536 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3537 	}
3538 	return mask;
3539 }
3540 
3541 static struct event_constraint *
3542 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3543 {
3544 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3545 }
3546 
3547 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3548 {
3549 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3550 	struct extra_reg *er;
3551 	int idx = 0;
3552 
3553 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3554 		if (er->event != (event->hw.config & er->config_mask))
3555 			continue;
3556 		idx |= er->idx;
3557 	}
3558 
3559 	if (idx) {
3560 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3561 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3562 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3563 		reg1->idx = idx;
3564 	}
3565 	return 0;
3566 }
3567 
3568 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3569 	/* There is no frz_en for chabox ctl */
3570 	.init_box		= ivbep_uncore_msr_init_box,
3571 	.disable_box		= snbep_uncore_msr_disable_box,
3572 	.enable_box		= snbep_uncore_msr_enable_box,
3573 	.disable_event		= snbep_uncore_msr_disable_event,
3574 	.enable_event		= hswep_cbox_enable_event,
3575 	.read_counter		= uncore_msr_read_counter,
3576 	.hw_config		= skx_cha_hw_config,
3577 	.get_constraint		= skx_cha_get_constraint,
3578 	.put_constraint		= snbep_cbox_put_constraint,
3579 };
3580 
3581 static struct intel_uncore_type skx_uncore_chabox = {
3582 	.name			= "cha",
3583 	.num_counters		= 4,
3584 	.perf_ctr_bits		= 48,
3585 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3586 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3587 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3588 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3589 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3590 	.num_shared_regs	= 1,
3591 	.constraints		= skx_uncore_chabox_constraints,
3592 	.ops			= &skx_uncore_chabox_ops,
3593 	.format_group		= &skx_uncore_chabox_format_group,
3594 };
3595 
3596 static struct attribute *skx_uncore_iio_formats_attr[] = {
3597 	&format_attr_event.attr,
3598 	&format_attr_umask.attr,
3599 	&format_attr_edge.attr,
3600 	&format_attr_inv.attr,
3601 	&format_attr_thresh9.attr,
3602 	&format_attr_ch_mask.attr,
3603 	&format_attr_fc_mask.attr,
3604 	NULL,
3605 };
3606 
3607 static const struct attribute_group skx_uncore_iio_format_group = {
3608 	.name = "format",
3609 	.attrs = skx_uncore_iio_formats_attr,
3610 };
3611 
3612 static struct event_constraint skx_uncore_iio_constraints[] = {
3613 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3614 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3615 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3616 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3617 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3618 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3619 	EVENT_CONSTRAINT_END
3620 };
3621 
3622 static void skx_iio_enable_event(struct intel_uncore_box *box,
3623 				 struct perf_event *event)
3624 {
3625 	struct hw_perf_event *hwc = &event->hw;
3626 
3627 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3628 }
3629 
3630 static struct intel_uncore_ops skx_uncore_iio_ops = {
3631 	.init_box		= ivbep_uncore_msr_init_box,
3632 	.disable_box		= snbep_uncore_msr_disable_box,
3633 	.enable_box		= snbep_uncore_msr_enable_box,
3634 	.disable_event		= snbep_uncore_msr_disable_event,
3635 	.enable_event		= skx_iio_enable_event,
3636 	.read_counter		= uncore_msr_read_counter,
3637 };
3638 
3639 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3640 {
3641 	return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3642 }
3643 
3644 static umode_t
3645 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3646 {
3647 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3648 
3649 	/* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3650 	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3651 }
3652 
3653 static ssize_t skx_iio_mapping_show(struct device *dev,
3654 				struct device_attribute *attr, char *buf)
3655 {
3656 	struct pci_bus *bus = pci_find_next_bus(NULL);
3657 	struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3658 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3659 	long die = (long)ea->var;
3660 
3661 	/*
3662 	 * Current implementation is for single segment configuration hence it's
3663 	 * safe to take the segment value from the first available root bus.
3664 	 */
3665 	return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3666 					   skx_iio_stack(uncore_pmu, die));
3667 }
3668 
3669 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3670 {
3671 	u64 msr_value;
3672 
3673 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3674 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3675 		return -ENXIO;
3676 
3677 	*topology = msr_value;
3678 
3679 	return 0;
3680 }
3681 
3682 static int die_to_cpu(int die)
3683 {
3684 	int res = 0, cpu, current_die;
3685 	/*
3686 	 * Using cpus_read_lock() to ensure cpu is not going down between
3687 	 * looking at cpu_online_mask.
3688 	 */
3689 	cpus_read_lock();
3690 	for_each_online_cpu(cpu) {
3691 		current_die = topology_logical_die_id(cpu);
3692 		if (current_die == die) {
3693 			res = cpu;
3694 			break;
3695 		}
3696 	}
3697 	cpus_read_unlock();
3698 	return res;
3699 }
3700 
3701 static int skx_iio_get_topology(struct intel_uncore_type *type)
3702 {
3703 	int i, ret;
3704 	struct pci_bus *bus = NULL;
3705 
3706 	/*
3707 	 * Verified single-segment environments only; disabled for multiple
3708 	 * segment topologies for now except VMD domains.
3709 	 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
3710 	 */
3711 	while ((bus = pci_find_next_bus(bus))
3712 		&& (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3713 		;
3714 	if (bus)
3715 		return -EPERM;
3716 
3717 	type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3718 	if (!type->topology)
3719 		return -ENOMEM;
3720 
3721 	for (i = 0; i < uncore_max_dies(); i++) {
3722 		ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3723 		if (ret) {
3724 			kfree(type->topology);
3725 			type->topology = NULL;
3726 			return ret;
3727 		}
3728 	}
3729 
3730 	return 0;
3731 }
3732 
3733 static struct attribute_group skx_iio_mapping_group = {
3734 	.is_visible	= skx_iio_mapping_visible,
3735 };
3736 
3737 static const struct attribute_group *skx_iio_attr_update[] = {
3738 	&skx_iio_mapping_group,
3739 	NULL,
3740 };
3741 
3742 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3743 {
3744 	char buf[64];
3745 	int ret;
3746 	long die = -1;
3747 	struct attribute **attrs = NULL;
3748 	struct dev_ext_attribute *eas = NULL;
3749 
3750 	ret = skx_iio_get_topology(type);
3751 	if (ret)
3752 		return ret;
3753 
3754 	/* One more for NULL. */
3755 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3756 	if (!attrs)
3757 		goto err;
3758 
3759 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3760 	if (!eas)
3761 		goto err;
3762 
3763 	for (die = 0; die < uncore_max_dies(); die++) {
3764 		sprintf(buf, "die%ld", die);
3765 		sysfs_attr_init(&eas[die].attr.attr);
3766 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3767 		if (!eas[die].attr.attr.name)
3768 			goto err;
3769 		eas[die].attr.attr.mode = 0444;
3770 		eas[die].attr.show = skx_iio_mapping_show;
3771 		eas[die].attr.store = NULL;
3772 		eas[die].var = (void *)die;
3773 		attrs[die] = &eas[die].attr.attr;
3774 	}
3775 	skx_iio_mapping_group.attrs = attrs;
3776 
3777 	return 0;
3778 err:
3779 	for (; die >= 0; die--)
3780 		kfree(eas[die].attr.attr.name);
3781 	kfree(eas);
3782 	kfree(attrs);
3783 	kfree(type->topology);
3784 	type->attr_update = NULL;
3785 	return -ENOMEM;
3786 }
3787 
3788 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3789 {
3790 	struct attribute **attr = skx_iio_mapping_group.attrs;
3791 
3792 	if (!attr)
3793 		return;
3794 
3795 	for (; *attr; attr++)
3796 		kfree((*attr)->name);
3797 	kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3798 	kfree(skx_iio_mapping_group.attrs);
3799 	skx_iio_mapping_group.attrs = NULL;
3800 	kfree(type->topology);
3801 }
3802 
3803 static struct intel_uncore_type skx_uncore_iio = {
3804 	.name			= "iio",
3805 	.num_counters		= 4,
3806 	.num_boxes		= 6,
3807 	.perf_ctr_bits		= 48,
3808 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
3809 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
3810 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
3811 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3812 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
3813 	.msr_offset		= SKX_IIO_MSR_OFFSET,
3814 	.constraints		= skx_uncore_iio_constraints,
3815 	.ops			= &skx_uncore_iio_ops,
3816 	.format_group		= &skx_uncore_iio_format_group,
3817 	.attr_update		= skx_iio_attr_update,
3818 	.set_mapping		= skx_iio_set_mapping,
3819 	.cleanup_mapping	= skx_iio_cleanup_mapping,
3820 };
3821 
3822 enum perf_uncore_iio_freerunning_type_id {
3823 	SKX_IIO_MSR_IOCLK			= 0,
3824 	SKX_IIO_MSR_BW				= 1,
3825 	SKX_IIO_MSR_UTIL			= 2,
3826 
3827 	SKX_IIO_FREERUNNING_TYPE_MAX,
3828 };
3829 
3830 
3831 static struct freerunning_counters skx_iio_freerunning[] = {
3832 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
3833 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
3834 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
3835 };
3836 
3837 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3838 	/* Free-Running IO CLOCKS Counter */
3839 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
3840 	/* Free-Running IIO BANDWIDTH Counters */
3841 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
3842 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
3843 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
3844 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
3845 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
3846 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
3847 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
3848 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
3849 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
3850 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
3851 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
3852 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
3853 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
3854 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
3855 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
3856 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
3857 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
3858 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
3859 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
3860 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
3861 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
3862 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
3863 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
3864 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
3865 	/* Free-running IIO UTILIZATION Counters */
3866 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
3867 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
3868 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
3869 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
3870 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
3871 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
3872 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
3873 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
3874 	{ /* end: all zeroes */ },
3875 };
3876 
3877 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3878 	.read_counter		= uncore_msr_read_counter,
3879 	.hw_config		= uncore_freerunning_hw_config,
3880 };
3881 
3882 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3883 	&format_attr_event.attr,
3884 	&format_attr_umask.attr,
3885 	NULL,
3886 };
3887 
3888 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3889 	.name = "format",
3890 	.attrs = skx_uncore_iio_freerunning_formats_attr,
3891 };
3892 
3893 static struct intel_uncore_type skx_uncore_iio_free_running = {
3894 	.name			= "iio_free_running",
3895 	.num_counters		= 17,
3896 	.num_boxes		= 6,
3897 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
3898 	.freerunning		= skx_iio_freerunning,
3899 	.ops			= &skx_uncore_iio_freerunning_ops,
3900 	.event_descs		= skx_uncore_iio_freerunning_events,
3901 	.format_group		= &skx_uncore_iio_freerunning_format_group,
3902 };
3903 
3904 static struct attribute *skx_uncore_formats_attr[] = {
3905 	&format_attr_event.attr,
3906 	&format_attr_umask.attr,
3907 	&format_attr_edge.attr,
3908 	&format_attr_inv.attr,
3909 	&format_attr_thresh8.attr,
3910 	NULL,
3911 };
3912 
3913 static const struct attribute_group skx_uncore_format_group = {
3914 	.name = "format",
3915 	.attrs = skx_uncore_formats_attr,
3916 };
3917 
3918 static struct intel_uncore_type skx_uncore_irp = {
3919 	.name			= "irp",
3920 	.num_counters		= 2,
3921 	.num_boxes		= 6,
3922 	.perf_ctr_bits		= 48,
3923 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
3924 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
3925 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3926 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
3927 	.msr_offset		= SKX_IRP_MSR_OFFSET,
3928 	.ops			= &skx_uncore_iio_ops,
3929 	.format_group		= &skx_uncore_format_group,
3930 };
3931 
3932 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3933 	&format_attr_event.attr,
3934 	&format_attr_umask.attr,
3935 	&format_attr_edge.attr,
3936 	&format_attr_inv.attr,
3937 	&format_attr_thresh8.attr,
3938 	&format_attr_occ_invert.attr,
3939 	&format_attr_occ_edge_det.attr,
3940 	&format_attr_filter_band0.attr,
3941 	&format_attr_filter_band1.attr,
3942 	&format_attr_filter_band2.attr,
3943 	&format_attr_filter_band3.attr,
3944 	NULL,
3945 };
3946 
3947 static struct attribute_group skx_uncore_pcu_format_group = {
3948 	.name = "format",
3949 	.attrs = skx_uncore_pcu_formats_attr,
3950 };
3951 
3952 static struct intel_uncore_ops skx_uncore_pcu_ops = {
3953 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3954 	.hw_config		= hswep_pcu_hw_config,
3955 	.get_constraint		= snbep_pcu_get_constraint,
3956 	.put_constraint		= snbep_pcu_put_constraint,
3957 };
3958 
3959 static struct intel_uncore_type skx_uncore_pcu = {
3960 	.name			= "pcu",
3961 	.num_counters		= 4,
3962 	.num_boxes		= 1,
3963 	.perf_ctr_bits		= 48,
3964 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
3965 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
3966 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
3967 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
3968 	.num_shared_regs	= 1,
3969 	.ops			= &skx_uncore_pcu_ops,
3970 	.format_group		= &skx_uncore_pcu_format_group,
3971 };
3972 
3973 static struct intel_uncore_type *skx_msr_uncores[] = {
3974 	&skx_uncore_ubox,
3975 	&skx_uncore_chabox,
3976 	&skx_uncore_iio,
3977 	&skx_uncore_iio_free_running,
3978 	&skx_uncore_irp,
3979 	&skx_uncore_pcu,
3980 	NULL,
3981 };
3982 
3983 /*
3984  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3985  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3986  */
3987 #define SKX_CAPID6		0x9c
3988 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
3989 
3990 static int skx_count_chabox(void)
3991 {
3992 	struct pci_dev *dev = NULL;
3993 	u32 val = 0;
3994 
3995 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3996 	if (!dev)
3997 		goto out;
3998 
3999 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4000 	val &= SKX_CHA_BIT_MASK;
4001 out:
4002 	pci_dev_put(dev);
4003 	return hweight32(val);
4004 }
4005 
4006 void skx_uncore_cpu_init(void)
4007 {
4008 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4009 	uncore_msr_uncores = skx_msr_uncores;
4010 }
4011 
4012 static struct intel_uncore_type skx_uncore_imc = {
4013 	.name		= "imc",
4014 	.num_counters   = 4,
4015 	.num_boxes	= 6,
4016 	.perf_ctr_bits	= 48,
4017 	.fixed_ctr_bits	= 48,
4018 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4019 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4020 	.event_descs	= hswep_uncore_imc_events,
4021 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4022 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4023 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4024 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4025 	.ops		= &ivbep_uncore_pci_ops,
4026 	.format_group	= &skx_uncore_format_group,
4027 };
4028 
4029 static struct attribute *skx_upi_uncore_formats_attr[] = {
4030 	&format_attr_event.attr,
4031 	&format_attr_umask_ext.attr,
4032 	&format_attr_edge.attr,
4033 	&format_attr_inv.attr,
4034 	&format_attr_thresh8.attr,
4035 	NULL,
4036 };
4037 
4038 static const struct attribute_group skx_upi_uncore_format_group = {
4039 	.name = "format",
4040 	.attrs = skx_upi_uncore_formats_attr,
4041 };
4042 
4043 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4044 {
4045 	struct pci_dev *pdev = box->pci_dev;
4046 
4047 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4048 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4049 }
4050 
4051 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4052 	.init_box	= skx_upi_uncore_pci_init_box,
4053 	.disable_box	= snbep_uncore_pci_disable_box,
4054 	.enable_box	= snbep_uncore_pci_enable_box,
4055 	.disable_event	= snbep_uncore_pci_disable_event,
4056 	.enable_event	= snbep_uncore_pci_enable_event,
4057 	.read_counter	= snbep_uncore_pci_read_counter,
4058 };
4059 
4060 static struct intel_uncore_type skx_uncore_upi = {
4061 	.name		= "upi",
4062 	.num_counters   = 4,
4063 	.num_boxes	= 3,
4064 	.perf_ctr_bits	= 48,
4065 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4066 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4067 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4068 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4069 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4070 	.ops		= &skx_upi_uncore_pci_ops,
4071 	.format_group	= &skx_upi_uncore_format_group,
4072 };
4073 
4074 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4075 {
4076 	struct pci_dev *pdev = box->pci_dev;
4077 
4078 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4079 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4080 }
4081 
4082 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4083 	.init_box	= skx_m2m_uncore_pci_init_box,
4084 	.disable_box	= snbep_uncore_pci_disable_box,
4085 	.enable_box	= snbep_uncore_pci_enable_box,
4086 	.disable_event	= snbep_uncore_pci_disable_event,
4087 	.enable_event	= snbep_uncore_pci_enable_event,
4088 	.read_counter	= snbep_uncore_pci_read_counter,
4089 };
4090 
4091 static struct intel_uncore_type skx_uncore_m2m = {
4092 	.name		= "m2m",
4093 	.num_counters   = 4,
4094 	.num_boxes	= 2,
4095 	.perf_ctr_bits	= 48,
4096 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4097 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4098 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4099 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4100 	.ops		= &skx_m2m_uncore_pci_ops,
4101 	.format_group	= &skx_uncore_format_group,
4102 };
4103 
4104 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4105 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4106 	EVENT_CONSTRAINT_END
4107 };
4108 
4109 static struct intel_uncore_type skx_uncore_m2pcie = {
4110 	.name		= "m2pcie",
4111 	.num_counters   = 4,
4112 	.num_boxes	= 4,
4113 	.perf_ctr_bits	= 48,
4114 	.constraints	= skx_uncore_m2pcie_constraints,
4115 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4116 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4117 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4118 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4119 	.ops		= &ivbep_uncore_pci_ops,
4120 	.format_group	= &skx_uncore_format_group,
4121 };
4122 
4123 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4124 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4125 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4126 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4127 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4128 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4129 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4130 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4131 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4132 	EVENT_CONSTRAINT_END
4133 };
4134 
4135 static struct intel_uncore_type skx_uncore_m3upi = {
4136 	.name		= "m3upi",
4137 	.num_counters   = 3,
4138 	.num_boxes	= 3,
4139 	.perf_ctr_bits	= 48,
4140 	.constraints	= skx_uncore_m3upi_constraints,
4141 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4142 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4143 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4144 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4145 	.ops		= &ivbep_uncore_pci_ops,
4146 	.format_group	= &skx_uncore_format_group,
4147 };
4148 
4149 enum {
4150 	SKX_PCI_UNCORE_IMC,
4151 	SKX_PCI_UNCORE_M2M,
4152 	SKX_PCI_UNCORE_UPI,
4153 	SKX_PCI_UNCORE_M2PCIE,
4154 	SKX_PCI_UNCORE_M3UPI,
4155 };
4156 
4157 static struct intel_uncore_type *skx_pci_uncores[] = {
4158 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4159 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4160 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4161 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4162 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4163 	NULL,
4164 };
4165 
4166 static const struct pci_device_id skx_uncore_pci_ids[] = {
4167 	{ /* MC0 Channel 0 */
4168 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4169 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4170 	},
4171 	{ /* MC0 Channel 1 */
4172 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4173 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4174 	},
4175 	{ /* MC0 Channel 2 */
4176 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4177 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4178 	},
4179 	{ /* MC1 Channel 0 */
4180 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4181 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4182 	},
4183 	{ /* MC1 Channel 1 */
4184 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4185 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4186 	},
4187 	{ /* MC1 Channel 2 */
4188 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4189 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4190 	},
4191 	{ /* M2M0 */
4192 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4193 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4194 	},
4195 	{ /* M2M1 */
4196 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4197 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4198 	},
4199 	{ /* UPI0 Link 0 */
4200 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4201 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4202 	},
4203 	{ /* UPI0 Link 1 */
4204 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4205 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4206 	},
4207 	{ /* UPI1 Link 2 */
4208 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4209 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4210 	},
4211 	{ /* M2PCIe 0 */
4212 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4213 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4214 	},
4215 	{ /* M2PCIe 1 */
4216 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4217 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4218 	},
4219 	{ /* M2PCIe 2 */
4220 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4221 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4222 	},
4223 	{ /* M2PCIe 3 */
4224 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4225 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4226 	},
4227 	{ /* M3UPI0 Link 0 */
4228 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4229 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4230 	},
4231 	{ /* M3UPI0 Link 1 */
4232 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4233 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4234 	},
4235 	{ /* M3UPI1 Link 2 */
4236 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4237 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4238 	},
4239 	{ /* end: all zeroes */ }
4240 };
4241 
4242 
4243 static struct pci_driver skx_uncore_pci_driver = {
4244 	.name		= "skx_uncore",
4245 	.id_table	= skx_uncore_pci_ids,
4246 };
4247 
4248 int skx_uncore_pci_init(void)
4249 {
4250 	/* need to double check pci address */
4251 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4252 
4253 	if (ret)
4254 		return ret;
4255 
4256 	uncore_pci_uncores = skx_pci_uncores;
4257 	uncore_pci_driver = &skx_uncore_pci_driver;
4258 	return 0;
4259 }
4260 
4261 /* end of SKX uncore support */
4262 
4263 /* SNR uncore support */
4264 
4265 static struct intel_uncore_type snr_uncore_ubox = {
4266 	.name			= "ubox",
4267 	.num_counters		= 2,
4268 	.num_boxes		= 1,
4269 	.perf_ctr_bits		= 48,
4270 	.fixed_ctr_bits		= 48,
4271 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4272 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4273 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4274 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4275 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4276 	.ops			= &ivbep_uncore_msr_ops,
4277 	.format_group		= &ivbep_uncore_format_group,
4278 };
4279 
4280 static struct attribute *snr_uncore_cha_formats_attr[] = {
4281 	&format_attr_event.attr,
4282 	&format_attr_umask_ext2.attr,
4283 	&format_attr_edge.attr,
4284 	&format_attr_tid_en.attr,
4285 	&format_attr_inv.attr,
4286 	&format_attr_thresh8.attr,
4287 	&format_attr_filter_tid5.attr,
4288 	NULL,
4289 };
4290 static const struct attribute_group snr_uncore_chabox_format_group = {
4291 	.name = "format",
4292 	.attrs = snr_uncore_cha_formats_attr,
4293 };
4294 
4295 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4296 {
4297 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4298 
4299 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4300 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4301 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4302 	reg1->idx = 0;
4303 
4304 	return 0;
4305 }
4306 
4307 static void snr_cha_enable_event(struct intel_uncore_box *box,
4308 				   struct perf_event *event)
4309 {
4310 	struct hw_perf_event *hwc = &event->hw;
4311 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4312 
4313 	if (reg1->idx != EXTRA_REG_NONE)
4314 		wrmsrl(reg1->reg, reg1->config);
4315 
4316 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4317 }
4318 
4319 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4320 	.init_box		= ivbep_uncore_msr_init_box,
4321 	.disable_box		= snbep_uncore_msr_disable_box,
4322 	.enable_box		= snbep_uncore_msr_enable_box,
4323 	.disable_event		= snbep_uncore_msr_disable_event,
4324 	.enable_event		= snr_cha_enable_event,
4325 	.read_counter		= uncore_msr_read_counter,
4326 	.hw_config		= snr_cha_hw_config,
4327 };
4328 
4329 static struct intel_uncore_type snr_uncore_chabox = {
4330 	.name			= "cha",
4331 	.num_counters		= 4,
4332 	.num_boxes		= 6,
4333 	.perf_ctr_bits		= 48,
4334 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4335 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4336 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4337 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4338 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4339 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4340 	.ops			= &snr_uncore_chabox_ops,
4341 	.format_group		= &snr_uncore_chabox_format_group,
4342 };
4343 
4344 static struct attribute *snr_uncore_iio_formats_attr[] = {
4345 	&format_attr_event.attr,
4346 	&format_attr_umask.attr,
4347 	&format_attr_edge.attr,
4348 	&format_attr_inv.attr,
4349 	&format_attr_thresh9.attr,
4350 	&format_attr_ch_mask2.attr,
4351 	&format_attr_fc_mask2.attr,
4352 	NULL,
4353 };
4354 
4355 static const struct attribute_group snr_uncore_iio_format_group = {
4356 	.name = "format",
4357 	.attrs = snr_uncore_iio_formats_attr,
4358 };
4359 
4360 static struct intel_uncore_type snr_uncore_iio = {
4361 	.name			= "iio",
4362 	.num_counters		= 4,
4363 	.num_boxes		= 5,
4364 	.perf_ctr_bits		= 48,
4365 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4366 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4367 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4368 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4369 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4370 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4371 	.ops			= &ivbep_uncore_msr_ops,
4372 	.format_group		= &snr_uncore_iio_format_group,
4373 };
4374 
4375 static struct intel_uncore_type snr_uncore_irp = {
4376 	.name			= "irp",
4377 	.num_counters		= 2,
4378 	.num_boxes		= 5,
4379 	.perf_ctr_bits		= 48,
4380 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4381 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4382 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4383 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4384 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4385 	.ops			= &ivbep_uncore_msr_ops,
4386 	.format_group		= &ivbep_uncore_format_group,
4387 };
4388 
4389 static struct intel_uncore_type snr_uncore_m2pcie = {
4390 	.name		= "m2pcie",
4391 	.num_counters	= 4,
4392 	.num_boxes	= 5,
4393 	.perf_ctr_bits	= 48,
4394 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4395 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4396 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4397 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4398 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4399 	.ops		= &ivbep_uncore_msr_ops,
4400 	.format_group	= &ivbep_uncore_format_group,
4401 };
4402 
4403 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4404 {
4405 	struct hw_perf_event *hwc = &event->hw;
4406 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4407 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4408 
4409 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4410 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4411 		reg1->idx = ev_sel - 0xb;
4412 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4413 	}
4414 	return 0;
4415 }
4416 
4417 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4418 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4419 	.hw_config		= snr_pcu_hw_config,
4420 	.get_constraint		= snbep_pcu_get_constraint,
4421 	.put_constraint		= snbep_pcu_put_constraint,
4422 };
4423 
4424 static struct intel_uncore_type snr_uncore_pcu = {
4425 	.name			= "pcu",
4426 	.num_counters		= 4,
4427 	.num_boxes		= 1,
4428 	.perf_ctr_bits		= 48,
4429 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4430 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4431 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4432 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4433 	.num_shared_regs	= 1,
4434 	.ops			= &snr_uncore_pcu_ops,
4435 	.format_group		= &skx_uncore_pcu_format_group,
4436 };
4437 
4438 enum perf_uncore_snr_iio_freerunning_type_id {
4439 	SNR_IIO_MSR_IOCLK,
4440 	SNR_IIO_MSR_BW_IN,
4441 
4442 	SNR_IIO_FREERUNNING_TYPE_MAX,
4443 };
4444 
4445 static struct freerunning_counters snr_iio_freerunning[] = {
4446 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4447 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4448 };
4449 
4450 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4451 	/* Free-Running IIO CLOCKS Counter */
4452 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4453 	/* Free-Running IIO BANDWIDTH IN Counters */
4454 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4455 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4456 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4457 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4458 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4459 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4460 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4461 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4462 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4463 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4464 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4465 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4466 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4467 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4468 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4469 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4470 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4471 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4472 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4473 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4474 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4475 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4476 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4477 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4478 	{ /* end: all zeroes */ },
4479 };
4480 
4481 static struct intel_uncore_type snr_uncore_iio_free_running = {
4482 	.name			= "iio_free_running",
4483 	.num_counters		= 9,
4484 	.num_boxes		= 5,
4485 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4486 	.freerunning		= snr_iio_freerunning,
4487 	.ops			= &skx_uncore_iio_freerunning_ops,
4488 	.event_descs		= snr_uncore_iio_freerunning_events,
4489 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4490 };
4491 
4492 static struct intel_uncore_type *snr_msr_uncores[] = {
4493 	&snr_uncore_ubox,
4494 	&snr_uncore_chabox,
4495 	&snr_uncore_iio,
4496 	&snr_uncore_irp,
4497 	&snr_uncore_m2pcie,
4498 	&snr_uncore_pcu,
4499 	&snr_uncore_iio_free_running,
4500 	NULL,
4501 };
4502 
4503 void snr_uncore_cpu_init(void)
4504 {
4505 	uncore_msr_uncores = snr_msr_uncores;
4506 }
4507 
4508 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4509 {
4510 	struct pci_dev *pdev = box->pci_dev;
4511 	int box_ctl = uncore_pci_box_ctl(box);
4512 
4513 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4514 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4515 }
4516 
4517 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4518 	.init_box	= snr_m2m_uncore_pci_init_box,
4519 	.disable_box	= snbep_uncore_pci_disable_box,
4520 	.enable_box	= snbep_uncore_pci_enable_box,
4521 	.disable_event	= snbep_uncore_pci_disable_event,
4522 	.enable_event	= snbep_uncore_pci_enable_event,
4523 	.read_counter	= snbep_uncore_pci_read_counter,
4524 };
4525 
4526 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4527 	&format_attr_event.attr,
4528 	&format_attr_umask_ext3.attr,
4529 	&format_attr_edge.attr,
4530 	&format_attr_inv.attr,
4531 	&format_attr_thresh8.attr,
4532 	NULL,
4533 };
4534 
4535 static const struct attribute_group snr_m2m_uncore_format_group = {
4536 	.name = "format",
4537 	.attrs = snr_m2m_uncore_formats_attr,
4538 };
4539 
4540 static struct intel_uncore_type snr_uncore_m2m = {
4541 	.name		= "m2m",
4542 	.num_counters   = 4,
4543 	.num_boxes	= 1,
4544 	.perf_ctr_bits	= 48,
4545 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4546 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4547 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4548 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4549 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4550 	.ops		= &snr_m2m_uncore_pci_ops,
4551 	.format_group	= &snr_m2m_uncore_format_group,
4552 };
4553 
4554 enum {
4555 	SNR_PCI_UNCORE_M2M,
4556 };
4557 
4558 static struct intel_uncore_type *snr_pci_uncores[] = {
4559 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
4560 	NULL,
4561 };
4562 
4563 static const struct pci_device_id snr_uncore_pci_ids[] = {
4564 	{ /* M2M */
4565 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4566 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4567 	},
4568 	{ /* end: all zeroes */ }
4569 };
4570 
4571 static struct pci_driver snr_uncore_pci_driver = {
4572 	.name		= "snr_uncore",
4573 	.id_table	= snr_uncore_pci_ids,
4574 };
4575 
4576 int snr_uncore_pci_init(void)
4577 {
4578 	/* SNR UBOX DID */
4579 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4580 					 SKX_GIDNIDMAP, true);
4581 
4582 	if (ret)
4583 		return ret;
4584 
4585 	uncore_pci_uncores = snr_pci_uncores;
4586 	uncore_pci_driver = &snr_uncore_pci_driver;
4587 	return 0;
4588 }
4589 
4590 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4591 {
4592 	struct pci_dev *mc_dev = NULL;
4593 	int phys_id, pkg;
4594 
4595 	while (1) {
4596 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4597 		if (!mc_dev)
4598 			break;
4599 		phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4600 		if (phys_id < 0)
4601 			continue;
4602 		pkg = topology_phys_to_logical_pkg(phys_id);
4603 		if (pkg < 0)
4604 			continue;
4605 		else if (pkg == id)
4606 			break;
4607 	}
4608 	return mc_dev;
4609 }
4610 
4611 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4612 				       unsigned int box_ctl, int mem_offset)
4613 {
4614 	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4615 	struct intel_uncore_type *type = box->pmu->type;
4616 	resource_size_t addr;
4617 	u32 pci_dword;
4618 
4619 	if (!pdev)
4620 		return;
4621 
4622 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4623 	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4624 
4625 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
4626 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4627 
4628 	addr += box_ctl;
4629 
4630 	box->io_addr = ioremap(addr, type->mmio_map_size);
4631 	if (!box->io_addr) {
4632 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4633 		return;
4634 	}
4635 
4636 	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4637 }
4638 
4639 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4640 {
4641 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4642 				   SNR_IMC_MMIO_MEM0_OFFSET);
4643 }
4644 
4645 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4646 {
4647 	u32 config;
4648 
4649 	if (!box->io_addr)
4650 		return;
4651 
4652 	config = readl(box->io_addr);
4653 	config |= SNBEP_PMON_BOX_CTL_FRZ;
4654 	writel(config, box->io_addr);
4655 }
4656 
4657 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4658 {
4659 	u32 config;
4660 
4661 	if (!box->io_addr)
4662 		return;
4663 
4664 	config = readl(box->io_addr);
4665 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4666 	writel(config, box->io_addr);
4667 }
4668 
4669 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4670 					   struct perf_event *event)
4671 {
4672 	struct hw_perf_event *hwc = &event->hw;
4673 
4674 	if (!box->io_addr)
4675 		return;
4676 
4677 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4678 		return;
4679 
4680 	writel(hwc->config | SNBEP_PMON_CTL_EN,
4681 	       box->io_addr + hwc->config_base);
4682 }
4683 
4684 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4685 					    struct perf_event *event)
4686 {
4687 	struct hw_perf_event *hwc = &event->hw;
4688 
4689 	if (!box->io_addr)
4690 		return;
4691 
4692 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4693 		return;
4694 
4695 	writel(hwc->config, box->io_addr + hwc->config_base);
4696 }
4697 
4698 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4699 	.init_box	= snr_uncore_mmio_init_box,
4700 	.exit_box	= uncore_mmio_exit_box,
4701 	.disable_box	= snr_uncore_mmio_disable_box,
4702 	.enable_box	= snr_uncore_mmio_enable_box,
4703 	.disable_event	= snr_uncore_mmio_disable_event,
4704 	.enable_event	= snr_uncore_mmio_enable_event,
4705 	.read_counter	= uncore_mmio_read_counter,
4706 };
4707 
4708 static struct uncore_event_desc snr_uncore_imc_events[] = {
4709 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
4710 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
4711 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4712 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4713 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4714 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4715 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4716 	{ /* end: all zeroes */ },
4717 };
4718 
4719 static struct intel_uncore_type snr_uncore_imc = {
4720 	.name		= "imc",
4721 	.num_counters   = 4,
4722 	.num_boxes	= 2,
4723 	.perf_ctr_bits	= 48,
4724 	.fixed_ctr_bits	= 48,
4725 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
4726 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
4727 	.event_descs	= snr_uncore_imc_events,
4728 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
4729 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
4730 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4731 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
4732 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
4733 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
4734 	.ops		= &snr_uncore_mmio_ops,
4735 	.format_group	= &skx_uncore_format_group,
4736 };
4737 
4738 enum perf_uncore_snr_imc_freerunning_type_id {
4739 	SNR_IMC_DCLK,
4740 	SNR_IMC_DDR,
4741 
4742 	SNR_IMC_FREERUNNING_TYPE_MAX,
4743 };
4744 
4745 static struct freerunning_counters snr_imc_freerunning[] = {
4746 	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
4747 	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
4748 };
4749 
4750 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4751 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
4752 
4753 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
4754 	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
4755 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
4756 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
4757 	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
4758 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
4759 	{ /* end: all zeroes */ },
4760 };
4761 
4762 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4763 	.init_box	= snr_uncore_mmio_init_box,
4764 	.exit_box	= uncore_mmio_exit_box,
4765 	.read_counter	= uncore_mmio_read_counter,
4766 	.hw_config	= uncore_freerunning_hw_config,
4767 };
4768 
4769 static struct intel_uncore_type snr_uncore_imc_free_running = {
4770 	.name			= "imc_free_running",
4771 	.num_counters		= 3,
4772 	.num_boxes		= 1,
4773 	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
4774 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
4775 	.freerunning		= snr_imc_freerunning,
4776 	.ops			= &snr_uncore_imc_freerunning_ops,
4777 	.event_descs		= snr_uncore_imc_freerunning_events,
4778 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4779 };
4780 
4781 static struct intel_uncore_type *snr_mmio_uncores[] = {
4782 	&snr_uncore_imc,
4783 	&snr_uncore_imc_free_running,
4784 	NULL,
4785 };
4786 
4787 void snr_uncore_mmio_init(void)
4788 {
4789 	uncore_mmio_uncores = snr_mmio_uncores;
4790 }
4791 
4792 /* end of SNR uncore support */
4793 
4794 /* ICX uncore support */
4795 
4796 static unsigned icx_cha_msr_offsets[] = {
4797 	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
4798 	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
4799 	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
4800 	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
4801 	0x1c,  0x2a,  0x38,  0x46,
4802 };
4803 
4804 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4805 {
4806 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4807 	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4808 
4809 	if (tie_en) {
4810 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4811 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
4812 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4813 		reg1->idx = 0;
4814 	}
4815 
4816 	return 0;
4817 }
4818 
4819 static struct intel_uncore_ops icx_uncore_chabox_ops = {
4820 	.init_box		= ivbep_uncore_msr_init_box,
4821 	.disable_box		= snbep_uncore_msr_disable_box,
4822 	.enable_box		= snbep_uncore_msr_enable_box,
4823 	.disable_event		= snbep_uncore_msr_disable_event,
4824 	.enable_event		= snr_cha_enable_event,
4825 	.read_counter		= uncore_msr_read_counter,
4826 	.hw_config		= icx_cha_hw_config,
4827 };
4828 
4829 static struct intel_uncore_type icx_uncore_chabox = {
4830 	.name			= "cha",
4831 	.num_counters		= 4,
4832 	.perf_ctr_bits		= 48,
4833 	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
4834 	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
4835 	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
4836 	.msr_offsets		= icx_cha_msr_offsets,
4837 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4838 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4839 	.constraints		= skx_uncore_chabox_constraints,
4840 	.ops			= &icx_uncore_chabox_ops,
4841 	.format_group		= &snr_uncore_chabox_format_group,
4842 };
4843 
4844 static unsigned icx_msr_offsets[] = {
4845 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
4846 };
4847 
4848 static struct event_constraint icx_uncore_iio_constraints[] = {
4849 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
4850 	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
4851 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4852 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4853 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
4854 	EVENT_CONSTRAINT_END
4855 };
4856 
4857 static struct intel_uncore_type icx_uncore_iio = {
4858 	.name			= "iio",
4859 	.num_counters		= 4,
4860 	.num_boxes		= 6,
4861 	.perf_ctr_bits		= 48,
4862 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
4863 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
4864 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4865 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4866 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
4867 	.msr_offsets		= icx_msr_offsets,
4868 	.constraints		= icx_uncore_iio_constraints,
4869 	.ops			= &skx_uncore_iio_ops,
4870 	.format_group		= &snr_uncore_iio_format_group,
4871 };
4872 
4873 static struct intel_uncore_type icx_uncore_irp = {
4874 	.name			= "irp",
4875 	.num_counters		= 2,
4876 	.num_boxes		= 6,
4877 	.perf_ctr_bits		= 48,
4878 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
4879 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
4880 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4881 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
4882 	.msr_offsets		= icx_msr_offsets,
4883 	.ops			= &ivbep_uncore_msr_ops,
4884 	.format_group		= &ivbep_uncore_format_group,
4885 };
4886 
4887 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
4888 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
4889 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4890 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
4891 	EVENT_CONSTRAINT_END
4892 };
4893 
4894 static struct intel_uncore_type icx_uncore_m2pcie = {
4895 	.name		= "m2pcie",
4896 	.num_counters	= 4,
4897 	.num_boxes	= 6,
4898 	.perf_ctr_bits	= 48,
4899 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
4900 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
4901 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
4902 	.msr_offsets	= icx_msr_offsets,
4903 	.constraints	= icx_uncore_m2pcie_constraints,
4904 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4905 	.ops		= &ivbep_uncore_msr_ops,
4906 	.format_group	= &ivbep_uncore_format_group,
4907 };
4908 
4909 enum perf_uncore_icx_iio_freerunning_type_id {
4910 	ICX_IIO_MSR_IOCLK,
4911 	ICX_IIO_MSR_BW_IN,
4912 
4913 	ICX_IIO_FREERUNNING_TYPE_MAX,
4914 };
4915 
4916 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
4917 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
4918 };
4919 
4920 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
4921 	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
4922 };
4923 
4924 static struct freerunning_counters icx_iio_freerunning[] = {
4925 	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
4926 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
4927 };
4928 
4929 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
4930 	/* Free-Running IIO CLOCKS Counter */
4931 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4932 	/* Free-Running IIO BANDWIDTH IN Counters */
4933 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4934 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4935 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4936 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4937 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4938 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4939 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4940 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4941 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4942 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4943 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4944 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4945 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4946 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4947 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4948 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4949 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4950 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4951 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4952 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4953 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4954 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4955 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4956 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4957 	{ /* end: all zeroes */ },
4958 };
4959 
4960 static struct intel_uncore_type icx_uncore_iio_free_running = {
4961 	.name			= "iio_free_running",
4962 	.num_counters		= 9,
4963 	.num_boxes		= 6,
4964 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
4965 	.freerunning		= icx_iio_freerunning,
4966 	.ops			= &skx_uncore_iio_freerunning_ops,
4967 	.event_descs		= icx_uncore_iio_freerunning_events,
4968 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4969 };
4970 
4971 static struct intel_uncore_type *icx_msr_uncores[] = {
4972 	&skx_uncore_ubox,
4973 	&icx_uncore_chabox,
4974 	&icx_uncore_iio,
4975 	&icx_uncore_irp,
4976 	&icx_uncore_m2pcie,
4977 	&skx_uncore_pcu,
4978 	&icx_uncore_iio_free_running,
4979 	NULL,
4980 };
4981 
4982 /*
4983  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
4984  * registers which located at Device 30, Function 3
4985  */
4986 #define ICX_CAPID6		0x9c
4987 #define ICX_CAPID7		0xa0
4988 
4989 static u64 icx_count_chabox(void)
4990 {
4991 	struct pci_dev *dev = NULL;
4992 	u64 caps = 0;
4993 
4994 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
4995 	if (!dev)
4996 		goto out;
4997 
4998 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
4999 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5000 out:
5001 	pci_dev_put(dev);
5002 	return hweight64(caps);
5003 }
5004 
5005 void icx_uncore_cpu_init(void)
5006 {
5007 	u64 num_boxes = icx_count_chabox();
5008 
5009 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5010 		return;
5011 	icx_uncore_chabox.num_boxes = num_boxes;
5012 	uncore_msr_uncores = icx_msr_uncores;
5013 }
5014 
5015 static struct intel_uncore_type icx_uncore_m2m = {
5016 	.name		= "m2m",
5017 	.num_counters   = 4,
5018 	.num_boxes	= 4,
5019 	.perf_ctr_bits	= 48,
5020 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
5021 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
5022 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5023 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
5024 	.ops		= &snr_m2m_uncore_pci_ops,
5025 	.format_group	= &skx_uncore_format_group,
5026 };
5027 
5028 static struct attribute *icx_upi_uncore_formats_attr[] = {
5029 	&format_attr_event.attr,
5030 	&format_attr_umask_ext4.attr,
5031 	&format_attr_edge.attr,
5032 	&format_attr_inv.attr,
5033 	&format_attr_thresh8.attr,
5034 	NULL,
5035 };
5036 
5037 static const struct attribute_group icx_upi_uncore_format_group = {
5038 	.name = "format",
5039 	.attrs = icx_upi_uncore_formats_attr,
5040 };
5041 
5042 static struct intel_uncore_type icx_uncore_upi = {
5043 	.name		= "upi",
5044 	.num_counters   = 4,
5045 	.num_boxes	= 3,
5046 	.perf_ctr_bits	= 48,
5047 	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
5048 	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
5049 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5050 	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5051 	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
5052 	.ops		= &skx_upi_uncore_pci_ops,
5053 	.format_group	= &icx_upi_uncore_format_group,
5054 };
5055 
5056 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5057 	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5058 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5059 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5060 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5061 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5062 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5063 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5064 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5065 	EVENT_CONSTRAINT_END
5066 };
5067 
5068 static struct intel_uncore_type icx_uncore_m3upi = {
5069 	.name		= "m3upi",
5070 	.num_counters   = 4,
5071 	.num_boxes	= 3,
5072 	.perf_ctr_bits	= 48,
5073 	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
5074 	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
5075 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5076 	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
5077 	.constraints	= icx_uncore_m3upi_constraints,
5078 	.ops		= &ivbep_uncore_pci_ops,
5079 	.format_group	= &skx_uncore_format_group,
5080 };
5081 
5082 enum {
5083 	ICX_PCI_UNCORE_M2M,
5084 	ICX_PCI_UNCORE_UPI,
5085 	ICX_PCI_UNCORE_M3UPI,
5086 };
5087 
5088 static struct intel_uncore_type *icx_pci_uncores[] = {
5089 	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
5090 	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
5091 	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
5092 	NULL,
5093 };
5094 
5095 static const struct pci_device_id icx_uncore_pci_ids[] = {
5096 	{ /* M2M 0 */
5097 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5098 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5099 	},
5100 	{ /* M2M 1 */
5101 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5102 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5103 	},
5104 	{ /* M2M 2 */
5105 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5106 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5107 	},
5108 	{ /* M2M 3 */
5109 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5110 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5111 	},
5112 	{ /* UPI Link 0 */
5113 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5114 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5115 	},
5116 	{ /* UPI Link 1 */
5117 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5118 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5119 	},
5120 	{ /* UPI Link 2 */
5121 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5122 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5123 	},
5124 	{ /* M3UPI Link 0 */
5125 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5126 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5127 	},
5128 	{ /* M3UPI Link 1 */
5129 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5130 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5131 	},
5132 	{ /* M3UPI Link 2 */
5133 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5134 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5135 	},
5136 	{ /* end: all zeroes */ }
5137 };
5138 
5139 static struct pci_driver icx_uncore_pci_driver = {
5140 	.name		= "icx_uncore",
5141 	.id_table	= icx_uncore_pci_ids,
5142 };
5143 
5144 int icx_uncore_pci_init(void)
5145 {
5146 	/* ICX UBOX DID */
5147 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5148 					 SKX_GIDNIDMAP, true);
5149 
5150 	if (ret)
5151 		return ret;
5152 
5153 	uncore_pci_uncores = icx_pci_uncores;
5154 	uncore_pci_driver = &icx_uncore_pci_driver;
5155 	return 0;
5156 }
5157 
5158 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5159 {
5160 	unsigned int box_ctl = box->pmu->type->box_ctl +
5161 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5162 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5163 			 SNR_IMC_MMIO_MEM0_OFFSET;
5164 
5165 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5166 }
5167 
5168 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5169 	.init_box	= icx_uncore_imc_init_box,
5170 	.exit_box	= uncore_mmio_exit_box,
5171 	.disable_box	= snr_uncore_mmio_disable_box,
5172 	.enable_box	= snr_uncore_mmio_enable_box,
5173 	.disable_event	= snr_uncore_mmio_disable_event,
5174 	.enable_event	= snr_uncore_mmio_enable_event,
5175 	.read_counter	= uncore_mmio_read_counter,
5176 };
5177 
5178 static struct intel_uncore_type icx_uncore_imc = {
5179 	.name		= "imc",
5180 	.num_counters   = 4,
5181 	.num_boxes	= 8,
5182 	.perf_ctr_bits	= 48,
5183 	.fixed_ctr_bits	= 48,
5184 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5185 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5186 	.event_descs	= hswep_uncore_imc_events,
5187 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5188 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5189 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5190 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5191 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5192 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5193 	.ops		= &icx_uncore_mmio_ops,
5194 	.format_group	= &skx_uncore_format_group,
5195 };
5196 
5197 enum perf_uncore_icx_imc_freerunning_type_id {
5198 	ICX_IMC_DCLK,
5199 	ICX_IMC_DDR,
5200 	ICX_IMC_DDRT,
5201 
5202 	ICX_IMC_FREERUNNING_TYPE_MAX,
5203 };
5204 
5205 static struct freerunning_counters icx_imc_freerunning[] = {
5206 	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5207 	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5208 	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
5209 };
5210 
5211 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5212 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
5213 
5214 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
5215 	INTEL_UNCORE_EVENT_DESC(read.scale,		"3.814697266e-6"),
5216 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
5217 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
5218 	INTEL_UNCORE_EVENT_DESC(write.scale,		"3.814697266e-6"),
5219 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
5220 
5221 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
5222 	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"3.814697266e-6"),
5223 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
5224 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
5225 	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"3.814697266e-6"),
5226 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
5227 	{ /* end: all zeroes */ },
5228 };
5229 
5230 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5231 {
5232 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5233 			 SNR_IMC_MMIO_MEM0_OFFSET;
5234 
5235 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5236 }
5237 
5238 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5239 	.init_box	= icx_uncore_imc_freerunning_init_box,
5240 	.exit_box	= uncore_mmio_exit_box,
5241 	.read_counter	= uncore_mmio_read_counter,
5242 	.hw_config	= uncore_freerunning_hw_config,
5243 };
5244 
5245 static struct intel_uncore_type icx_uncore_imc_free_running = {
5246 	.name			= "imc_free_running",
5247 	.num_counters		= 5,
5248 	.num_boxes		= 4,
5249 	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
5250 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5251 	.freerunning		= icx_imc_freerunning,
5252 	.ops			= &icx_uncore_imc_freerunning_ops,
5253 	.event_descs		= icx_uncore_imc_freerunning_events,
5254 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5255 };
5256 
5257 static struct intel_uncore_type *icx_mmio_uncores[] = {
5258 	&icx_uncore_imc,
5259 	&icx_uncore_imc_free_running,
5260 	NULL,
5261 };
5262 
5263 void icx_uncore_mmio_init(void)
5264 {
5265 	uncore_mmio_uncores = icx_mmio_uncores;
5266 }
5267 
5268 /* end of ICX uncore support */
5269