// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

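/*
 * Extract the i-th n-bit wide field of x, e.g. one of the per-field
 * reference counts packed into an extra register's ->ref below.
 */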
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSRs
 * that BIOS programmed. The MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
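/*
 * Per the layout above, the bus number BIOS assigned to CPUBUSNO(n) is
 * (msr_value >> (n * BUS_NUM_STRIDE)) & 0xff, once the VALID bit is set.
 */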

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

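/*
 * sysfs "format" attributes: each entry names an event field and the bit
 * range of perf_event_attr::config (or config1/config2) that carries it.
 */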
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

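	/* read the 64-bit counter as two 32-bit halves, low dword first */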
	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

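	/*
	 * Each filter field's usage is tracked as a packed 6-bit reference
	 * count in er->ref; release the fields this event had allocated.
	 */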
	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

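	/* collect the filter-field indices this event's extra regs require */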
	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

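/*
 * The PCU filter register packs four 8-bit filter-band values, one byte
 * lane per filter index; shift the event's filter value from its current
 * lane to the lane selected by new_idx.
 */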
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

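/* indices of the per-port QPI filter devices in uncore_extra_pci_dev[] */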
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

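	/*
	 * Event 0x38 supports packet matching; config1/config2 supply the
	 * match/mask values written to the Q_Py PKT_MATCH/PKT_MASK regs.
	 */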
	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Each three-bit field in bits 0-23 of the GIDNIDMAP register corresponds to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)

static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
			      int *nodeid, int *groupid)
{
	int ret;

	/* get the Node ID of the local register */
	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
	if (ret)
		goto err;

	*nodeid = *nodeid & NODE_ID_MASK;
	/* get the Node ID mapping */
	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
	if (ret)
		goto err;
err:
	return ret;
}

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * Every three-bit field in the Node ID mapping
			 * register maps to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == GIDNIDMAP(config, i)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For a PCI bus with no UBOX device, find the next bus
		 * that has a UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
1660 	.attrs = ivbep_uncore_cbox_formats_attr,
1661 };
1662 
1663 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1664 	.name = "format",
1665 	.attrs = ivbep_uncore_pcu_formats_attr,
1666 };
1667 
1668 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1669 	.name = "format",
1670 	.attrs = ivbep_uncore_qpi_formats_attr,
1671 };
1672 
1673 static struct intel_uncore_type ivbep_uncore_ubox = {
1674 	.name		= "ubox",
1675 	.num_counters   = 2,
1676 	.num_boxes	= 1,
1677 	.perf_ctr_bits	= 44,
1678 	.fixed_ctr_bits	= 48,
1679 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1680 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1681 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1682 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1683 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1684 	.ops		= &ivbep_uncore_msr_ops,
1685 	.format_group	= &ivbep_uncore_ubox_format_group,
1686 };
1687 
1688 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1689 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1690 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1691 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1692 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1693 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1694 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1695 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1696 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1697 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1698 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1699 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1700 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1701 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1702 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1703 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1704 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1705 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1706 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1707 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1708 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1709 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1710 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1711 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1712 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1713 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1714 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1715 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1716 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1717 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1718 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1719 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1720 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1721 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1722 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1723 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1724 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1725 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1726 	EVENT_EXTRA_END
1727 };
1728 
1729 static u64 ivbep_cbox_filter_mask(int fields)
1730 {
1731 	u64 mask = 0;
1732 
1733 	if (fields & 0x1)
1734 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1735 	if (fields & 0x2)
1736 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1737 	if (fields & 0x4)
1738 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1739 	if (fields & 0x8)
1740 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1741 	if (fields & 0x10) {
1742 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1743 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1744 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1745 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1746 	}
1747 
1748 	return mask;
1749 }
1750 
1751 static struct event_constraint *
1752 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1753 {
1754 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1755 }
1756 
1757 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1758 {
1759 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1760 	struct extra_reg *er;
1761 	int idx = 0;
1762 
1763 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1764 		if (er->event != (event->hw.config & er->config_mask))
1765 			continue;
1766 		idx |= er->idx;
1767 	}
1768 
1769 	if (idx) {
1770 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1771 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1772 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1773 		reg1->idx = idx;
1774 	}
1775 	return 0;
1776 }
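
/*
 * Worked example: the (0x1134, 0xffff, 0x4) entry in
 * ivbep_uncore_cbox_extra_regs matches event 0x34 with umask 0x11
 * exactly and accumulates idx 0x4, i.e. the STATE field in
 * ivbep_cbox_filter_mask() above.
 */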
1777 
1778 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1779 {
1780 	struct hw_perf_event *hwc = &event->hw;
1781 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1782 
1783 	if (reg1->idx != EXTRA_REG_NONE) {
1784 		u64 filter = uncore_shared_reg_config(box, 0);
1785 		wrmsrl(reg1->reg, filter & 0xffffffff);
1786 		wrmsrl(reg1->reg + 6, filter >> 32);
1787 	}
1788 
1789 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1790 }
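
/*
 * Note the 64-bit filter value is split across two MSRs: the low dword
 * goes to the box filter register, the high dword to the companion
 * filter register six MSRs above it.
 */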
1791 
1792 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1793 	.init_box		= ivbep_uncore_msr_init_box,
1794 	.disable_box		= snbep_uncore_msr_disable_box,
1795 	.enable_box		= snbep_uncore_msr_enable_box,
1796 	.disable_event		= snbep_uncore_msr_disable_event,
1797 	.enable_event		= ivbep_cbox_enable_event,
1798 	.read_counter		= uncore_msr_read_counter,
1799 	.hw_config		= ivbep_cbox_hw_config,
1800 	.get_constraint		= ivbep_cbox_get_constraint,
1801 	.put_constraint		= snbep_cbox_put_constraint,
1802 };
1803 
1804 static struct intel_uncore_type ivbep_uncore_cbox = {
1805 	.name			= "cbox",
1806 	.num_counters		= 4,
1807 	.num_boxes		= 15,
1808 	.perf_ctr_bits		= 44,
1809 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1810 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1811 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1812 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1813 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1814 	.num_shared_regs	= 1,
1815 	.constraints		= snbep_uncore_cbox_constraints,
1816 	.ops			= &ivbep_uncore_cbox_ops,
1817 	.format_group		= &ivbep_uncore_cbox_format_group,
1818 };
1819 
1820 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1821 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1822 	.hw_config		= snbep_pcu_hw_config,
1823 	.get_constraint		= snbep_pcu_get_constraint,
1824 	.put_constraint		= snbep_pcu_put_constraint,
1825 };
1826 
1827 static struct intel_uncore_type ivbep_uncore_pcu = {
1828 	.name			= "pcu",
1829 	.num_counters		= 4,
1830 	.num_boxes		= 1,
1831 	.perf_ctr_bits		= 48,
1832 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1833 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1834 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1835 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1836 	.num_shared_regs	= 1,
1837 	.ops			= &ivbep_uncore_pcu_ops,
1838 	.format_group		= &ivbep_uncore_pcu_format_group,
1839 };
1840 
1841 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1842 	&ivbep_uncore_ubox,
1843 	&ivbep_uncore_cbox,
1844 	&ivbep_uncore_pcu,
1845 	NULL,
1846 };
1847 
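/*
 * There is one Cbox per physical core, so trim the advertised box count
 * down to the core count of the running part.
 */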
1848 void ivbep_uncore_cpu_init(void)
1849 {
1850 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1851 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1852 	uncore_msr_uncores = ivbep_msr_uncores;
1853 }
1854 
1855 static struct intel_uncore_type ivbep_uncore_ha = {
1856 	.name		= "ha",
1857 	.num_counters   = 4,
1858 	.num_boxes	= 2,
1859 	.perf_ctr_bits	= 48,
1860 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1861 };
1862 
1863 static struct intel_uncore_type ivbep_uncore_imc = {
1864 	.name		= "imc",
1865 	.num_counters   = 4,
1866 	.num_boxes	= 8,
1867 	.perf_ctr_bits	= 48,
1868 	.fixed_ctr_bits	= 48,
1869 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1870 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1871 	.event_descs	= snbep_uncore_imc_events,
1872 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1873 };
1874 
1875 /* registers in IRP boxes are not at regular offsets, hence the lookup tables */
1876 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1877 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1878 
1879 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1880 {
1881 	struct pci_dev *pdev = box->pci_dev;
1882 	struct hw_perf_event *hwc = &event->hw;
1883 
1884 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1885 			       hwc->config | SNBEP_PMON_CTL_EN);
1886 }
1887 
1888 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1889 {
1890 	struct pci_dev *pdev = box->pci_dev;
1891 	struct hw_perf_event *hwc = &event->hw;
1892 
1893 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1894 }
1895 
1896 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1897 {
1898 	struct pci_dev *pdev = box->pci_dev;
1899 	struct hw_perf_event *hwc = &event->hw;
1900 	u64 count = 0;
1901 
1902 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1903 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1904 
1905 	return count;
1906 }
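
/*
 * The 64-bit count is assembled from two 32-bit config-space reads;
 * the (u32 *)&count + 1 store of the high half relies on x86 being
 * little-endian.
 */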
1907 
1908 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1909 	.init_box	= ivbep_uncore_pci_init_box,
1910 	.disable_box	= snbep_uncore_pci_disable_box,
1911 	.enable_box	= snbep_uncore_pci_enable_box,
1912 	.disable_event	= ivbep_uncore_irp_disable_event,
1913 	.enable_event	= ivbep_uncore_irp_enable_event,
1914 	.read_counter	= ivbep_uncore_irp_read_counter,
1915 };
1916 
1917 static struct intel_uncore_type ivbep_uncore_irp = {
1918 	.name			= "irp",
1919 	.num_counters		= 4,
1920 	.num_boxes		= 1,
1921 	.perf_ctr_bits		= 48,
1922 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1923 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1924 	.ops			= &ivbep_uncore_irp_ops,
1925 	.format_group		= &ivbep_uncore_format_group,
1926 };
1927 
1928 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1929 	.init_box	= ivbep_uncore_pci_init_box,
1930 	.disable_box	= snbep_uncore_pci_disable_box,
1931 	.enable_box	= snbep_uncore_pci_enable_box,
1932 	.disable_event	= snbep_uncore_pci_disable_event,
1933 	.enable_event	= snbep_qpi_enable_event,
1934 	.read_counter	= snbep_uncore_pci_read_counter,
1935 	.hw_config	= snbep_qpi_hw_config,
1936 	.get_constraint	= uncore_get_constraint,
1937 	.put_constraint	= uncore_put_constraint,
1938 };
1939 
1940 static struct intel_uncore_type ivbep_uncore_qpi = {
1941 	.name			= "qpi",
1942 	.num_counters		= 4,
1943 	.num_boxes		= 3,
1944 	.perf_ctr_bits		= 48,
1945 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1946 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1947 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1948 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1949 	.num_shared_regs	= 1,
1950 	.ops			= &ivbep_uncore_qpi_ops,
1951 	.format_group		= &ivbep_uncore_qpi_format_group,
1952 };
1953 
1954 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1955 	.name		= "r2pcie",
1956 	.num_counters   = 4,
1957 	.num_boxes	= 1,
1958 	.perf_ctr_bits	= 44,
1959 	.constraints	= snbep_uncore_r2pcie_constraints,
1960 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1961 };
1962 
1963 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1964 	.name		= "r3qpi",
1965 	.num_counters   = 3,
1966 	.num_boxes	= 2,
1967 	.perf_ctr_bits	= 44,
1968 	.constraints	= snbep_uncore_r3qpi_constraints,
1969 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1970 };
1971 
1972 enum {
1973 	IVBEP_PCI_UNCORE_HA,
1974 	IVBEP_PCI_UNCORE_IMC,
1975 	IVBEP_PCI_UNCORE_IRP,
1976 	IVBEP_PCI_UNCORE_QPI,
1977 	IVBEP_PCI_UNCORE_R2PCIE,
1978 	IVBEP_PCI_UNCORE_R3QPI,
1979 };
1980 
1981 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1982 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1983 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1984 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1985 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1986 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1987 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1988 	NULL,
1989 };
1990 
1991 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1992 	{ /* Home Agent 0 */
1993 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1994 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1995 	},
1996 	{ /* Home Agent 1 */
1997 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1998 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1999 	},
2000 	{ /* MC0 Channel 0 */
2001 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
2002 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
2003 	},
2004 	{ /* MC0 Channel 1 */
2005 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
2006 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
2007 	},
2008 	{ /* MC0 Channel 3 */
2009 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
2010 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
2011 	},
2012 	{ /* MC0 Channel 4 */
2013 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
2014 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
2015 	},
2016 	{ /* MC1 Channel 0 */
2017 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
2018 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2019 	},
2020 	{ /* MC1 Channel 1 */
2021 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2022 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2023 	},
2024 	{ /* MC1 Channel 3 */
2025 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2026 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2027 	},
2028 	{ /* MC1 Channel 4 */
2029 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2030 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2031 	},
2032 	{ /* IRP */
2033 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2034 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2035 	},
2036 	{ /* QPI0 Port 0 */
2037 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2038 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2039 	},
2040 	{ /* QPI0 Port 1 */
2041 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2042 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2043 	},
2044 	{ /* QPI1 Port 2 */
2045 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2046 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2047 	},
2048 	{ /* R2PCIe */
2049 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2050 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2051 	},
2052 	{ /* R3QPI0 Link 0 */
2053 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2054 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2055 	},
2056 	{ /* R3QPI0 Link 1 */
2057 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2058 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2059 	},
2060 	{ /* R3QPI1 Link 2 */
2061 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2062 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2063 	},
2064 	{ /* QPI Port 0 filter  */
2065 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2066 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2067 						   SNBEP_PCI_QPI_PORT0_FILTER),
2068 	},
2069 	{ /* QPI Port 1 filter  */
2070 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2071 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2072 						   SNBEP_PCI_QPI_PORT1_FILTER),
2073 	},
2074 	{ /* end: all zeroes */ }
2075 };
2076 
2077 static struct pci_driver ivbep_uncore_pci_driver = {
2078 	.name		= "ivbep_uncore",
2079 	.id_table	= ivbep_uncore_pci_ids,
2080 };
2081 
2082 int ivbep_uncore_pci_init(void)
2083 {
2084 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2085 	if (ret)
2086 		return ret;
2087 	uncore_pci_uncores = ivbep_pci_uncores;
2088 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2089 	return 0;
2090 }
2091 /* end of IvyTown uncore support */
2092 
2093 /* KNL uncore support */
2094 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2095 	&format_attr_event.attr,
2096 	&format_attr_umask.attr,
2097 	&format_attr_edge.attr,
2098 	&format_attr_tid_en.attr,
2099 	&format_attr_inv.attr,
2100 	&format_attr_thresh5.attr,
2101 	NULL,
2102 };
2103 
2104 static const struct attribute_group knl_uncore_ubox_format_group = {
2105 	.name = "format",
2106 	.attrs = knl_uncore_ubox_formats_attr,
2107 };
2108 
2109 static struct intel_uncore_type knl_uncore_ubox = {
2110 	.name			= "ubox",
2111 	.num_counters		= 2,
2112 	.num_boxes		= 1,
2113 	.perf_ctr_bits		= 48,
2114 	.fixed_ctr_bits		= 48,
2115 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2116 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2117 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2118 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2119 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2120 	.ops			= &snbep_uncore_msr_ops,
2121 	.format_group		= &knl_uncore_ubox_format_group,
2122 };
2123 
2124 static struct attribute *knl_uncore_cha_formats_attr[] = {
2125 	&format_attr_event.attr,
2126 	&format_attr_umask.attr,
2127 	&format_attr_qor.attr,
2128 	&format_attr_edge.attr,
2129 	&format_attr_tid_en.attr,
2130 	&format_attr_inv.attr,
2131 	&format_attr_thresh8.attr,
2132 	&format_attr_filter_tid4.attr,
2133 	&format_attr_filter_link3.attr,
2134 	&format_attr_filter_state4.attr,
2135 	&format_attr_filter_local.attr,
2136 	&format_attr_filter_all_op.attr,
2137 	&format_attr_filter_nnm.attr,
2138 	&format_attr_filter_opc3.attr,
2139 	&format_attr_filter_nc.attr,
2140 	&format_attr_filter_isoc.attr,
2141 	NULL,
2142 };
2143 
2144 static const struct attribute_group knl_uncore_cha_format_group = {
2145 	.name = "format",
2146 	.attrs = knl_uncore_cha_formats_attr,
2147 };
2148 
2149 static struct event_constraint knl_uncore_cha_constraints[] = {
2150 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2151 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2152 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2153 	EVENT_CONSTRAINT_END
2154 };
2155 
2156 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2157 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2158 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2159 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2160 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2161 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2162 	EVENT_EXTRA_END
2163 };
2164 
2165 static u64 knl_cha_filter_mask(int fields)
2166 {
2167 	u64 mask = 0;
2168 
2169 	if (fields & 0x1)
2170 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2171 	if (fields & 0x2)
2172 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2173 	if (fields & 0x4)
2174 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2175 	return mask;
2176 }
2177 
2178 static struct event_constraint *
2179 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2180 {
2181 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2182 }
2183 
2184 static int knl_cha_hw_config(struct intel_uncore_box *box,
2185 			     struct perf_event *event)
2186 {
2187 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2188 	struct extra_reg *er;
2189 	int idx = 0;
2190 
2191 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2192 		if (er->event != (event->hw.config & er->config_mask))
2193 			continue;
2194 		idx |= er->idx;
2195 	}
2196 
2197 	if (idx) {
2198 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2199 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2200 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2201 
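		/*
		 * Widen the node scope unconditionally so remote, local
		 * and NNC traffic all pass whatever filter was selected.
		 */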
2202 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2203 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2204 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2205 		reg1->idx = idx;
2206 	}
2207 	return 0;
2208 }
2209 
2210 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2211 				    struct perf_event *event);
2212 
2213 static struct intel_uncore_ops knl_uncore_cha_ops = {
2214 	.init_box		= snbep_uncore_msr_init_box,
2215 	.disable_box		= snbep_uncore_msr_disable_box,
2216 	.enable_box		= snbep_uncore_msr_enable_box,
2217 	.disable_event		= snbep_uncore_msr_disable_event,
2218 	.enable_event		= hswep_cbox_enable_event,
2219 	.read_counter		= uncore_msr_read_counter,
2220 	.hw_config		= knl_cha_hw_config,
2221 	.get_constraint		= knl_cha_get_constraint,
2222 	.put_constraint		= snbep_cbox_put_constraint,
2223 };
2224 
2225 static struct intel_uncore_type knl_uncore_cha = {
2226 	.name			= "cha",
2227 	.num_counters		= 4,
2228 	.num_boxes		= 38,
2229 	.perf_ctr_bits		= 48,
2230 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2231 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2232 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2233 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2234 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2235 	.num_shared_regs	= 1,
2236 	.constraints		= knl_uncore_cha_constraints,
2237 	.ops			= &knl_uncore_cha_ops,
2238 	.format_group		= &knl_uncore_cha_format_group,
2239 };
2240 
2241 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2242 	&format_attr_event2.attr,
2243 	&format_attr_use_occ_ctr.attr,
2244 	&format_attr_occ_sel.attr,
2245 	&format_attr_edge.attr,
2246 	&format_attr_tid_en.attr,
2247 	&format_attr_inv.attr,
2248 	&format_attr_thresh6.attr,
2249 	&format_attr_occ_invert.attr,
2250 	&format_attr_occ_edge_det.attr,
2251 	NULL,
2252 };
2253 
2254 static const struct attribute_group knl_uncore_pcu_format_group = {
2255 	.name = "format",
2256 	.attrs = knl_uncore_pcu_formats_attr,
2257 };
2258 
2259 static struct intel_uncore_type knl_uncore_pcu = {
2260 	.name			= "pcu",
2261 	.num_counters		= 4,
2262 	.num_boxes		= 1,
2263 	.perf_ctr_bits		= 48,
2264 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2265 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2266 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2267 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2268 	.ops			= &snbep_uncore_msr_ops,
2269 	.format_group		= &knl_uncore_pcu_format_group,
2270 };
2271 
2272 static struct intel_uncore_type *knl_msr_uncores[] = {
2273 	&knl_uncore_ubox,
2274 	&knl_uncore_cha,
2275 	&knl_uncore_pcu,
2276 	NULL,
2277 };
2278 
2279 void knl_uncore_cpu_init(void)
2280 {
2281 	uncore_msr_uncores = knl_msr_uncores;
2282 }
2283 
2284 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2285 {
2286 	struct pci_dev *pdev = box->pci_dev;
2287 	int box_ctl = uncore_pci_box_ctl(box);
2288 
2289 	pci_write_config_dword(pdev, box_ctl, 0);
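	/* A plain zero write clears FRZ/FRZ_EN and lets the counters run. */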
2290 }
2291 
2292 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2293 					struct perf_event *event)
2294 {
2295 	struct pci_dev *pdev = box->pci_dev;
2296 	struct hw_perf_event *hwc = &event->hw;
2297 
2298 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2299 							== UNCORE_FIXED_EVENT)
2300 		pci_write_config_dword(pdev, hwc->config_base,
2301 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2302 	else
2303 		pci_write_config_dword(pdev, hwc->config_base,
2304 				       hwc->config | SNBEP_PMON_CTL_EN);
2305 }
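
/*
 * Usage note: the fixed DCLK/UCLK counter is selected with the event
 * field equal to UNCORE_FIXED_EVENT (0xff in uncore.h); anything else
 * takes the generic SNBEP enable path.
 */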
2306 
2307 static struct intel_uncore_ops knl_uncore_imc_ops = {
2308 	.init_box	= snbep_uncore_pci_init_box,
2309 	.disable_box	= snbep_uncore_pci_disable_box,
2310 	.enable_box	= knl_uncore_imc_enable_box,
2311 	.read_counter	= snbep_uncore_pci_read_counter,
2312 	.enable_event	= knl_uncore_imc_enable_event,
2313 	.disable_event	= snbep_uncore_pci_disable_event,
2314 };
2315 
2316 static struct intel_uncore_type knl_uncore_imc_uclk = {
2317 	.name			= "imc_uclk",
2318 	.num_counters		= 4,
2319 	.num_boxes		= 2,
2320 	.perf_ctr_bits		= 48,
2321 	.fixed_ctr_bits		= 48,
2322 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2323 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2324 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2325 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2326 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2327 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2328 	.ops			= &knl_uncore_imc_ops,
2329 	.format_group		= &snbep_uncore_format_group,
2330 };
2331 
2332 static struct intel_uncore_type knl_uncore_imc_dclk = {
2333 	.name			= "imc",
2334 	.num_counters		= 4,
2335 	.num_boxes		= 6,
2336 	.perf_ctr_bits		= 48,
2337 	.fixed_ctr_bits		= 48,
2338 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2339 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2340 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2341 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2342 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2343 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2344 	.ops			= &knl_uncore_imc_ops,
2345 	.format_group		= &snbep_uncore_format_group,
2346 };
2347 
2348 static struct intel_uncore_type knl_uncore_edc_uclk = {
2349 	.name			= "edc_uclk",
2350 	.num_counters		= 4,
2351 	.num_boxes		= 8,
2352 	.perf_ctr_bits		= 48,
2353 	.fixed_ctr_bits		= 48,
2354 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2355 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2356 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2357 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2358 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2359 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2360 	.ops			= &knl_uncore_imc_ops,
2361 	.format_group		= &snbep_uncore_format_group,
2362 };
2363 
2364 static struct intel_uncore_type knl_uncore_edc_eclk = {
2365 	.name			= "edc_eclk",
2366 	.num_counters		= 4,
2367 	.num_boxes		= 8,
2368 	.perf_ctr_bits		= 48,
2369 	.fixed_ctr_bits		= 48,
2370 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2371 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2372 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2373 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2374 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2375 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2376 	.ops			= &knl_uncore_imc_ops,
2377 	.format_group		= &snbep_uncore_format_group,
2378 };
2379 
2380 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2381 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2382 	EVENT_CONSTRAINT_END
2383 };
2384 
2385 static struct intel_uncore_type knl_uncore_m2pcie = {
2386 	.name		= "m2pcie",
2387 	.num_counters   = 4,
2388 	.num_boxes	= 1,
2389 	.perf_ctr_bits	= 48,
2390 	.constraints	= knl_uncore_m2pcie_constraints,
2391 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2392 };
2393 
2394 static struct attribute *knl_uncore_irp_formats_attr[] = {
2395 	&format_attr_event.attr,
2396 	&format_attr_umask.attr,
2397 	&format_attr_qor.attr,
2398 	&format_attr_edge.attr,
2399 	&format_attr_inv.attr,
2400 	&format_attr_thresh8.attr,
2401 	NULL,
2402 };
2403 
2404 static const struct attribute_group knl_uncore_irp_format_group = {
2405 	.name = "format",
2406 	.attrs = knl_uncore_irp_formats_attr,
2407 };
2408 
2409 static struct intel_uncore_type knl_uncore_irp = {
2410 	.name			= "irp",
2411 	.num_counters		= 2,
2412 	.num_boxes		= 1,
2413 	.perf_ctr_bits		= 48,
2414 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2415 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2416 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2417 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2418 	.ops			= &snbep_uncore_pci_ops,
2419 	.format_group		= &knl_uncore_irp_format_group,
2420 };
2421 
2422 enum {
2423 	KNL_PCI_UNCORE_MC_UCLK,
2424 	KNL_PCI_UNCORE_MC_DCLK,
2425 	KNL_PCI_UNCORE_EDC_UCLK,
2426 	KNL_PCI_UNCORE_EDC_ECLK,
2427 	KNL_PCI_UNCORE_M2PCIE,
2428 	KNL_PCI_UNCORE_IRP,
2429 };
2430 
2431 static struct intel_uncore_type *knl_pci_uncores[] = {
2432 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2433 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2434 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2435 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2436 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2437 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2438 	NULL,
2439 };
2440 
2441 /*
2442  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2443  * device type. prior to KNL, each instance of a PMU device type had a unique
2444  * device type. Prior to KNL, each instance of a PMU device type had a unique
2445  *
2446  *	PCI Device ID	Uncore PMU Devices
2447  *	----------------------------------
2448  *	0x7841		MC0 UClk, MC1 UClk
2449  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2450  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2451  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2452  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2453  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2454  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2455  *	0x7817		M2PCIe
2456  *	0x7814		IRP
2457  */
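
/*
 * With a shared device ID, the PCI device/function pair is what tells
 * the instances apart: below, MC0 UClk sits at device 10 function 0 and
 * MC1 UClk at device 11 function 0, both behind ID 0x7841.
 */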
2458 
2459 static const struct pci_device_id knl_uncore_pci_ids[] = {
2460 	{ /* MC0 UClk */
2461 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2462 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2463 	},
2464 	{ /* MC1 UClk */
2465 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2466 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2467 	},
2468 	{ /* MC0 DClk CH 0 */
2469 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2470 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2471 	},
2472 	{ /* MC0 DClk CH 1 */
2473 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2474 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2475 	},
2476 	{ /* MC0 DClk CH 2 */
2477 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2478 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2479 	},
2480 	{ /* MC1 DClk CH 0 */
2481 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2482 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2483 	},
2484 	{ /* MC1 DClk CH 1 */
2485 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2486 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2487 	},
2488 	{ /* MC1 DClk CH 2 */
2489 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2490 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2491 	},
2492 	{ /* EDC0 UClk */
2493 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2494 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2495 	},
2496 	{ /* EDC1 UClk */
2497 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2498 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2499 	},
2500 	{ /* EDC2 UClk */
2501 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2502 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2503 	},
2504 	{ /* EDC3 UClk */
2505 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2506 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2507 	},
2508 	{ /* EDC4 UClk */
2509 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2510 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2511 	},
2512 	{ /* EDC5 UClk */
2513 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2514 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2515 	},
2516 	{ /* EDC6 UClk */
2517 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2518 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2519 	},
2520 	{ /* EDC7 UClk */
2521 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2522 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2523 	},
2524 	{ /* EDC0 EClk */
2525 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2526 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2527 	},
2528 	{ /* EDC1 EClk */
2529 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2530 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2531 	},
2532 	{ /* EDC2 EClk */
2533 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2534 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2535 	},
2536 	{ /* EDC3 EClk */
2537 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2538 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2539 	},
2540 	{ /* EDC4 EClk */
2541 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2542 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2543 	},
2544 	{ /* EDC5 EClk */
2545 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2546 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2547 	},
2548 	{ /* EDC6 EClk */
2549 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2550 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2551 	},
2552 	{ /* EDC7 EClk */
2553 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2554 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2555 	},
2556 	{ /* M2PCIe */
2557 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2558 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2559 	},
2560 	{ /* IRP */
2561 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2562 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2563 	},
2564 	{ /* end: all zeroes */ }
2565 };
2566 
2567 static struct pci_driver knl_uncore_pci_driver = {
2568 	.name		= "knl_uncore",
2569 	.id_table	= knl_uncore_pci_ids,
2570 };
2571 
2572 int knl_uncore_pci_init(void)
2573 {
2574 	int ret;
2575 
2576 	/* All KNL PCI-based PMON units are on the same PCI bus except the IRP */
2577 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2578 	if (ret)
2579 		return ret;
2580 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2581 	if (ret)
2582 		return ret;
2583 	uncore_pci_uncores = knl_pci_uncores;
2584 	uncore_pci_driver = &knl_uncore_pci_driver;
2585 	return 0;
2586 }
2587 
2588 /* end of KNL uncore support */
2589 
2590 /* Haswell-EP uncore support */
2591 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2592 	&format_attr_event.attr,
2593 	&format_attr_umask.attr,
2594 	&format_attr_edge.attr,
2595 	&format_attr_inv.attr,
2596 	&format_attr_thresh5.attr,
2597 	&format_attr_filter_tid2.attr,
2598 	&format_attr_filter_cid.attr,
2599 	NULL,
2600 };
2601 
2602 static const struct attribute_group hswep_uncore_ubox_format_group = {
2603 	.name = "format",
2604 	.attrs = hswep_uncore_ubox_formats_attr,
2605 };
2606 
2607 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2608 {
2609 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2610 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2611 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2612 	reg1->idx = 0;
2613 	return 0;
2614 }
2615 
2616 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2617 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2618 	.hw_config		= hswep_ubox_hw_config,
2619 	.get_constraint		= uncore_get_constraint,
2620 	.put_constraint		= uncore_put_constraint,
2621 };
2622 
2623 static struct intel_uncore_type hswep_uncore_ubox = {
2624 	.name			= "ubox",
2625 	.num_counters		= 2,
2626 	.num_boxes		= 1,
2627 	.perf_ctr_bits		= 44,
2628 	.fixed_ctr_bits		= 48,
2629 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2630 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2631 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2632 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2633 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2634 	.num_shared_regs	= 1,
2635 	.ops			= &hswep_uncore_ubox_ops,
2636 	.format_group		= &hswep_uncore_ubox_format_group,
2637 };
2638 
2639 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2640 	&format_attr_event.attr,
2641 	&format_attr_umask.attr,
2642 	&format_attr_edge.attr,
2643 	&format_attr_tid_en.attr,
2644 	&format_attr_thresh8.attr,
2645 	&format_attr_filter_tid3.attr,
2646 	&format_attr_filter_link2.attr,
2647 	&format_attr_filter_state3.attr,
2648 	&format_attr_filter_nid2.attr,
2649 	&format_attr_filter_opc2.attr,
2650 	&format_attr_filter_nc.attr,
2651 	&format_attr_filter_c6.attr,
2652 	&format_attr_filter_isoc.attr,
2653 	NULL,
2654 };
2655 
2656 static const struct attribute_group hswep_uncore_cbox_format_group = {
2657 	.name = "format",
2658 	.attrs = hswep_uncore_cbox_formats_attr,
2659 };
2660 
2661 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2662 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2663 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2664 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2665 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2666 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2667 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2668 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2669 	EVENT_CONSTRAINT_END
2670 };
2671 
2672 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2673 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2674 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2675 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2676 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2677 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2678 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2679 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2680 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2681 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2682 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2683 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2684 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2685 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2686 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2687 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2688 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2689 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2690 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2691 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2692 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2693 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2694 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2695 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2696 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2697 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2698 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2699 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2700 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2701 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2702 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2703 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2704 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2705 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2706 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2707 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2708 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2709 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2710 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2711 	EVENT_EXTRA_END
2712 };
2713 
2714 static u64 hswep_cbox_filter_mask(int fields)
2715 {
2716 	u64 mask = 0;
2717 	if (fields & 0x1)
2718 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2719 	if (fields & 0x2)
2720 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2721 	if (fields & 0x4)
2722 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2723 	if (fields & 0x8)
2724 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2725 	if (fields & 0x10) {
2726 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2727 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2728 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2729 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2730 	}
2731 	return mask;
2732 }
2733 
2734 static struct event_constraint *
2735 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2736 {
2737 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2738 }
2739 
2740 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2741 {
2742 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2743 	struct extra_reg *er;
2744 	int idx = 0;
2745 
2746 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2747 		if (er->event != (event->hw.config & er->config_mask))
2748 			continue;
2749 		idx |= er->idx;
2750 	}
2751 
2752 	if (idx) {
2753 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2754 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2755 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2756 		reg1->idx = idx;
2757 	}
2758 	return 0;
2759 }
2760 
2761 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2762 				  struct perf_event *event)
2763 {
2764 	struct hw_perf_event *hwc = &event->hw;
2765 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2766 
2767 	if (reg1->idx != EXTRA_REG_NONE) {
2768 		u64 filter = uncore_shared_reg_config(box, 0);
2769 		wrmsrl(reg1->reg, filter & 0xffffffff);
2770 		wrmsrl(reg1->reg + 1, filter >> 32);
2771 	}
2772 
2773 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2774 }
2775 
2776 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2777 	.init_box		= snbep_uncore_msr_init_box,
2778 	.disable_box		= snbep_uncore_msr_disable_box,
2779 	.enable_box		= snbep_uncore_msr_enable_box,
2780 	.disable_event		= snbep_uncore_msr_disable_event,
2781 	.enable_event		= hswep_cbox_enable_event,
2782 	.read_counter		= uncore_msr_read_counter,
2783 	.hw_config		= hswep_cbox_hw_config,
2784 	.get_constraint		= hswep_cbox_get_constraint,
2785 	.put_constraint		= snbep_cbox_put_constraint,
2786 };
2787 
2788 static struct intel_uncore_type hswep_uncore_cbox = {
2789 	.name			= "cbox",
2790 	.num_counters		= 4,
2791 	.num_boxes		= 18,
2792 	.perf_ctr_bits		= 48,
2793 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2794 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2795 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2796 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2797 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2798 	.num_shared_regs	= 1,
2799 	.constraints		= hswep_uncore_cbox_constraints,
2800 	.ops			= &hswep_uncore_cbox_ops,
2801 	.format_group		= &hswep_uncore_cbox_format_group,
2802 };
2803 
2804 /*
2805  * Write the SBOX initialization register bit by bit to avoid spurious #GPs
2806  */
2807 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2808 {
2809 	unsigned msr = uncore_msr_box_ctl(box);
2810 
2811 	if (msr) {
2812 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2813 		u64 flags = 0;
2814 		int i;
2815 
2816 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2817 			flags |= (1ULL << i);
2818 			wrmsrl(msr, flags);
2819 		}
2820 	}
2821 }
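
/*
 * e.g. with SNBEP_PMON_BOX_CTL_INT == 0x10003 (bits 0, 1 and 16 set),
 * the loop above issues three cumulative writes: 0x1, 0x3, 0x10003.
 */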
2822 
2823 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2824 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2825 	.init_box		= hswep_uncore_sbox_msr_init_box
2826 };
2827 
2828 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2829 	&format_attr_event.attr,
2830 	&format_attr_umask.attr,
2831 	&format_attr_edge.attr,
2832 	&format_attr_tid_en.attr,
2833 	&format_attr_inv.attr,
2834 	&format_attr_thresh8.attr,
2835 	NULL,
2836 };
2837 
2838 static const struct attribute_group hswep_uncore_sbox_format_group = {
2839 	.name = "format",
2840 	.attrs = hswep_uncore_sbox_formats_attr,
2841 };
2842 
2843 static struct intel_uncore_type hswep_uncore_sbox = {
2844 	.name			= "sbox",
2845 	.num_counters		= 4,
2846 	.num_boxes		= 4,
2847 	.perf_ctr_bits		= 44,
2848 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2849 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2850 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2851 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2852 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2853 	.ops			= &hswep_uncore_sbox_msr_ops,
2854 	.format_group		= &hswep_uncore_sbox_format_group,
2855 };
2856 
2857 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2858 {
2859 	struct hw_perf_event *hwc = &event->hw;
2860 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2861 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2862 
2863 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2864 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2865 		reg1->idx = ev_sel - 0xb;
2866 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2867 	}
2868 	return 0;
2869 }
2870 
2871 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2872 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2873 	.hw_config		= hswep_pcu_hw_config,
2874 	.get_constraint		= snbep_pcu_get_constraint,
2875 	.put_constraint		= snbep_pcu_put_constraint,
2876 };
2877 
2878 static struct intel_uncore_type hswep_uncore_pcu = {
2879 	.name			= "pcu",
2880 	.num_counters		= 4,
2881 	.num_boxes		= 1,
2882 	.perf_ctr_bits		= 48,
2883 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2884 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2885 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2886 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2887 	.num_shared_regs	= 1,
2888 	.ops			= &hswep_uncore_pcu_ops,
2889 	.format_group		= &snbep_uncore_pcu_format_group,
2890 };
2891 
2892 static struct intel_uncore_type *hswep_msr_uncores[] = {
2893 	&hswep_uncore_ubox,
2894 	&hswep_uncore_cbox,
2895 	&hswep_uncore_sbox,
2896 	&hswep_uncore_pcu,
2897 	NULL,
2898 };
2899 
2900 #define HSWEP_PCU_DID			0x2fc0
2901 #define HSWEP_PCU_CAPID4_OFFSET		0x94
2902 #define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2903 
2904 static bool hswep_has_limit_sbox(unsigned int device)
2905 {
2906 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2907 	u32 capid4;
2908 
2909 	if (!dev)
2910 		return false;
2911 
2912 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2913 	pci_dev_put(dev);
2914 	if (!hswep_get_chop(capid4))
2915 		return true;
2916 
2917 	return false;
2918 }
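
/*
 * CAPID4[7:6] encodes the die "chop"; a value of zero identifies the
 * smaller 6-8 core parts, which only populate two SBOXes.
 */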
2919 
2920 void hswep_uncore_cpu_init(void)
2921 {
2922 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2923 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2924 
2925 	/* Detect 6-8 core systems with only two SBOXes */
2926 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2927 		hswep_uncore_sbox.num_boxes = 2;
2928 
2929 	uncore_msr_uncores = hswep_msr_uncores;
2930 }
2931 
2932 static struct intel_uncore_type hswep_uncore_ha = {
2933 	.name		= "ha",
2934 	.num_counters   = 4,
2935 	.num_boxes	= 2,
2936 	.perf_ctr_bits	= 48,
2937 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2938 };
2939 
2940 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2941 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2942 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2943 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2944 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2945 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2946 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2947 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2948 	{ /* end: all zeroes */ },
2949 };
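
/*
 * The 6.103515625e-5 scale is 64 / 2^20: each CAS command moves one
 * 64-byte cache line and the advertised unit is MiB.
 */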
2950 
2951 static struct intel_uncore_type hswep_uncore_imc = {
2952 	.name		= "imc",
2953 	.num_counters   = 4,
2954 	.num_boxes	= 8,
2955 	.perf_ctr_bits	= 48,
2956 	.fixed_ctr_bits	= 48,
2957 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2958 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2959 	.event_descs	= hswep_uncore_imc_events,
2960 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2961 };
2962 
2963 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2964 
2965 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2966 {
2967 	struct pci_dev *pdev = box->pci_dev;
2968 	struct hw_perf_event *hwc = &event->hw;
2969 	u64 count = 0;
2970 
2971 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2972 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2973 
2974 	return count;
2975 }
2976 
2977 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2978 	.init_box	= snbep_uncore_pci_init_box,
2979 	.disable_box	= snbep_uncore_pci_disable_box,
2980 	.enable_box	= snbep_uncore_pci_enable_box,
2981 	.disable_event	= ivbep_uncore_irp_disable_event,
2982 	.enable_event	= ivbep_uncore_irp_enable_event,
2983 	.read_counter	= hswep_uncore_irp_read_counter,
2984 };
2985 
2986 static struct intel_uncore_type hswep_uncore_irp = {
2987 	.name			= "irp",
2988 	.num_counters		= 4,
2989 	.num_boxes		= 1,
2990 	.perf_ctr_bits		= 48,
2991 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2992 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2993 	.ops			= &hswep_uncore_irp_ops,
2994 	.format_group		= &snbep_uncore_format_group,
2995 };
2996 
2997 static struct intel_uncore_type hswep_uncore_qpi = {
2998 	.name			= "qpi",
2999 	.num_counters		= 4,
3000 	.num_boxes		= 3,
3001 	.perf_ctr_bits		= 48,
3002 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3003 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3004 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3005 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3006 	.num_shared_regs	= 1,
3007 	.ops			= &snbep_uncore_qpi_ops,
3008 	.format_group		= &snbep_uncore_qpi_format_group,
3009 };
3010 
3011 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
3012 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3013 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3014 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3015 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3016 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
3017 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3018 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3019 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3020 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3021 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3022 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3023 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3024 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3025 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3026 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3027 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3028 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3029 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3030 	EVENT_CONSTRAINT_END
3031 };
3032 
3033 static struct intel_uncore_type hswep_uncore_r2pcie = {
3034 	.name		= "r2pcie",
3035 	.num_counters   = 4,
3036 	.num_boxes	= 1,
3037 	.perf_ctr_bits	= 48,
3038 	.constraints	= hswep_uncore_r2pcie_constraints,
3039 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3040 };
3041 
3042 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3043 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3044 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3045 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3046 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3047 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3048 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3049 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3050 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3051 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3052 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3053 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3054 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3055 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3056 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3057 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3058 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3059 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3060 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3061 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3062 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3063 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3064 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3065 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3066 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3067 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3068 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3069 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3070 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3071 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3072 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3073 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3074 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3075 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3076 	EVENT_CONSTRAINT_END
3077 };
3078 
3079 static struct intel_uncore_type hswep_uncore_r3qpi = {
3080 	.name		= "r3qpi",
3081 	.num_counters   = 3,
3082 	.num_boxes	= 3,
3083 	.perf_ctr_bits	= 44,
3084 	.constraints	= hswep_uncore_r3qpi_constraints,
3085 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3086 };
3087 
3088 enum {
3089 	HSWEP_PCI_UNCORE_HA,
3090 	HSWEP_PCI_UNCORE_IMC,
3091 	HSWEP_PCI_UNCORE_IRP,
3092 	HSWEP_PCI_UNCORE_QPI,
3093 	HSWEP_PCI_UNCORE_R2PCIE,
3094 	HSWEP_PCI_UNCORE_R3QPI,
3095 };
3096 
3097 static struct intel_uncore_type *hswep_pci_uncores[] = {
3098 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3099 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3100 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3101 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3102 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3103 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3104 	NULL,
3105 };
3106 
3107 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3108 	{ /* Home Agent 0 */
3109 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3110 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3111 	},
3112 	{ /* Home Agent 1 */
3113 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3114 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3115 	},
3116 	{ /* MC0 Channel 0 */
3117 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3118 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3119 	},
3120 	{ /* MC0 Channel 1 */
3121 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3122 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3123 	},
3124 	{ /* MC0 Channel 2 */
3125 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3126 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3127 	},
3128 	{ /* MC0 Channel 3 */
3129 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3130 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3131 	},
3132 	{ /* MC1 Channel 0 */
3133 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3134 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3135 	},
3136 	{ /* MC1 Channel 1 */
3137 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3138 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3139 	},
3140 	{ /* MC1 Channel 2 */
3141 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3142 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3143 	},
3144 	{ /* MC1 Channel 3 */
3145 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3146 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3147 	},
3148 	{ /* IRP */
3149 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3150 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3151 	},
3152 	{ /* QPI0 Port 0 */
3153 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3154 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3155 	},
3156 	{ /* QPI0 Port 1 */
3157 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3158 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3159 	},
3160 	{ /* QPI1 Port 2 */
3161 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3162 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3163 	},
3164 	{ /* R2PCIe */
3165 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3166 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3167 	},
3168 	{ /* R3QPI0 Link 0 */
3169 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3170 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3171 	},
3172 	{ /* R3QPI0 Link 1 */
3173 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3174 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3175 	},
3176 	{ /* R3QPI1 Link 2 */
3177 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3178 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3179 	},
3180 	{ /* QPI Port 0 filter  */
3181 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3182 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3183 						   SNBEP_PCI_QPI_PORT0_FILTER),
3184 	},
3185 	{ /* QPI Port 1 filter  */
3186 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3187 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3188 						   SNBEP_PCI_QPI_PORT1_FILTER),
3189 	},
3190 	{ /* end: all zeroes */ }
3191 };
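
/*
 * Note on the two "QPI Port N filter" entries above: they are not PMON
 * boxes in their own right.  Tagging them UNCORE_EXTRA_PCI_DEV makes the
 * core keep a handle to the devices, which the qpi ops then use to program
 * the per-port packet MATCH/MASK registers from an event's config1/config2.
 */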
3192 
3193 static struct pci_driver hswep_uncore_pci_driver = {
3194 	.name		= "hswep_uncore",
3195 	.id_table	= hswep_uncore_pci_ids,
3196 };
3197 
3198 int hswep_uncore_pci_init(void)
3199 {
3200 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

3201 	if (ret)
3202 		return ret;
3203 	uncore_pci_uncores = hswep_pci_uncores;
3204 	uncore_pci_driver = &hswep_uncore_pci_driver;
3205 	return 0;
3206 }
3207 /* end of Haswell-EP uncore support */
3208 
3209 /* BDX uncore support */
3210 
3211 static struct intel_uncore_type bdx_uncore_ubox = {
3212 	.name			= "ubox",
3213 	.num_counters		= 2,
3214 	.num_boxes		= 1,
3215 	.perf_ctr_bits		= 48,
3216 	.fixed_ctr_bits		= 48,
3217 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3218 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3219 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3220 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3221 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3222 	.num_shared_regs	= 1,
3223 	.ops			= &ivbep_uncore_msr_ops,
3224 	.format_group		= &ivbep_uncore_ubox_format_group,
3225 };
3226 
3227 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3228 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3229 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3230 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3231 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3232 	EVENT_CONSTRAINT_END
3233 };
3234 
3235 static struct intel_uncore_type bdx_uncore_cbox = {
3236 	.name			= "cbox",
3237 	.num_counters		= 4,
3238 	.num_boxes		= 24,
3239 	.perf_ctr_bits		= 48,
3240 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3241 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3242 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3243 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3244 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3245 	.num_shared_regs	= 1,
3246 	.constraints		= bdx_uncore_cbox_constraints,
3247 	.ops			= &hswep_uncore_cbox_ops,
3248 	.format_group		= &hswep_uncore_cbox_format_group,
3249 };
3250 
3251 static struct intel_uncore_type bdx_uncore_sbox = {
3252 	.name			= "sbox",
3253 	.num_counters		= 4,
3254 	.num_boxes		= 4,
3255 	.perf_ctr_bits		= 48,
3256 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3257 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3258 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3259 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3260 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3261 	.ops			= &hswep_uncore_sbox_msr_ops,
3262 	.format_group		= &hswep_uncore_sbox_format_group,
3263 };
3264 
3265 #define BDX_MSR_UNCORE_SBOX	3
3266 
3267 static struct intel_uncore_type *bdx_msr_uncores[] = {
3268 	&bdx_uncore_ubox,
3269 	&bdx_uncore_cbox,
3270 	&hswep_uncore_pcu,
3271 	&bdx_uncore_sbox,
3272 	NULL,
3273 };
3274 
3275 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3276 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3277 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3278 	EVENT_CONSTRAINT_END
3279 };
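
/*
 * Reading the constraint triple: EVENT_CONSTRAINT(code, counter mask,
 * event mask).  The event mask here is 0x80, so any event selector with
 * bit 7 (Use Occupancy) set matches, and the 0xe counter mask confines
 * such events to counters 1-3, keeping them off counter 0 as noted above.
 */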
3280 
3281 #define BDX_PCU_DID			0x6fc0
3282 
3283 void bdx_uncore_cpu_init(void)
3284 {
3285 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3286 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3287 	uncore_msr_uncores = bdx_msr_uncores;
3288 
3289 	/* Detect systems with no SBOXes */
3290 	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3291 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3292 
3293 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3294 }
3295 
3296 static struct intel_uncore_type bdx_uncore_ha = {
3297 	.name		= "ha",
3298 	.num_counters   = 4,
3299 	.num_boxes	= 2,
3300 	.perf_ctr_bits	= 48,
3301 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3302 };
3303 
3304 static struct intel_uncore_type bdx_uncore_imc = {
3305 	.name		= "imc",
3306 	.num_counters   = 4,
3307 	.num_boxes	= 8,
3308 	.perf_ctr_bits	= 48,
3309 	.fixed_ctr_bits	= 48,
3310 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3311 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3312 	.event_descs	= hswep_uncore_imc_events,
3313 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3314 };
3315 
3316 static struct intel_uncore_type bdx_uncore_irp = {
3317 	.name			= "irp",
3318 	.num_counters		= 4,
3319 	.num_boxes		= 1,
3320 	.perf_ctr_bits		= 48,
3321 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3322 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3323 	.ops			= &hswep_uncore_irp_ops,
3324 	.format_group		= &snbep_uncore_format_group,
3325 };
3326 
3327 static struct intel_uncore_type bdx_uncore_qpi = {
3328 	.name			= "qpi",
3329 	.num_counters		= 4,
3330 	.num_boxes		= 3,
3331 	.perf_ctr_bits		= 48,
3332 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3333 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3334 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3335 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3336 	.num_shared_regs	= 1,
3337 	.ops			= &snbep_uncore_qpi_ops,
3338 	.format_group		= &snbep_uncore_qpi_format_group,
3339 };
3340 
3341 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3342 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3343 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3344 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3345 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3346 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3347 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3348 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3349 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3350 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3351 	EVENT_CONSTRAINT_END
3352 };
3353 
3354 static struct intel_uncore_type bdx_uncore_r2pcie = {
3355 	.name		= "r2pcie",
3356 	.num_counters   = 4,
3357 	.num_boxes	= 1,
3358 	.perf_ctr_bits	= 48,
3359 	.constraints	= bdx_uncore_r2pcie_constraints,
3360 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3361 };
3362 
3363 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3364 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3365 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3366 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3367 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3368 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3369 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3370 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3371 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3372 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3373 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3374 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3375 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3376 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3377 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3378 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3379 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3380 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3381 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3382 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3383 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3384 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3385 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3386 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3387 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3388 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3389 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3390 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3391 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3392 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3393 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3394 	EVENT_CONSTRAINT_END
3395 };
3396 
3397 static struct intel_uncore_type bdx_uncore_r3qpi = {
3398 	.name		= "r3qpi",
3399 	.num_counters   = 3,
3400 	.num_boxes	= 3,
3401 	.perf_ctr_bits	= 48,
3402 	.constraints	= bdx_uncore_r3qpi_constraints,
3403 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3404 };
3405 
3406 enum {
3407 	BDX_PCI_UNCORE_HA,
3408 	BDX_PCI_UNCORE_IMC,
3409 	BDX_PCI_UNCORE_IRP,
3410 	BDX_PCI_UNCORE_QPI,
3411 	BDX_PCI_UNCORE_R2PCIE,
3412 	BDX_PCI_UNCORE_R3QPI,
3413 };
3414 
3415 static struct intel_uncore_type *bdx_pci_uncores[] = {
3416 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3417 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3418 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3419 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3420 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3421 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3422 	NULL,
3423 };
3424 
3425 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3426 	{ /* Home Agent 0 */
3427 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3428 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3429 	},
3430 	{ /* Home Agent 1 */
3431 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3432 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3433 	},
3434 	{ /* MC0 Channel 0 */
3435 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3436 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3437 	},
3438 	{ /* MC0 Channel 1 */
3439 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3440 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3441 	},
3442 	{ /* MC0 Channel 2 */
3443 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3444 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3445 	},
3446 	{ /* MC0 Channel 3 */
3447 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3448 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3449 	},
3450 	{ /* MC1 Channel 0 */
3451 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3452 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3453 	},
3454 	{ /* MC1 Channel 1 */
3455 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3456 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3457 	},
3458 	{ /* MC1 Channel 2 */
3459 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3460 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3461 	},
3462 	{ /* MC1 Channel 3 */
3463 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3464 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3465 	},
3466 	{ /* IRP */
3467 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3468 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3469 	},
3470 	{ /* QPI0 Port 0 */
3471 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3472 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3473 	},
3474 	{ /* QPI0 Port 1 */
3475 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3476 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3477 	},
3478 	{ /* QPI1 Port 2 */
3479 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3480 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3481 	},
3482 	{ /* R2PCIe */
3483 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3484 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3485 	},
3486 	{ /* R3QPI0 Link 0 */
3487 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3488 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3489 	},
3490 	{ /* R3QPI0 Link 1 */
3491 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3492 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3493 	},
3494 	{ /* R3QPI1 Link 2 */
3495 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3496 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3497 	},
3498 	{ /* QPI Port 0 filter  */
3499 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3500 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3501 						   SNBEP_PCI_QPI_PORT0_FILTER),
3502 	},
3503 	{ /* QPI Port 1 filter  */
3504 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3505 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3506 						   SNBEP_PCI_QPI_PORT1_FILTER),
3507 	},
3508 	{ /* QPI Port 2 filter  */
3509 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3510 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3511 						   BDX_PCI_QPI_PORT2_FILTER),
3512 	},
3513 	{ /* end: all zeroes */ }
3514 };
3515 
3516 static struct pci_driver bdx_uncore_pci_driver = {
3517 	.name		= "bdx_uncore",
3518 	.id_table	= bdx_uncore_pci_ids,
3519 };
3520 
3521 int bdx_uncore_pci_init(void)
3522 {
3523 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3524 
3525 	if (ret)
3526 		return ret;
3527 	uncore_pci_uncores = bdx_pci_uncores;
3528 	uncore_pci_driver = &bdx_uncore_pci_driver;
3529 	return 0;
3530 }
3531 
3532 /* end of BDX uncore support */
3533 
3534 /* SKX uncore support */
3535 
3536 static struct intel_uncore_type skx_uncore_ubox = {
3537 	.name			= "ubox",
3538 	.num_counters		= 2,
3539 	.num_boxes		= 1,
3540 	.perf_ctr_bits		= 48,
3541 	.fixed_ctr_bits		= 48,
3542 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3543 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3544 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3545 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3546 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3547 	.ops			= &ivbep_uncore_msr_ops,
3548 	.format_group		= &ivbep_uncore_ubox_format_group,
3549 };
3550 
3551 static struct attribute *skx_uncore_cha_formats_attr[] = {
3552 	&format_attr_event.attr,
3553 	&format_attr_umask.attr,
3554 	&format_attr_edge.attr,
3555 	&format_attr_tid_en.attr,
3556 	&format_attr_inv.attr,
3557 	&format_attr_thresh8.attr,
3558 	&format_attr_filter_tid4.attr,
3559 	&format_attr_filter_state5.attr,
3560 	&format_attr_filter_rem.attr,
3561 	&format_attr_filter_loc.attr,
3562 	&format_attr_filter_nm.attr,
3563 	&format_attr_filter_all_op.attr,
3564 	&format_attr_filter_not_nm.attr,
3565 	&format_attr_filter_opc_0.attr,
3566 	&format_attr_filter_opc_1.attr,
3567 	&format_attr_filter_nc.attr,
3568 	&format_attr_filter_isoc.attr,
3569 	NULL,
3570 };
3571 
3572 static const struct attribute_group skx_uncore_chabox_format_group = {
3573 	.name = "format",
3574 	.attrs = skx_uncore_cha_formats_attr,
3575 };
3576 
3577 static struct event_constraint skx_uncore_chabox_constraints[] = {
3578 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3579 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3580 	EVENT_CONSTRAINT_END
3581 };
3582 
3583 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3584 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3585 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3586 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3587 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3588 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3589 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3590 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3591 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3592 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3593 	EVENT_EXTRA_END
3594 };
3595 
3596 static u64 skx_cha_filter_mask(int fields)
3597 {
3598 	u64 mask = 0;
3599 
3600 	if (fields & 0x1)
3601 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3602 	if (fields & 0x2)
3603 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3604 	if (fields & 0x4)
3605 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3606 	if (fields & 0x8) {
3607 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3608 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3609 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3610 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3611 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3612 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3613 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3614 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3615 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3616 	}
3617 	return mask;
3618 }
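
/*
 * Illustration: for fields == 0x5 (0x1 | 0x4), the function returns
 * SKX_CHA_MSR_PMON_BOX_FILTER_TID | SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
 * bit 0x8 fans out into the whole group of locality/opcode filter bits.
 */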
3619 
3620 static struct event_constraint *
3621 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3622 {
3623 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3624 }
3625 
3626 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3627 {
3628 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3629 	struct extra_reg *er;
3630 	int idx = 0;
3631 	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3632 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3633 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3634 
3635 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3636 		if (er->event != (event->hw.config & er->config_mask))
3637 			continue;
3638 		idx |= er->idx;
3639 	}
3640 
3641 	if (idx) {
3642 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3643 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3644 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3645 		reg1->idx = idx;
3646 	}
3647 	return 0;
3648 }
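
/*
 * Illustration (hypothetical event values): for an event listed in
 * skx_uncore_cha_extra_regs, e.g. event=0x35 with tid_en=1, idx collects
 * the TID filter bits plus the table's 0x8 group, so FILTER0 of this CHA
 * gets loaded with whatever config1 bits survive skx_cha_filter_mask(idx).
 */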
3649 
3650 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3651 	/* There is no frz_en for chabox ctl */
3652 	.init_box		= ivbep_uncore_msr_init_box,
3653 	.disable_box		= snbep_uncore_msr_disable_box,
3654 	.enable_box		= snbep_uncore_msr_enable_box,
3655 	.disable_event		= snbep_uncore_msr_disable_event,
3656 	.enable_event		= hswep_cbox_enable_event,
3657 	.read_counter		= uncore_msr_read_counter,
3658 	.hw_config		= skx_cha_hw_config,
3659 	.get_constraint		= skx_cha_get_constraint,
3660 	.put_constraint		= snbep_cbox_put_constraint,
3661 };
3662 
3663 static struct intel_uncore_type skx_uncore_chabox = {
3664 	.name			= "cha",
3665 	.num_counters		= 4,
3666 	.perf_ctr_bits		= 48,
3667 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3668 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3669 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3670 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3671 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3672 	.num_shared_regs	= 1,
3673 	.constraints		= skx_uncore_chabox_constraints,
3674 	.ops			= &skx_uncore_chabox_ops,
3675 	.format_group		= &skx_uncore_chabox_format_group,
3676 };
3677 
3678 static struct attribute *skx_uncore_iio_formats_attr[] = {
3679 	&format_attr_event.attr,
3680 	&format_attr_umask.attr,
3681 	&format_attr_edge.attr,
3682 	&format_attr_inv.attr,
3683 	&format_attr_thresh9.attr,
3684 	&format_attr_ch_mask.attr,
3685 	&format_attr_fc_mask.attr,
3686 	NULL,
3687 };
3688 
3689 static const struct attribute_group skx_uncore_iio_format_group = {
3690 	.name = "format",
3691 	.attrs = skx_uncore_iio_formats_attr,
3692 };
3693 
3694 static struct event_constraint skx_uncore_iio_constraints[] = {
3695 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3696 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3697 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3698 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3699 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3700 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3701 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3702 	EVENT_CONSTRAINT_END
3703 };
3704 
3705 static void skx_iio_enable_event(struct intel_uncore_box *box,
3706 				 struct perf_event *event)
3707 {
3708 	struct hw_perf_event *hwc = &event->hw;
3709 
3710 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3711 }
3712 
3713 static struct intel_uncore_ops skx_uncore_iio_ops = {
3714 	.init_box		= ivbep_uncore_msr_init_box,
3715 	.disable_box		= snbep_uncore_msr_disable_box,
3716 	.enable_box		= snbep_uncore_msr_enable_box,
3717 	.disable_event		= snbep_uncore_msr_disable_event,
3718 	.enable_event		= skx_iio_enable_event,
3719 	.read_counter		= uncore_msr_read_counter,
3720 };
3721 
3722 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3723 {
3724 	int idx;
3725 
3726 	for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3727 		if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3728 			return &pmu->type->topology[die][idx];
3729 	}
3730 
3731 	return NULL;
3732 }
3733 
3734 static umode_t
3735 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3736 			 int die, int zero_bus_pmu)
3737 {
3738 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3739 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3740 
3741 	return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3742 }
3743 
3744 static umode_t
3745 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3746 {
3747 	/* Root bus 0x00 is valid only for pmu_idx = 0. */
3748 	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3749 }
3750 
3751 static ssize_t skx_iio_mapping_show(struct device *dev,
3752 				    struct device_attribute *attr, char *buf)
3753 {
3754 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3755 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3756 	long die = (long)ea->var;
3757 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3758 
3759 	return sysfs_emit(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3760 			  pmut ? pmut->iio->pci_bus_no : 0);
3761 }
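
/*
 * Illustration (hypothetical topology): reading the "die0" attribute of an
 * iio PMU would print e.g. "0000:7f", i.e. PCI segment 0000 and root bus
 * 0x7f for that die's stack.
 */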
3762 
3763 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3764 {
3765 	u64 msr_value;
3766 
3767 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3768 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3769 		return -ENXIO;
3770 
3771 	*topology = msr_value;
3772 
3773 	return 0;
3774 }
3775 
3776 static int die_to_cpu(int die)
3777 {
3778 	int res = 0, cpu, current_die;
3779 	/*
3780 	 * Take cpus_read_lock() to ensure no CPU goes offline while we are
3781 	 * walking cpu_online_mask.
3782 	 */
3783 	cpus_read_lock();
3784 	for_each_online_cpu(cpu) {
3785 		current_die = topology_logical_die_id(cpu);
3786 		if (current_die == die) {
3787 			res = cpu;
3788 			break;
3789 		}
3790 	}
3791 	cpus_read_unlock();
3792 	return res;
3793 }
3794 
3795 enum {
3796 	IIO_TOPOLOGY_TYPE,
3797 	UPI_TOPOLOGY_TYPE,
3798 	TOPOLOGY_MAX
3799 };
3800 
3801 static const size_t topology_size[TOPOLOGY_MAX] = {
3802 	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
3803 	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
3804 };
3805 
3806 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3807 {
3808 	int die, idx;
3809 	struct intel_uncore_topology **topology;
3810 
3811 	if (!type->num_boxes)
3812 		return -EPERM;
3813 
3814 	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3815 	if (!topology)
3816 		goto err;
3817 
3818 	for (die = 0; die < uncore_max_dies(); die++) {
3819 		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3820 		if (!topology[die])
3821 			goto clear;
3822 		for (idx = 0; idx < type->num_boxes; idx++) {
3823 			topology[die][idx].untyped = kcalloc(type->num_boxes,
3824 							     topology_size[topology_type],
3825 							     GFP_KERNEL);
3826 			if (!topology[die][idx].untyped)
3827 				goto clear;
3828 		}
3829 	}
3830 
3831 	type->topology = topology;
3832 
3833 	return 0;
3834 clear:
3835 	for (; die >= 0; die--) {
		/* The failing die's row may not have been allocated. */
		if (!topology[die])
			continue;
3836 		for (idx = 0; idx < type->num_boxes; idx++)
3837 			kfree(topology[die][idx].untyped);
3838 		kfree(topology[die]);
3839 	}
3840 	kfree(topology);
3841 err:
3842 	return -ENOMEM;
3843 }
3844 
3845 static void pmu_free_topology(struct intel_uncore_type *type)
3846 {
3847 	int die, idx;
3848 
3849 	if (type->topology) {
3850 		for (die = 0; die < uncore_max_dies(); die++) {
3851 			for (idx = 0; idx < type->num_boxes; idx++)
3852 				kfree(type->topology[die][idx].untyped);
3853 			kfree(type->topology[die]);
3854 		}
3855 		kfree(type->topology);
3856 		type->topology = NULL;
3857 	}
3858 }
3859 
3860 static int skx_pmu_get_topology(struct intel_uncore_type *type,
3861 				 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
3862 {
3863 	int die, ret = -EPERM;
3864 	u64 cpu_bus_msr;
3865 
3866 	for (die = 0; die < uncore_max_dies(); die++) {
3867 		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3868 		if (ret)
3869 			break;
3870 
3871 		ret = uncore_die_to_segment(die);
3872 		if (ret < 0)
3873 			break;
3874 
3875 		ret = topology_cb(type, ret, die, cpu_bus_msr);
3876 		if (ret)
3877 			break;
3878 	}
3879 
3880 	return ret;
3881 }
3882 
3883 static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
3884 				int die, u64 cpu_bus_msr)
3885 {
3886 	int idx;
3887 	struct intel_uncore_topology *t;
3888 
3889 	for (idx = 0; idx < type->num_boxes; idx++) {
3890 		t = &type->topology[die][idx];
3891 		t->pmu_idx = idx;
3892 		t->iio->segment = segment;
3893 		t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
3894 	}
3895 
3896 	return 0;
3897 }
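
/*
 * SKX_MSR_CPU_BUS_NUMBER packs one root bus number per IIO stack, in
 * BUS_NUM_STRIDE-wide fields: bus(idx) = (msr >> (idx * BUS_NUM_STRIDE))
 * & 0xff, which is the decode applied above (and reused for UPI below).
 */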
3898 
3899 static int skx_iio_get_topology(struct intel_uncore_type *type)
3900 {
3901 	return skx_pmu_get_topology(type, skx_iio_topology_cb);
3902 }
3903 
3904 static struct attribute_group skx_iio_mapping_group = {
3905 	.is_visible	= skx_iio_mapping_visible,
3906 };
3907 
3908 static const struct attribute_group *skx_iio_attr_update[] = {
3909 	&skx_iio_mapping_group,
3910 	NULL,
3911 };
3912 
3913 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3914 				   struct attribute_group *ag)
3915 {
3916 	int i;
3917 
3918 	for (i = 0; groups[i]; i++) {
3919 		if (groups[i] == ag) {
3920 			for (i++; groups[i]; i++)
3921 				groups[i - 1] = groups[i];
3922 			groups[i - 1] = NULL;
3923 			break;
3924 		}
3925 	}
3926 }
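
/*
 * Illustration (hypothetical array): with attr_update being
 * { &a_group, &mapping_group, NULL } and ag == &mapping_group, the tail is
 * shifted left, leaving { &a_group, NULL, ... }, so the mapping attributes
 * are never created.
 */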
3927 
3928 static void
3929 pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3930 		ssize_t (*show)(struct device*, struct device_attribute*, char*),
3931 		int topology_type)
3932 {
3933 	char buf[64];
3934 	int ret;
3935 	long die = -1;
3936 	struct attribute **attrs = NULL;
3937 	struct dev_ext_attribute *eas = NULL;
3938 
3939 	ret = pmu_alloc_topology(type, topology_type);
3940 	if (ret < 0)
3941 		goto clear_attr_update;
3942 
3943 	ret = type->get_topology(type);
3944 	if (ret < 0)
3945 		goto clear_topology;
3946 
3947 	/* One more for NULL. */
3948 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3949 	if (!attrs)
3950 		goto clear_topology;
3951 
3952 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3953 	if (!eas)
3954 		goto clear_attrs;
3955 
3956 	for (die = 0; die < uncore_max_dies(); die++) {
3957 		snprintf(buf, sizeof(buf), "die%ld", die);
3958 		sysfs_attr_init(&eas[die].attr.attr);
3959 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3960 		if (!eas[die].attr.attr.name)
3961 			goto err;
3962 		eas[die].attr.attr.mode = 0444;
3963 		eas[die].attr.show = show;
3964 		eas[die].attr.store = NULL;
3965 		eas[die].var = (void *)die;
3966 		attrs[die] = &eas[die].attr.attr;
3967 	}
3968 	ag->attrs = attrs;
3969 
3970 	return;
3971 err:
3972 	for (; die >= 0; die--)
3973 		kfree(eas[die].attr.attr.name);
3974 	kfree(eas);
3975 clear_attrs:
3976 	kfree(attrs);
3977 clear_topology:
3978 	pmu_free_topology(type);
3979 clear_attr_update:
3980 	pmu_clear_mapping_attr(type->attr_update, ag);
3981 }
3982 
3983 static void
3984 pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3985 {
3986 	struct attribute **attr = ag->attrs;
3987 
3988 	if (!attr)
3989 		return;
3990 
3991 	for (; *attr; attr++)
3992 		kfree((*attr)->name);
3993 	kfree(attr_to_ext_attr(*ag->attrs));
3994 	kfree(ag->attrs);
3995 	ag->attrs = NULL;
3996 	pmu_free_topology(type);
3997 }
3998 
3999 static void
4000 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4001 {
4002 	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
4003 }
4004 
4005 static void skx_iio_set_mapping(struct intel_uncore_type *type)
4006 {
4007 	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4008 }
4009 
4010 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
4011 {
4012 	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4013 }
4014 
4015 static struct intel_uncore_type skx_uncore_iio = {
4016 	.name			= "iio",
4017 	.num_counters		= 4,
4018 	.num_boxes		= 6,
4019 	.perf_ctr_bits		= 48,
4020 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
4021 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
4022 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
4023 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4024 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
4025 	.msr_offset		= SKX_IIO_MSR_OFFSET,
4026 	.constraints		= skx_uncore_iio_constraints,
4027 	.ops			= &skx_uncore_iio_ops,
4028 	.format_group		= &skx_uncore_iio_format_group,
4029 	.attr_update		= skx_iio_attr_update,
4030 	.get_topology		= skx_iio_get_topology,
4031 	.set_mapping		= skx_iio_set_mapping,
4032 	.cleanup_mapping	= skx_iio_cleanup_mapping,
4033 };
4034 
4035 enum perf_uncore_iio_freerunning_type_id {
4036 	SKX_IIO_MSR_IOCLK			= 0,
4037 	SKX_IIO_MSR_BW				= 1,
4038 	SKX_IIO_MSR_UTIL			= 2,
4039 
4040 	SKX_IIO_FREERUNNING_TYPE_MAX,
4041 };
4042
4044 static struct freerunning_counters skx_iio_freerunning[] = {
4045 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
4046 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
4047 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
4048 };
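
/*
 * The initializers above follow struct freerunning_counters (uncore.h):
 * counter base MSR, offset between counters, offset between boxes, number
 * of counters, counter width in bits.  So IOCLK is a single 36-bit counter
 * at MSR 0xa45, with successive IIO boxes 0x20 apart.
 */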
4049 
4050 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
4051 	/* Free-Running IO CLOCKS Counter */
4052 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4053 	/* Free-Running IIO BANDWIDTH Counters */
4054 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4055 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4056 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4057 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4058 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4059 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4060 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4061 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4062 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4063 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4064 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4065 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4066 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
4067 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
4068 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
4069 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
4070 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
4071 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
4072 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
4073 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
4074 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
4075 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
4076 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
4077 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
4078 	/* Free-running IIO UTILIZATION Counters */
4079 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
4080 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
4081 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
4082 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
4083 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
4084 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
4085 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
4086 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
4087 	{ /* end: all zeroes */ },
4088 };
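
/*
 * About the .scale strings above: 3.814697266e-6 is 4 / 2^20, i.e. each
 * bandwidth counter increment represents 4 bytes, and perf's scaling
 * reports the result in MiB to match the .unit strings.
 */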
4089 
4090 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
4091 	.read_counter		= uncore_msr_read_counter,
4092 	.hw_config		= uncore_freerunning_hw_config,
4093 };
4094 
4095 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
4096 	&format_attr_event.attr,
4097 	&format_attr_umask.attr,
4098 	NULL,
4099 };
4100 
4101 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
4102 	.name = "format",
4103 	.attrs = skx_uncore_iio_freerunning_formats_attr,
4104 };
4105 
4106 static struct intel_uncore_type skx_uncore_iio_free_running = {
4107 	.name			= "iio_free_running",
4108 	.num_counters		= 17,
4109 	.num_boxes		= 6,
4110 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
4111 	.freerunning		= skx_iio_freerunning,
4112 	.ops			= &skx_uncore_iio_freerunning_ops,
4113 	.event_descs		= skx_uncore_iio_freerunning_events,
4114 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4115 };
4116 
4117 static struct attribute *skx_uncore_formats_attr[] = {
4118 	&format_attr_event.attr,
4119 	&format_attr_umask.attr,
4120 	&format_attr_edge.attr,
4121 	&format_attr_inv.attr,
4122 	&format_attr_thresh8.attr,
4123 	NULL,
4124 };
4125 
4126 static const struct attribute_group skx_uncore_format_group = {
4127 	.name = "format",
4128 	.attrs = skx_uncore_formats_attr,
4129 };
4130 
4131 static struct intel_uncore_type skx_uncore_irp = {
4132 	.name			= "irp",
4133 	.num_counters		= 2,
4134 	.num_boxes		= 6,
4135 	.perf_ctr_bits		= 48,
4136 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
4137 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
4138 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4139 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
4140 	.msr_offset		= SKX_IRP_MSR_OFFSET,
4141 	.ops			= &skx_uncore_iio_ops,
4142 	.format_group		= &skx_uncore_format_group,
4143 };
4144 
4145 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4146 	&format_attr_event.attr,
4147 	&format_attr_umask.attr,
4148 	&format_attr_edge.attr,
4149 	&format_attr_inv.attr,
4150 	&format_attr_thresh8.attr,
4151 	&format_attr_occ_invert.attr,
4152 	&format_attr_occ_edge_det.attr,
4153 	&format_attr_filter_band0.attr,
4154 	&format_attr_filter_band1.attr,
4155 	&format_attr_filter_band2.attr,
4156 	&format_attr_filter_band3.attr,
4157 	NULL,
4158 };
4159 
4160 static struct attribute_group skx_uncore_pcu_format_group = {
4161 	.name = "format",
4162 	.attrs = skx_uncore_pcu_formats_attr,
4163 };
4164 
4165 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4166 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4167 	.hw_config		= hswep_pcu_hw_config,
4168 	.get_constraint		= snbep_pcu_get_constraint,
4169 	.put_constraint		= snbep_pcu_put_constraint,
4170 };
4171 
4172 static struct intel_uncore_type skx_uncore_pcu = {
4173 	.name			= "pcu",
4174 	.num_counters		= 4,
4175 	.num_boxes		= 1,
4176 	.perf_ctr_bits		= 48,
4177 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
4178 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4179 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4180 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4181 	.num_shared_regs	= 1,
4182 	.ops			= &skx_uncore_pcu_ops,
4183 	.format_group		= &skx_uncore_pcu_format_group,
4184 };
4185 
4186 static struct intel_uncore_type *skx_msr_uncores[] = {
4187 	&skx_uncore_ubox,
4188 	&skx_uncore_chabox,
4189 	&skx_uncore_iio,
4190 	&skx_uncore_iio_free_running,
4191 	&skx_uncore_irp,
4192 	&skx_uncore_pcu,
4193 	NULL,
4194 };
4195 
4196 /*
4197  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4198  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4199  */
4200 #define SKX_CAPID6		0x9c
4201 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4202 
4203 static int skx_count_chabox(void)
4204 {
4205 	struct pci_dev *dev = NULL;
4206 	u32 val = 0;
4207 
4208 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4209 	if (!dev)
4210 		goto out;
4211 
4212 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4213 	val &= SKX_CHA_BIT_MASK;
4214 out:
4215 	pci_dev_put(dev);
4216 	return hweight32(val);
4217 }
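
/*
 * Illustration: CAPID6 bits 27:0 are a CHA presence mask, so the box count
 * is its population count; a (hypothetical) fully populated part reading
 * 0x0fffffff yields hweight32() == 28 CHAs.
 */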
4218 
4219 void skx_uncore_cpu_init(void)
4220 {
4221 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4222 	uncore_msr_uncores = skx_msr_uncores;
4223 }
4224 
4225 static struct intel_uncore_type skx_uncore_imc = {
4226 	.name		= "imc",
4227 	.num_counters   = 4,
4228 	.num_boxes	= 6,
4229 	.perf_ctr_bits	= 48,
4230 	.fixed_ctr_bits	= 48,
4231 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4232 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4233 	.event_descs	= hswep_uncore_imc_events,
4234 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4235 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4236 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4237 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4238 	.ops		= &ivbep_uncore_pci_ops,
4239 	.format_group	= &skx_uncore_format_group,
4240 };
4241 
4242 static struct attribute *skx_upi_uncore_formats_attr[] = {
4243 	&format_attr_event.attr,
4244 	&format_attr_umask_ext.attr,
4245 	&format_attr_edge.attr,
4246 	&format_attr_inv.attr,
4247 	&format_attr_thresh8.attr,
4248 	NULL,
4249 };
4250 
4251 static const struct attribute_group skx_upi_uncore_format_group = {
4252 	.name = "format",
4253 	.attrs = skx_upi_uncore_formats_attr,
4254 };
4255 
4256 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4257 {
4258 	struct pci_dev *pdev = box->pci_dev;
4259 
4260 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4261 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4262 }
4263 
4264 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4265 	.init_box	= skx_upi_uncore_pci_init_box,
4266 	.disable_box	= snbep_uncore_pci_disable_box,
4267 	.enable_box	= snbep_uncore_pci_enable_box,
4268 	.disable_event	= snbep_uncore_pci_disable_event,
4269 	.enable_event	= snbep_uncore_pci_enable_event,
4270 	.read_counter	= snbep_uncore_pci_read_counter,
4271 };
4272 
4273 static umode_t
4274 skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4275 {
4276 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
4277 
4278 	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
4279 }
4280 
4281 static ssize_t skx_upi_mapping_show(struct device *dev,
4282 				    struct device_attribute *attr, char *buf)
4283 {
4284 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4285 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4286 	long die = (long)ea->var;
4287 	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4288 
4289 	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4290 }
4291 
4292 #define SKX_UPI_REG_DID			0x2058
4293 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0	0x0e
4294 #define SKX_UPI_REGS_ADDR_FUNCTION	0x00
4295 
4296 /*
4297  * UPI Link Parameter 0
4298  * |  Bit  |  Default  |  Description
4299  * | 19:16 |     0h    | base_nodeid - The NodeID of the sending socket.
4300  * | 12:8  |    00h    | sending_port - The processor die port number of the sending port.
4301  */
4302 #define SKX_KTILP0_OFFSET	0x94
4303 
4304 /*
4305  * UPI Pcode Status. This register is used by PCode to store the link training status.
4306  * |  Bit  |  Default  |  Description
4307  * |   4   |     0h    | ll_status_valid - Indicates that a valid training
4308  *                       status has been logged from PCode to the BIOS.
4309  */
4310 #define SKX_KTIPCSTS_OFFSET	0x120
4311 
4312 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4313 			     int pmu_idx)
4314 {
4315 	int ret;
4316 	u32 upi_conf;
4317 	struct uncore_upi_topology *upi = tp->upi;
4318 
4319 	tp->pmu_idx = pmu_idx;
4320 	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4321 	if (ret) {
4322 		ret = pcibios_err_to_errno(ret);
4323 		goto err;
4324 	}
4325 	upi->enabled = (upi_conf >> 4) & 1;
4326 	if (upi->enabled) {
4327 		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4328 					    &upi_conf);
4329 		if (ret) {
4330 			ret = pcibios_err_to_errno(ret);
4331 			goto err;
4332 		}
4333 		upi->die_to = (upi_conf >> 16) & 0xf;
4334 		upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4335 	}
4336 err:
4337 	return ret;
4338 }
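
/*
 * Illustration (hypothetical KTILP0 value): upi_conf == 0x00010200 decodes
 * as die_to = (0x00010200 >> 16) & 0xf = 1 and
 * pmu_idx_to = (0x00010200 >> 8) & 0x1f = 2, which skx_upi_mapping_show()
 * reports as "upi_2,die_1".
 */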
4339 
4340 static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
4341 				int die, u64 cpu_bus_msr)
4342 {
4343 	int idx, ret = 0;
4344 	struct intel_uncore_topology *upi;
4345 	unsigned int devfn;
4346 	struct pci_dev *dev = NULL;
4347 	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
4348 
4349 	for (idx = 0; idx < type->num_boxes; idx++) {
4350 		upi = &type->topology[die][idx];
4351 		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
4352 				  SKX_UPI_REGS_ADDR_FUNCTION);
4353 		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4354 		if (dev) {
4355 			ret = upi_fill_topology(dev, upi, idx);
			/* Drop the reference from pci_get_domain_bus_and_slot(). */
			pci_dev_put(dev);
4356 			if (ret)
4357 				break;
4358 		}
4359 	}
4360
4362 	return ret;
4363 }
4364 
4365 static int skx_upi_get_topology(struct intel_uncore_type *type)
4366 {
4367 	/* CPX case is not supported */
4368 	if (boot_cpu_data.x86_stepping == 11)
4369 		return -EPERM;
4370 
4371 	return skx_pmu_get_topology(type, skx_upi_topology_cb);
4372 }
4373 
4374 static struct attribute_group skx_upi_mapping_group = {
4375 	.is_visible	= skx_upi_mapping_visible,
4376 };
4377 
4378 static const struct attribute_group *skx_upi_attr_update[] = {
4379 	&skx_upi_mapping_group,
4380 	NULL
4381 };
4382 
4383 static void
4384 pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4385 {
4386 	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4387 }
4388 
4389 static void skx_upi_set_mapping(struct intel_uncore_type *type)
4390 {
4391 	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4392 }
4393 
4394 static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
4395 {
4396 	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4397 }
4398 
4399 static struct intel_uncore_type skx_uncore_upi = {
4400 	.name		= "upi",
4401 	.num_counters   = 4,
4402 	.num_boxes	= 3,
4403 	.perf_ctr_bits	= 48,
4404 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4405 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4406 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4407 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4408 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4409 	.ops		= &skx_upi_uncore_pci_ops,
4410 	.format_group	= &skx_upi_uncore_format_group,
4411 	.attr_update	= skx_upi_attr_update,
4412 	.get_topology	= skx_upi_get_topology,
4413 	.set_mapping	= skx_upi_set_mapping,
4414 	.cleanup_mapping = skx_upi_cleanup_mapping,
4415 };
4416 
4417 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4418 {
4419 	struct pci_dev *pdev = box->pci_dev;
4420 
4421 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4422 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4423 }
4424 
4425 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4426 	.init_box	= skx_m2m_uncore_pci_init_box,
4427 	.disable_box	= snbep_uncore_pci_disable_box,
4428 	.enable_box	= snbep_uncore_pci_enable_box,
4429 	.disable_event	= snbep_uncore_pci_disable_event,
4430 	.enable_event	= snbep_uncore_pci_enable_event,
4431 	.read_counter	= snbep_uncore_pci_read_counter,
4432 };
4433 
4434 static struct intel_uncore_type skx_uncore_m2m = {
4435 	.name		= "m2m",
4436 	.num_counters   = 4,
4437 	.num_boxes	= 2,
4438 	.perf_ctr_bits	= 48,
4439 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4440 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4441 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4442 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4443 	.ops		= &skx_m2m_uncore_pci_ops,
4444 	.format_group	= &skx_uncore_format_group,
4445 };
4446 
4447 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4448 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4449 	EVENT_CONSTRAINT_END
4450 };
4451 
4452 static struct intel_uncore_type skx_uncore_m2pcie = {
4453 	.name		= "m2pcie",
4454 	.num_counters   = 4,
4455 	.num_boxes	= 4,
4456 	.perf_ctr_bits	= 48,
4457 	.constraints	= skx_uncore_m2pcie_constraints,
4458 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4459 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4460 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4461 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4462 	.ops		= &ivbep_uncore_pci_ops,
4463 	.format_group	= &skx_uncore_format_group,
4464 };
4465 
4466 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4467 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4468 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4469 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4470 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4471 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4472 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4473 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4474 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4475 	EVENT_CONSTRAINT_END
4476 };
4477 
4478 static struct intel_uncore_type skx_uncore_m3upi = {
4479 	.name		= "m3upi",
4480 	.num_counters   = 3,
4481 	.num_boxes	= 3,
4482 	.perf_ctr_bits	= 48,
4483 	.constraints	= skx_uncore_m3upi_constraints,
4484 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4485 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4486 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4487 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4488 	.ops		= &ivbep_uncore_pci_ops,
4489 	.format_group	= &skx_uncore_format_group,
4490 };
4491 
4492 enum {
4493 	SKX_PCI_UNCORE_IMC,
4494 	SKX_PCI_UNCORE_M2M,
4495 	SKX_PCI_UNCORE_UPI,
4496 	SKX_PCI_UNCORE_M2PCIE,
4497 	SKX_PCI_UNCORE_M3UPI,
4498 };
4499 
4500 static struct intel_uncore_type *skx_pci_uncores[] = {
4501 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4502 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4503 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4504 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4505 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4506 	NULL,
4507 };
4508 
4509 static const struct pci_device_id skx_uncore_pci_ids[] = {
4510 	{ /* MC0 Channel 0 */
4511 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4512 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4513 	},
4514 	{ /* MC0 Channel 1 */
4515 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4516 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4517 	},
4518 	{ /* MC0 Channel 2 */
4519 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4520 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4521 	},
4522 	{ /* MC1 Channel 0 */
4523 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4524 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4525 	},
4526 	{ /* MC1 Channel 1 */
4527 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4528 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4529 	},
4530 	{ /* MC1 Channel 2 */
4531 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4532 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4533 	},
4534 	{ /* M2M0 */
4535 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4536 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4537 	},
4538 	{ /* M2M1 */
4539 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4540 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4541 	},
4542 	{ /* UPI0 Link 0 */
4543 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4544 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4545 	},
4546 	{ /* UPI0 Link 1 */
4547 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4548 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4549 	},
4550 	{ /* UPI1 Link 2 */
4551 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4552 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4553 	},
4554 	{ /* M2PCIe 0 */
4555 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4556 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4557 	},
4558 	{ /* M2PCIe 1 */
4559 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4560 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4561 	},
4562 	{ /* M2PCIe 2 */
4563 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4564 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4565 	},
4566 	{ /* M2PCIe 3 */
4567 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4568 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4569 	},
4570 	{ /* M3UPI0 Link 0 */
4571 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4572 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4573 	},
4574 	{ /* M3UPI0 Link 1 */
4575 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4576 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4577 	},
4578 	{ /* M3UPI1 Link 2 */
4579 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4580 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4581 	},
4582 	{ /* end: all zeroes */ }
4583 };
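
/*
 * Unlike the HSX/BDX tables above, several SKX units share one PCI device
 * ID (0x2042/0x2046/0x204a each cover two IMCs; 0x2058 covers all three
 * UPI links), so UNCORE_PCI_DEV_FULL_DATA() additionally records the
 * expected PCI device/function to tell the instances apart.
 */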
4584
4586 static struct pci_driver skx_uncore_pci_driver = {
4587 	.name		= "skx_uncore",
4588 	.id_table	= skx_uncore_pci_ids,
4589 };
4590 
4591 int skx_uncore_pci_init(void)
4592 {
4593 	/* need to double check pci address */
4594 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4595 
4596 	if (ret)
4597 		return ret;
4598 
4599 	uncore_pci_uncores = skx_pci_uncores;
4600 	uncore_pci_driver = &skx_uncore_pci_driver;
4601 	return 0;
4602 }
4603 
4604 /* end of SKX uncore support */
4605 
4606 /* SNR uncore support */
4607 
4608 static struct intel_uncore_type snr_uncore_ubox = {
4609 	.name			= "ubox",
4610 	.num_counters		= 2,
4611 	.num_boxes		= 1,
4612 	.perf_ctr_bits		= 48,
4613 	.fixed_ctr_bits		= 48,
4614 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4615 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4616 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4617 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4618 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4619 	.ops			= &ivbep_uncore_msr_ops,
4620 	.format_group		= &ivbep_uncore_format_group,
4621 };
4622 
4623 static struct attribute *snr_uncore_cha_formats_attr[] = {
4624 	&format_attr_event.attr,
4625 	&format_attr_umask_ext2.attr,
4626 	&format_attr_edge.attr,
4627 	&format_attr_tid_en.attr,
4628 	&format_attr_inv.attr,
4629 	&format_attr_thresh8.attr,
4630 	&format_attr_filter_tid5.attr,
4631 	NULL,
4632 };
4633 static const struct attribute_group snr_uncore_chabox_format_group = {
4634 	.name = "format",
4635 	.attrs = snr_uncore_cha_formats_attr,
4636 };
4637 
4638 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4639 {
4640 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4641 
4642 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4643 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4644 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4645 	reg1->idx = 0;
4646 
4647 	return 0;
4648 }
4649 
4650 static void snr_cha_enable_event(struct intel_uncore_box *box,
4651 				   struct perf_event *event)
4652 {
4653 	struct hw_perf_event *hwc = &event->hw;
4654 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4655 
4656 	if (reg1->idx != EXTRA_REG_NONE)
4657 		wrmsrl(reg1->reg, reg1->config);
4658 
4659 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4660 }
4661 
4662 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4663 	.init_box		= ivbep_uncore_msr_init_box,
4664 	.disable_box		= snbep_uncore_msr_disable_box,
4665 	.enable_box		= snbep_uncore_msr_enable_box,
4666 	.disable_event		= snbep_uncore_msr_disable_event,
4667 	.enable_event		= snr_cha_enable_event,
4668 	.read_counter		= uncore_msr_read_counter,
4669 	.hw_config		= snr_cha_hw_config,
4670 };
4671 
4672 static struct intel_uncore_type snr_uncore_chabox = {
4673 	.name			= "cha",
4674 	.num_counters		= 4,
4675 	.num_boxes		= 6,
4676 	.perf_ctr_bits		= 48,
4677 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4678 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4679 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4680 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4681 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4682 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4683 	.ops			= &snr_uncore_chabox_ops,
4684 	.format_group		= &snr_uncore_chabox_format_group,
4685 };
4686 
4687 static struct attribute *snr_uncore_iio_formats_attr[] = {
4688 	&format_attr_event.attr,
4689 	&format_attr_umask.attr,
4690 	&format_attr_edge.attr,
4691 	&format_attr_inv.attr,
4692 	&format_attr_thresh9.attr,
4693 	&format_attr_ch_mask2.attr,
4694 	&format_attr_fc_mask2.attr,
4695 	NULL,
4696 };
4697 
4698 static const struct attribute_group snr_uncore_iio_format_group = {
4699 	.name = "format",
4700 	.attrs = snr_uncore_iio_formats_attr,
4701 };
4702 
4703 static umode_t
4704 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4705 {
4706 	/* Root bus 0x00 is valid only for pmu_idx = 1. */
4707 	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4708 }
4709 
4710 static struct attribute_group snr_iio_mapping_group = {
4711 	.is_visible	= snr_iio_mapping_visible,
4712 };
4713 
4714 static const struct attribute_group *snr_iio_attr_update[] = {
4715 	&snr_iio_mapping_group,
4716 	NULL,
4717 };
4718 
4719 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4720 {
4721 	u32 sad_cfg;
4722 	int die, stack_id, ret = -EPERM;
4723 	struct pci_dev *dev = NULL;
4724 
4725 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4726 		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4727 		if (ret) {
4728 			ret = pcibios_err_to_errno(ret);
4729 			break;
4730 		}
4731 
4732 		die = uncore_pcibus_to_dieid(dev->bus);
4733 		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4734 		if (die < 0 || stack_id >= type->num_boxes) {
4735 			ret = -EPERM;
4736 			break;
4737 		}
4738 
4739 		/* Convert stack id from SAD_CONTROL to PMON notation. */
4740 		stack_id = sad_pmon_mapping[stack_id];
4741 
4742 		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4743 		type->topology[die][stack_id].pmu_idx = stack_id;
4744 		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4745 	}
4746 
4747 	pci_dev_put(dev);
4748 
4749 	return ret;
4750 }
4751 
4752 /*
4753  * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON IDs.
4754  */
4755 enum {
4756 	SNR_QAT_PMON_ID,
4757 	SNR_CBDMA_DMI_PMON_ID,
4758 	SNR_NIS_PMON_ID,
4759 	SNR_DLB_PMON_ID,
4760 	SNR_PCIE_GEN3_PMON_ID
4761 };
4762 
4763 static u8 snr_sad_pmon_mapping[] = {
4764 	SNR_CBDMA_DMI_PMON_ID,
4765 	SNR_PCIE_GEN3_PMON_ID,
4766 	SNR_DLB_PMON_ID,
4767 	SNR_NIS_PMON_ID,
4768 	SNR_QAT_PMON_ID
4769 };
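
/*
 * Illustration: the array is indexed by SAD_CONTROL_CFG stack ID and yields
 * the PMON ID, e.g. SAD stack 1 (PCIe gen3) is PMON box 4 and SAD stack 4
 * (QAT) is PMON box 0.
 */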
4770 
4771 static int snr_iio_get_topology(struct intel_uncore_type *type)
4772 {
4773 	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4774 }
4775 
4776 static void snr_iio_set_mapping(struct intel_uncore_type *type)
4777 {
4778 	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4779 }
4780 
4781 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4782 {
4783 	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
4784 }
4785 
4786 static struct event_constraint snr_uncore_iio_constraints[] = {
4787 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4788 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4789 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
4790 	EVENT_CONSTRAINT_END
4791 };
4792 
4793 static struct intel_uncore_type snr_uncore_iio = {
4794 	.name			= "iio",
4795 	.num_counters		= 4,
4796 	.num_boxes		= 5,
4797 	.perf_ctr_bits		= 48,
4798 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4799 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4800 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4801 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4802 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4803 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4804 	.constraints		= snr_uncore_iio_constraints,
4805 	.ops			= &ivbep_uncore_msr_ops,
4806 	.format_group		= &snr_uncore_iio_format_group,
4807 	.attr_update		= snr_iio_attr_update,
4808 	.get_topology		= snr_iio_get_topology,
4809 	.set_mapping		= snr_iio_set_mapping,
4810 	.cleanup_mapping	= snr_iio_cleanup_mapping,
4811 };
4812 
4813 static struct intel_uncore_type snr_uncore_irp = {
4814 	.name			= "irp",
4815 	.num_counters		= 2,
4816 	.num_boxes		= 5,
4817 	.perf_ctr_bits		= 48,
4818 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4819 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4820 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4821 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4822 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4823 	.ops			= &ivbep_uncore_msr_ops,
4824 	.format_group		= &ivbep_uncore_format_group,
4825 };
4826 
4827 static struct intel_uncore_type snr_uncore_m2pcie = {
4828 	.name		= "m2pcie",
4829 	.num_counters	= 4,
4830 	.num_boxes	= 5,
4831 	.perf_ctr_bits	= 48,
4832 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4833 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4834 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4835 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4836 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4837 	.ops		= &ivbep_uncore_msr_ops,
4838 	.format_group	= &ivbep_uncore_format_group,
4839 };
4840 
4841 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4842 {
4843 	struct hw_perf_event *hwc = &event->hw;
4844 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4845 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4846 
4847 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4848 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4849 		reg1->idx = ev_sel - 0xb;
4850 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4851 	}
4852 	return 0;
4853 }
4854 
4855 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4856 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4857 	.hw_config		= snr_pcu_hw_config,
4858 	.get_constraint		= snbep_pcu_get_constraint,
4859 	.put_constraint		= snbep_pcu_put_constraint,
4860 };
4861 
4862 static struct intel_uncore_type snr_uncore_pcu = {
4863 	.name			= "pcu",
4864 	.num_counters		= 4,
4865 	.num_boxes		= 1,
4866 	.perf_ctr_bits		= 48,
4867 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4868 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4869 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4870 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4871 	.num_shared_regs	= 1,
4872 	.ops			= &snr_uncore_pcu_ops,
4873 	.format_group		= &skx_uncore_pcu_format_group,
4874 };
4875 
4876 enum perf_uncore_snr_iio_freerunning_type_id {
4877 	SNR_IIO_MSR_IOCLK,
4878 	SNR_IIO_MSR_BW_IN,
4879 
4880 	SNR_IIO_FREERUNNING_TYPE_MAX,
4881 };
4882 
4883 static struct freerunning_counters snr_iio_freerunning[] = {
4884 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4885 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4886 };
4887 
4888 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4889 	/* Free-Running IIO CLOCKS Counter */
4890 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4891 	/* Free-Running IIO BANDWIDTH IN Counters */
4892 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4893 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4894 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4895 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4896 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4897 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4898 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4899 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4900 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4901 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4902 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4903 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4904 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4905 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4906 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4907 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4908 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4909 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4910 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4911 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4912 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4913 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4914 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4915 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4916 	{ /* end: all zeroes */ },
4917 };
4918 
4919 static struct intel_uncore_type snr_uncore_iio_free_running = {
4920 	.name			= "iio_free_running",
4921 	.num_counters		= 9,
4922 	.num_boxes		= 5,
4923 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4924 	.freerunning		= snr_iio_freerunning,
4925 	.ops			= &skx_uncore_iio_freerunning_ops,
4926 	.event_descs		= snr_uncore_iio_freerunning_events,
4927 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4928 };
4929 
4930 static struct intel_uncore_type *snr_msr_uncores[] = {
4931 	&snr_uncore_ubox,
4932 	&snr_uncore_chabox,
4933 	&snr_uncore_iio,
4934 	&snr_uncore_irp,
4935 	&snr_uncore_m2pcie,
4936 	&snr_uncore_pcu,
4937 	&snr_uncore_iio_free_running,
4938 	NULL,
4939 };
4940 
4941 void snr_uncore_cpu_init(void)
4942 {
4943 	uncore_msr_uncores = snr_msr_uncores;
4944 }
4945 
4946 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4947 {
4948 	struct pci_dev *pdev = box->pci_dev;
4949 	int box_ctl = uncore_pci_box_ctl(box);
4950 
4951 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4952 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4953 }
4954 
4955 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4956 	.init_box	= snr_m2m_uncore_pci_init_box,
4957 	.disable_box	= snbep_uncore_pci_disable_box,
4958 	.enable_box	= snbep_uncore_pci_enable_box,
4959 	.disable_event	= snbep_uncore_pci_disable_event,
4960 	.enable_event	= snbep_uncore_pci_enable_event,
4961 	.read_counter	= snbep_uncore_pci_read_counter,
4962 };
4963 
4964 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4965 	&format_attr_event.attr,
4966 	&format_attr_umask_ext3.attr,
4967 	&format_attr_edge.attr,
4968 	&format_attr_inv.attr,
4969 	&format_attr_thresh8.attr,
4970 	NULL,
4971 };
4972 
4973 static const struct attribute_group snr_m2m_uncore_format_group = {
4974 	.name = "format",
4975 	.attrs = snr_m2m_uncore_formats_attr,
4976 };
4977 
4978 static struct intel_uncore_type snr_uncore_m2m = {
4979 	.name		= "m2m",
4980 	.num_counters   = 4,
4981 	.num_boxes	= 1,
4982 	.perf_ctr_bits	= 48,
4983 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4984 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4985 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4986 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4987 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4988 	.ops		= &snr_m2m_uncore_pci_ops,
4989 	.format_group	= &snr_m2m_uncore_format_group,
4990 };
4991 
4992 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4993 {
4994 	struct pci_dev *pdev = box->pci_dev;
4995 	struct hw_perf_event *hwc = &event->hw;
4996 
4997 	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4998 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4999 }
5000 
5001 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
5002 	.init_box	= snr_m2m_uncore_pci_init_box,
5003 	.disable_box	= snbep_uncore_pci_disable_box,
5004 	.enable_box	= snbep_uncore_pci_enable_box,
5005 	.disable_event	= snbep_uncore_pci_disable_event,
5006 	.enable_event	= snr_uncore_pci_enable_event,
5007 	.read_counter	= snbep_uncore_pci_read_counter,
5008 };
5009 
5010 static struct intel_uncore_type snr_uncore_pcie3 = {
5011 	.name		= "pcie3",
5012 	.num_counters	= 4,
5013 	.num_boxes	= 1,
5014 	.perf_ctr_bits	= 48,
5015 	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
5016 	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
5017 	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
5018 	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
5019 	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
5020 	.ops		= &snr_pcie3_uncore_pci_ops,
5021 	.format_group	= &skx_uncore_iio_format_group,
5022 };
5023 
5024 enum {
5025 	SNR_PCI_UNCORE_M2M,
5026 	SNR_PCI_UNCORE_PCIE3,
5027 };
5028 
5029 static struct intel_uncore_type *snr_pci_uncores[] = {
5030 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
5031 	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
5032 	NULL,
5033 };
5034 
5035 static const struct pci_device_id snr_uncore_pci_ids[] = {
5036 	{ /* M2M */
5037 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5038 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
5039 	},
5040 	{ /* end: all zeroes */ }
5041 };
5042 
5043 static struct pci_driver snr_uncore_pci_driver = {
5044 	.name		= "snr_uncore",
5045 	.id_table	= snr_uncore_pci_ids,
5046 };
5047 
5048 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
5049 	{ /* PCIe3 RP */
5050 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
5051 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
5052 	},
5053 	{ /* end: all zeroes */ }
5054 };
5055 
5056 static struct pci_driver snr_uncore_pci_sub_driver = {
5057 	.name		= "snr_uncore_sub",
5058 	.id_table	= snr_uncore_pci_sub_ids,
5059 };
5060 
5061 int snr_uncore_pci_init(void)
5062 {
5063 	/* SNR UBOX DID */
5064 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
5065 					 SKX_GIDNIDMAP, true);
5066 
5067 	if (ret)
5068 		return ret;
5069 
5070 	uncore_pci_uncores = snr_pci_uncores;
5071 	uncore_pci_driver = &snr_uncore_pci_driver;
5072 	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
5073 	return 0;
5074 }
5075 
5076 #define SNR_MC_DEVICE_ID	0x3451
5077 
5078 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
5079 {
5080 	struct pci_dev *mc_dev = NULL;
5081 	int pkg;
5082 
	while ((mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev))) {
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
		if (pkg == id)
			break;
	}
5091 	return mc_dev;
5092 }
5093 
5094 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
5095 			       unsigned int box_ctl, int mem_offset,
5096 			       unsigned int device)
5097 {
5098 	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
5099 	struct intel_uncore_type *type = box->pmu->type;
5100 	resource_size_t addr;
5101 	u32 pci_dword;
5102 
5103 	if (!pdev)
5104 		return -ENODEV;
5105 
5106 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
5107 	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
5108 
5109 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
5110 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
5111 
5112 	addr += box_ctl;
5113 
5114 	pci_dev_put(pdev);
5115 
5116 	box->io_addr = ioremap(addr, type->mmio_map_size);
5117 	if (!box->io_addr) {
5118 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
5119 		return -EINVAL;
5120 	}
5121 
5122 	return 0;
5123 }
5124 
5125 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
5126 				       unsigned int box_ctl, int mem_offset,
5127 				       unsigned int device)
5128 {
5129 	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
5130 		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
5131 }
5132 
5133 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
5134 {
5135 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
5136 				   SNR_IMC_MMIO_MEM0_OFFSET,
5137 				   SNR_MC_DEVICE_ID);
5138 }
5139 
5140 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
5141 {
5142 	u32 config;
5143 
5144 	if (!box->io_addr)
5145 		return;
5146 
5147 	config = readl(box->io_addr);
5148 	config |= SNBEP_PMON_BOX_CTL_FRZ;
5149 	writel(config, box->io_addr);
5150 }
5151 
5152 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
5153 {
5154 	u32 config;
5155 
5156 	if (!box->io_addr)
5157 		return;
5158 
5159 	config = readl(box->io_addr);
5160 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
5161 	writel(config, box->io_addr);
5162 }
5163 
5164 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5165 					   struct perf_event *event)
5166 {
5167 	struct hw_perf_event *hwc = &event->hw;
5168 
5169 	if (!box->io_addr)
5170 		return;
5171 
5172 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5173 		return;
5174 
5175 	writel(hwc->config | SNBEP_PMON_CTL_EN,
5176 	       box->io_addr + hwc->config_base);
5177 }
5178 
5179 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
5180 					    struct perf_event *event)
5181 {
5182 	struct hw_perf_event *hwc = &event->hw;
5183 
5184 	if (!box->io_addr)
5185 		return;
5186 
5187 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
5188 		return;
5189 
5190 	writel(hwc->config, box->io_addr + hwc->config_base);
5191 }
5192 
5193 static struct intel_uncore_ops snr_uncore_mmio_ops = {
5194 	.init_box	= snr_uncore_mmio_init_box,
5195 	.exit_box	= uncore_mmio_exit_box,
5196 	.disable_box	= snr_uncore_mmio_disable_box,
5197 	.enable_box	= snr_uncore_mmio_enable_box,
5198 	.disable_event	= snr_uncore_mmio_disable_event,
5199 	.enable_event	= snr_uncore_mmio_enable_event,
5200 	.read_counter	= uncore_mmio_read_counter,
5201 };
5202 
5203 static struct uncore_event_desc snr_uncore_imc_events[] = {
5204 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
5205 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
5206 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
5207 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
5208 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
5209 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
5210 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
5211 	{ /* end: all zeroes */ },
5212 };
5213 
5214 static struct intel_uncore_type snr_uncore_imc = {
5215 	.name		= "imc",
5216 	.num_counters   = 4,
5217 	.num_boxes	= 2,
5218 	.perf_ctr_bits	= 48,
5219 	.fixed_ctr_bits	= 48,
5220 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5221 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5222 	.event_descs	= snr_uncore_imc_events,
5223 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5224 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5225 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5226 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5227 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5228 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5229 	.ops		= &snr_uncore_mmio_ops,
5230 	.format_group	= &skx_uncore_format_group,
5231 };
5232 
5233 enum perf_uncore_snr_imc_freerunning_type_id {
5234 	SNR_IMC_DCLK,
5235 	SNR_IMC_DDR,
5236 
5237 	SNR_IMC_FREERUNNING_TYPE_MAX,
5238 };
5239 
5240 static struct freerunning_counters snr_imc_freerunning[] = {
5241 	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5242 	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5243 };
5244 
5245 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
5246 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
5247 
5248 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
5249 	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
5250 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
5251 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
5252 	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
5253 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
5254 	{ /* end: all zeroes */ },
5255 };
5256 
5257 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
5258 	.init_box	= snr_uncore_mmio_init_box,
5259 	.exit_box	= uncore_mmio_exit_box,
5260 	.read_counter	= uncore_mmio_read_counter,
5261 	.hw_config	= uncore_freerunning_hw_config,
5262 };
5263 
5264 static struct intel_uncore_type snr_uncore_imc_free_running = {
5265 	.name			= "imc_free_running",
5266 	.num_counters		= 3,
5267 	.num_boxes		= 1,
5268 	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
5269 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5270 	.freerunning		= snr_imc_freerunning,
5271 	.ops			= &snr_uncore_imc_freerunning_ops,
5272 	.event_descs		= snr_uncore_imc_freerunning_events,
5273 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5274 };
5275 
5276 static struct intel_uncore_type *snr_mmio_uncores[] = {
5277 	&snr_uncore_imc,
5278 	&snr_uncore_imc_free_running,
5279 	NULL,
5280 };
5281 
5282 void snr_uncore_mmio_init(void)
5283 {
5284 	uncore_mmio_uncores = snr_mmio_uncores;
5285 }
5286 
5287 /* end of SNR uncore support */
5288 
5289 /* ICX uncore support */
5290 
5291 static unsigned icx_cha_msr_offsets[] = {
5292 	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5293 	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5294 	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5295 	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
5296 	0x1c,  0x2a,  0x38,  0x46,
5297 };
5298 
5299 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5300 {
5301 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tid_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tid_en) {
5305 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5306 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
5307 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5308 		reg1->idx = 0;
5309 	}
5310 
5311 	return 0;
5312 }
5313 
5314 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5315 	.init_box		= ivbep_uncore_msr_init_box,
5316 	.disable_box		= snbep_uncore_msr_disable_box,
5317 	.enable_box		= snbep_uncore_msr_enable_box,
5318 	.disable_event		= snbep_uncore_msr_disable_event,
5319 	.enable_event		= snr_cha_enable_event,
5320 	.read_counter		= uncore_msr_read_counter,
5321 	.hw_config		= icx_cha_hw_config,
5322 };
5323 
5324 static struct intel_uncore_type icx_uncore_chabox = {
5325 	.name			= "cha",
5326 	.num_counters		= 4,
5327 	.perf_ctr_bits		= 48,
5328 	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
5329 	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
5330 	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
5331 	.msr_offsets		= icx_cha_msr_offsets,
5332 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5333 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
5334 	.constraints		= skx_uncore_chabox_constraints,
5335 	.ops			= &icx_uncore_chabox_ops,
5336 	.format_group		= &snr_uncore_chabox_format_group,
5337 };
5338 
5339 static unsigned icx_msr_offsets[] = {
5340 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5341 };
5342 
5343 static struct event_constraint icx_uncore_iio_constraints[] = {
5344 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5345 	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5346 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5347 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
5348 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5349 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5350 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
5351 	EVENT_CONSTRAINT_END
5352 };
5353 
5354 static umode_t
5355 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5356 {
5357 	/* Root bus 0x00 is valid only for pmu_idx = 5. */
5358 	return pmu_iio_mapping_visible(kobj, attr, die, 5);
5359 }
5360 
5361 static struct attribute_group icx_iio_mapping_group = {
5362 	.is_visible	= icx_iio_mapping_visible,
5363 };
5364 
5365 static const struct attribute_group *icx_iio_attr_update[] = {
5366 	&icx_iio_mapping_group,
5367 	NULL,
5368 };
5369 
5370 /*
 * ICX has a static mapping of stack IDs from the SAD_CONTROL_CFG notation to
 * PMON IDs.
5372  */
5373 enum {
5374 	ICX_PCIE1_PMON_ID,
5375 	ICX_PCIE2_PMON_ID,
5376 	ICX_PCIE3_PMON_ID,
5377 	ICX_PCIE4_PMON_ID,
5378 	ICX_PCIE5_PMON_ID,
5379 	ICX_CBDMA_DMI_PMON_ID
5380 };
5381 
5382 static u8 icx_sad_pmon_mapping[] = {
5383 	ICX_CBDMA_DMI_PMON_ID,
5384 	ICX_PCIE1_PMON_ID,
5385 	ICX_PCIE2_PMON_ID,
5386 	ICX_PCIE3_PMON_ID,
5387 	ICX_PCIE4_PMON_ID,
5388 	ICX_PCIE5_PMON_ID,
5389 };
5390 
5391 static int icx_iio_get_topology(struct intel_uncore_type *type)
5392 {
5393 	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5394 }
5395 
5396 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5397 {
	/* Detect an ICX-D system; the IIO mapping is not supported there. */
5399 	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
5400 		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5401 		return;
5402 	}
5403 	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5404 }
5405 
5406 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5407 {
5408 	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5409 }
5410 
5411 static struct intel_uncore_type icx_uncore_iio = {
5412 	.name			= "iio",
5413 	.num_counters		= 4,
5414 	.num_boxes		= 6,
5415 	.perf_ctr_bits		= 48,
5416 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
5417 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
5418 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5419 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5420 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
5421 	.msr_offsets		= icx_msr_offsets,
5422 	.constraints		= icx_uncore_iio_constraints,
5423 	.ops			= &skx_uncore_iio_ops,
5424 	.format_group		= &snr_uncore_iio_format_group,
5425 	.attr_update		= icx_iio_attr_update,
5426 	.get_topology		= icx_iio_get_topology,
5427 	.set_mapping		= icx_iio_set_mapping,
5428 	.cleanup_mapping	= icx_iio_cleanup_mapping,
5429 };
5430 
5431 static struct intel_uncore_type icx_uncore_irp = {
5432 	.name			= "irp",
5433 	.num_counters		= 2,
5434 	.num_boxes		= 6,
5435 	.perf_ctr_bits		= 48,
5436 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
5437 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
5438 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5439 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
5440 	.msr_offsets		= icx_msr_offsets,
5441 	.ops			= &ivbep_uncore_msr_ops,
5442 	.format_group		= &ivbep_uncore_format_group,
5443 };
5444 
5445 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5446 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5447 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5448 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5449 	EVENT_CONSTRAINT_END
5450 };
5451 
5452 static struct intel_uncore_type icx_uncore_m2pcie = {
5453 	.name		= "m2pcie",
5454 	.num_counters	= 4,
5455 	.num_boxes	= 6,
5456 	.perf_ctr_bits	= 48,
5457 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
5458 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
5459 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
5460 	.msr_offsets	= icx_msr_offsets,
5461 	.constraints	= icx_uncore_m2pcie_constraints,
5462 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5463 	.ops		= &ivbep_uncore_msr_ops,
5464 	.format_group	= &ivbep_uncore_format_group,
5465 };
5466 
5467 enum perf_uncore_icx_iio_freerunning_type_id {
5468 	ICX_IIO_MSR_IOCLK,
5469 	ICX_IIO_MSR_BW_IN,
5470 
5471 	ICX_IIO_FREERUNNING_TYPE_MAX,
5472 };
5473 
5474 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5475 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5476 };
5477 
5478 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5479 	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5480 };
5481 
5482 static struct freerunning_counters icx_iio_freerunning[] = {
5483 	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5484 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5485 };
5486 
5487 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5488 	/* Free-Running IIO CLOCKS Counter */
5489 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
5490 	/* Free-Running IIO BANDWIDTH IN Counters */
5491 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
5492 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
5493 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
5494 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
5495 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
5496 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
5497 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
5498 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
5499 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
5500 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
5501 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
5502 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
5503 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
5504 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
5505 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
5506 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
5507 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
5508 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
5509 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
5510 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
5511 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
5512 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
5513 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
5514 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
5515 	{ /* end: all zeroes */ },
5516 };
5517 
5518 static struct intel_uncore_type icx_uncore_iio_free_running = {
5519 	.name			= "iio_free_running",
5520 	.num_counters		= 9,
5521 	.num_boxes		= 6,
5522 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
5523 	.freerunning		= icx_iio_freerunning,
5524 	.ops			= &skx_uncore_iio_freerunning_ops,
5525 	.event_descs		= icx_uncore_iio_freerunning_events,
5526 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5527 };
5528 
5529 static struct intel_uncore_type *icx_msr_uncores[] = {
5530 	&skx_uncore_ubox,
5531 	&icx_uncore_chabox,
5532 	&icx_uncore_iio,
5533 	&icx_uncore_irp,
5534 	&icx_uncore_m2pcie,
5535 	&skx_uncore_pcu,
5536 	&icx_uncore_iio_free_running,
5537 	NULL,
5538 };
5539 
5540 /*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
5543  */
5544 #define ICX_CAPID6		0x9c
5545 #define ICX_CAPID7		0xa0
5546 
5547 static u64 icx_count_chabox(void)
5548 {
5549 	struct pci_dev *dev = NULL;
5550 	u64 caps = 0;
5551 
5552 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5553 	if (!dev)
5554 		goto out;
5555 
5556 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5557 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5558 out:
5559 	pci_dev_put(dev);
5560 	return hweight64(caps);
5561 }
5562 
5563 void icx_uncore_cpu_init(void)
5564 {
5565 	u64 num_boxes = icx_count_chabox();
5566 
5567 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5568 		return;
5569 	icx_uncore_chabox.num_boxes = num_boxes;
5570 	uncore_msr_uncores = icx_msr_uncores;
5571 }
5572 
5573 static struct intel_uncore_type icx_uncore_m2m = {
5574 	.name		= "m2m",
5575 	.num_counters   = 4,
5576 	.num_boxes	= 4,
5577 	.perf_ctr_bits	= 48,
5578 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
5579 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
5580 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5581 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
5582 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
5583 	.ops		= &snr_m2m_uncore_pci_ops,
5584 	.format_group	= &snr_m2m_uncore_format_group,
5585 };
5586 
5587 static struct attribute *icx_upi_uncore_formats_attr[] = {
5588 	&format_attr_event.attr,
5589 	&format_attr_umask_ext4.attr,
5590 	&format_attr_edge.attr,
5591 	&format_attr_inv.attr,
5592 	&format_attr_thresh8.attr,
5593 	NULL,
5594 };
5595 
5596 static const struct attribute_group icx_upi_uncore_format_group = {
5597 	.name = "format",
5598 	.attrs = icx_upi_uncore_formats_attr,
5599 };
5600 
5601 #define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
5602 #define ICX_UPI_REGS_ADDR_FUNCTION	0x01
5603 
5604 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5605 {
5606 	struct pci_dev *ubox = NULL;
5607 	struct pci_dev *dev = NULL;
5608 	u32 nid, gid;
5609 	int i, idx, ret = -EPERM;
5610 	struct intel_uncore_topology *upi;
5611 	unsigned int devfn;
5612 
	/* The GIDNIDMAP method supports machines with at most 8 sockets. */
5614 	if (uncore_max_dies() > 8)
5615 		goto err;
5616 
5617 	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5618 		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5619 		if (ret) {
5620 			ret = pcibios_err_to_errno(ret);
5621 			break;
5622 		}
5623 
5624 		for (i = 0; i < 8; i++) {
5625 			if (nid != GIDNIDMAP(gid, i))
5626 				continue;
5627 			for (idx = 0; idx < type->num_boxes; idx++) {
5628 				upi = &type->topology[nid][idx];
5629 				devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5630 				dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5631 								  ubox->bus->number,
5632 								  devfn);
5633 				if (dev) {
5634 					ret = upi_fill_topology(dev, upi, idx);
5635 					if (ret)
5636 						goto err;
5637 				}
5638 			}
5639 		}
5640 	}
5641 err:
5642 	pci_dev_put(ubox);
5643 	pci_dev_put(dev);
5644 	return ret;
5645 }
5646 
5647 static int icx_upi_get_topology(struct intel_uncore_type *type)
5648 {
5649 	return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5650 }
5651 
5652 static struct attribute_group icx_upi_mapping_group = {
5653 	.is_visible	= skx_upi_mapping_visible,
5654 };
5655 
5656 static const struct attribute_group *icx_upi_attr_update[] = {
5657 	&icx_upi_mapping_group,
5658 	NULL
5659 };
5660 
5661 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5662 {
5663 	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5664 }
5665 
5666 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5667 {
5668 	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5669 }
5670 
5671 static struct intel_uncore_type icx_uncore_upi = {
5672 	.name		= "upi",
5673 	.num_counters   = 4,
5674 	.num_boxes	= 3,
5675 	.perf_ctr_bits	= 48,
5676 	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
5677 	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
5678 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5679 	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5680 	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
5681 	.ops		= &skx_upi_uncore_pci_ops,
5682 	.format_group	= &icx_upi_uncore_format_group,
5683 	.attr_update	= icx_upi_attr_update,
5684 	.get_topology	= icx_upi_get_topology,
5685 	.set_mapping	= icx_upi_set_mapping,
5686 	.cleanup_mapping = icx_upi_cleanup_mapping,
5687 };
5688 
5689 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5690 	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5691 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5692 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5693 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5694 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5695 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5696 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5697 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5698 	EVENT_CONSTRAINT_END
5699 };
5700 
5701 static struct intel_uncore_type icx_uncore_m3upi = {
5702 	.name		= "m3upi",
5703 	.num_counters   = 4,
5704 	.num_boxes	= 3,
5705 	.perf_ctr_bits	= 48,
5706 	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
5707 	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
5708 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5709 	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
5710 	.constraints	= icx_uncore_m3upi_constraints,
5711 	.ops		= &ivbep_uncore_pci_ops,
5712 	.format_group	= &skx_uncore_format_group,
5713 };
5714 
5715 enum {
5716 	ICX_PCI_UNCORE_M2M,
5717 	ICX_PCI_UNCORE_UPI,
5718 	ICX_PCI_UNCORE_M3UPI,
5719 };
5720 
5721 static struct intel_uncore_type *icx_pci_uncores[] = {
5722 	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
5723 	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
5724 	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
5725 	NULL,
5726 };
5727 
5728 static const struct pci_device_id icx_uncore_pci_ids[] = {
5729 	{ /* M2M 0 */
5730 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5731 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5732 	},
5733 	{ /* M2M 1 */
5734 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5735 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5736 	},
5737 	{ /* M2M 2 */
5738 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5739 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5740 	},
5741 	{ /* M2M 3 */
5742 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5743 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5744 	},
5745 	{ /* UPI Link 0 */
5746 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5747 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5748 	},
5749 	{ /* UPI Link 1 */
5750 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5751 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5752 	},
5753 	{ /* UPI Link 2 */
5754 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5755 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5756 	},
5757 	{ /* M3UPI Link 0 */
5758 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5759 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5760 	},
5761 	{ /* M3UPI Link 1 */
5762 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5763 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5764 	},
5765 	{ /* M3UPI Link 2 */
5766 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5767 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5768 	},
5769 	{ /* end: all zeroes */ }
5770 };
5771 
5772 static struct pci_driver icx_uncore_pci_driver = {
5773 	.name		= "icx_uncore",
5774 	.id_table	= icx_uncore_pci_ids,
5775 };
5776 
5777 int icx_uncore_pci_init(void)
5778 {
5779 	/* ICX UBOX DID */
5780 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5781 					 SKX_GIDNIDMAP, true);
5782 
5783 	if (ret)
5784 		return ret;
5785 
5786 	uncore_pci_uncores = icx_pci_uncores;
5787 	uncore_pci_driver = &icx_uncore_pci_driver;
5788 	return 0;
5789 }
5790 
5791 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5792 {
5793 	unsigned int box_ctl = box->pmu->type->box_ctl +
5794 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5795 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5796 			 SNR_IMC_MMIO_MEM0_OFFSET;
5797 
5798 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5799 				   SNR_MC_DEVICE_ID);
5800 }
5801 
5802 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5803 	.init_box	= icx_uncore_imc_init_box,
5804 	.exit_box	= uncore_mmio_exit_box,
5805 	.disable_box	= snr_uncore_mmio_disable_box,
5806 	.enable_box	= snr_uncore_mmio_enable_box,
5807 	.disable_event	= snr_uncore_mmio_disable_event,
5808 	.enable_event	= snr_uncore_mmio_enable_event,
5809 	.read_counter	= uncore_mmio_read_counter,
5810 };
5811 
5812 static struct intel_uncore_type icx_uncore_imc = {
5813 	.name		= "imc",
5814 	.num_counters   = 4,
5815 	.num_boxes	= 12,
5816 	.perf_ctr_bits	= 48,
5817 	.fixed_ctr_bits	= 48,
5818 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5819 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5820 	.event_descs	= snr_uncore_imc_events,
5821 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5822 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5823 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5824 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5825 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5826 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5827 	.ops		= &icx_uncore_mmio_ops,
5828 	.format_group	= &skx_uncore_format_group,
5829 };
5830 
5831 enum perf_uncore_icx_imc_freerunning_type_id {
5832 	ICX_IMC_DCLK,
5833 	ICX_IMC_DDR,
5834 	ICX_IMC_DDRT,
5835 
5836 	ICX_IMC_FREERUNNING_TYPE_MAX,
5837 };
5838 
5839 static struct freerunning_counters icx_imc_freerunning[] = {
5840 	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5841 	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5842 	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
5843 };
5844 
5845 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5846 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
5847 
5848 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
5849 	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
5850 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
5851 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
5852 	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
5853 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
5854 
5855 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
5856 	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
5857 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
5858 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
5859 	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
5860 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
5861 	{ /* end: all zeroes */ },
5862 };
5863 
5864 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5865 {
5866 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5867 			 SNR_IMC_MMIO_MEM0_OFFSET;
5868 
5869 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5870 			    mem_offset, SNR_MC_DEVICE_ID);
5871 }
5872 
5873 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5874 	.init_box	= icx_uncore_imc_freerunning_init_box,
5875 	.exit_box	= uncore_mmio_exit_box,
5876 	.read_counter	= uncore_mmio_read_counter,
5877 	.hw_config	= uncore_freerunning_hw_config,
5878 };
5879 
5880 static struct intel_uncore_type icx_uncore_imc_free_running = {
5881 	.name			= "imc_free_running",
5882 	.num_counters		= 5,
5883 	.num_boxes		= 4,
5884 	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
5885 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5886 	.freerunning		= icx_imc_freerunning,
5887 	.ops			= &icx_uncore_imc_freerunning_ops,
5888 	.event_descs		= icx_uncore_imc_freerunning_events,
5889 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5890 };
5891 
5892 static struct intel_uncore_type *icx_mmio_uncores[] = {
5893 	&icx_uncore_imc,
5894 	&icx_uncore_imc_free_running,
5895 	NULL,
5896 };
5897 
5898 void icx_uncore_mmio_init(void)
5899 {
5900 	uncore_mmio_uncores = icx_mmio_uncores;
5901 }
5902 
5903 /* end of ICX uncore support */
5904 
5905 /* SPR uncore support */
5906 
5907 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5908 					struct perf_event *event)
5909 {
5910 	struct hw_perf_event *hwc = &event->hw;
5911 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5912 
5913 	if (reg1->idx != EXTRA_REG_NONE)
5914 		wrmsrl(reg1->reg, reg1->config);
5915 
5916 	wrmsrl(hwc->config_base, hwc->config);
5917 }
5918 
5919 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5920 					 struct perf_event *event)
5921 {
5922 	struct hw_perf_event *hwc = &event->hw;
5923 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5924 
5925 	if (reg1->idx != EXTRA_REG_NONE)
5926 		wrmsrl(reg1->reg, 0);
5927 
5928 	wrmsrl(hwc->config_base, 0);
5929 }
5930 
5931 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5932 {
5933 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tid_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
	struct intel_uncore_type *type = box->pmu->type;

	if (tid_en) {
5938 		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5939 			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
5940 		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5941 		reg1->idx = 0;
5942 	}
5943 
5944 	return 0;
5945 }
5946 
5947 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5948 	.init_box		= intel_generic_uncore_msr_init_box,
5949 	.disable_box		= intel_generic_uncore_msr_disable_box,
5950 	.enable_box		= intel_generic_uncore_msr_enable_box,
5951 	.disable_event		= spr_uncore_msr_disable_event,
5952 	.enable_event		= spr_uncore_msr_enable_event,
5953 	.read_counter		= uncore_msr_read_counter,
5954 	.hw_config		= spr_cha_hw_config,
5955 	.get_constraint		= uncore_get_constraint,
5956 	.put_constraint		= uncore_put_constraint,
5957 };
5958 
5959 static struct attribute *spr_uncore_cha_formats_attr[] = {
5960 	&format_attr_event.attr,
5961 	&format_attr_umask_ext4.attr,
5962 	&format_attr_tid_en2.attr,
5963 	&format_attr_edge.attr,
5964 	&format_attr_inv.attr,
5965 	&format_attr_thresh8.attr,
5966 	&format_attr_filter_tid5.attr,
5967 	NULL,
5968 };
5969 static const struct attribute_group spr_uncore_chabox_format_group = {
5970 	.name = "format",
5971 	.attrs = spr_uncore_cha_formats_attr,
5972 };
5973 
5974 static ssize_t alias_show(struct device *dev,
5975 			  struct device_attribute *attr,
5976 			  char *buf)
5977 {
5978 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5979 	char pmu_name[UNCORE_PMU_NAME_LEN];
5980 
5981 	uncore_get_alias_name(pmu_name, pmu);
5982 	return sysfs_emit(buf, "%s\n", pmu_name);
5983 }
5984 
5985 static DEVICE_ATTR_RO(alias);
5986 
5987 static struct attribute *uncore_alias_attrs[] = {
5988 	&dev_attr_alias.attr,
5989 	NULL
5990 };
5991 
5992 ATTRIBUTE_GROUPS(uncore_alias);
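
/*
 * The alias is the generic, discovery-style name of a PMU that also has a
 * human-readable name, so tools can address it either way. A usage sketch
 * with illustrative output:
 *
 *	$ cat /sys/bus/event_source/devices/uncore_cha_0/alias
 *	uncore_type_0_0
 */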
5993 
5994 static struct intel_uncore_type spr_uncore_chabox = {
5995 	.name			= "cha",
5996 	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
5997 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
5998 	.num_shared_regs	= 1,
5999 	.constraints		= skx_uncore_chabox_constraints,
6000 	.ops			= &spr_uncore_chabox_ops,
6001 	.format_group		= &spr_uncore_chabox_format_group,
6002 	.attr_update		= uncore_alias_groups,
6003 };
6004 
6005 static struct intel_uncore_type spr_uncore_iio = {
6006 	.name			= "iio",
6007 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6008 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
6009 	.format_group		= &snr_uncore_iio_format_group,
6010 	.attr_update		= uncore_alias_groups,
6011 	.constraints		= icx_uncore_iio_constraints,
6012 };
6013 
6014 static struct attribute *spr_uncore_raw_formats_attr[] = {
6015 	&format_attr_event.attr,
6016 	&format_attr_umask_ext4.attr,
6017 	&format_attr_edge.attr,
6018 	&format_attr_inv.attr,
6019 	&format_attr_thresh8.attr,
6020 	NULL,
6021 };
6022 
6023 static const struct attribute_group spr_uncore_raw_format_group = {
6024 	.name			= "format",
6025 	.attrs			= spr_uncore_raw_formats_attr,
6026 };
6027 
6028 #define SPR_UNCORE_COMMON_FORMAT()				\
6029 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
6030 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
6031 	.format_group		= &spr_uncore_raw_format_group,	\
6032 	.attr_update		= uncore_alias_groups
6033 
6034 static struct intel_uncore_type spr_uncore_irp = {
6035 	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",
};
6039 
6040 static struct event_constraint spr_uncore_m2pcie_constraints[] = {
6041 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
6042 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
6043 	EVENT_CONSTRAINT_END
6044 };
6045 
6046 static struct intel_uncore_type spr_uncore_m2pcie = {
6047 	SPR_UNCORE_COMMON_FORMAT(),
6048 	.name			= "m2pcie",
6049 	.constraints		= spr_uncore_m2pcie_constraints,
6050 };
6051 
6052 static struct intel_uncore_type spr_uncore_pcu = {
6053 	.name			= "pcu",
6054 	.attr_update		= uncore_alias_groups,
6055 };
6056 
6057 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
6058 					 struct perf_event *event)
6059 {
6060 	struct hw_perf_event *hwc = &event->hw;
6061 
6062 	if (!box->io_addr)
6063 		return;
6064 
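	/* The fixed counter's control register takes only the enable bit. */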
6065 	if (uncore_pmc_fixed(hwc->idx))
6066 		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
6067 	else
6068 		writel(hwc->config, box->io_addr + hwc->config_base);
6069 }
6070 
6071 static struct intel_uncore_ops spr_uncore_mmio_ops = {
6072 	.init_box		= intel_generic_uncore_mmio_init_box,
6073 	.exit_box		= uncore_mmio_exit_box,
6074 	.disable_box		= intel_generic_uncore_mmio_disable_box,
6075 	.enable_box		= intel_generic_uncore_mmio_enable_box,
6076 	.disable_event		= intel_generic_uncore_mmio_disable_event,
6077 	.enable_event		= spr_uncore_mmio_enable_event,
6078 	.read_counter		= uncore_mmio_read_counter,
6079 };
6080 
6081 static struct intel_uncore_type spr_uncore_imc = {
6082 	SPR_UNCORE_COMMON_FORMAT(),
6083 	.name			= "imc",
6084 	.fixed_ctr_bits		= 48,
6085 	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
6086 	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
6087 	.ops			= &spr_uncore_mmio_ops,
6088 };
6089 
6090 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
6091 					struct perf_event *event)
6092 {
6093 	struct pci_dev *pdev = box->pci_dev;
6094 	struct hw_perf_event *hwc = &event->hw;
6095 
6096 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
6097 	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6098 }
6099 
6100 static struct intel_uncore_ops spr_uncore_pci_ops = {
6101 	.init_box		= intel_generic_uncore_pci_init_box,
6102 	.disable_box		= intel_generic_uncore_pci_disable_box,
6103 	.enable_box		= intel_generic_uncore_pci_enable_box,
6104 	.disable_event		= intel_generic_uncore_pci_disable_event,
6105 	.enable_event		= spr_uncore_pci_enable_event,
6106 	.read_counter		= intel_generic_uncore_pci_read_counter,
6107 };
6108 
6109 #define SPR_UNCORE_PCI_COMMON_FORMAT()			\
6110 	SPR_UNCORE_COMMON_FORMAT(),			\
6111 	.ops			= &spr_uncore_pci_ops
6112 
6113 static struct intel_uncore_type spr_uncore_m2m = {
6114 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6115 	.name			= "m2m",
6116 };
6117 
6118 static struct attribute_group spr_upi_mapping_group = {
6119 	.is_visible	= skx_upi_mapping_visible,
6120 };
6121 
6122 static const struct attribute_group *spr_upi_attr_update[] = {
6123 	&uncore_alias_group,
6124 	&spr_upi_mapping_group,
6125 	NULL
6126 };
6127 
6128 #define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01
6129 
6130 static void spr_upi_set_mapping(struct intel_uncore_type *type)
6131 {
6132 	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6133 }
6134 
6135 static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
6136 {
6137 	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6138 }
6139 
6140 static int spr_upi_get_topology(struct intel_uncore_type *type)
6141 {
6142 	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
6143 }
6144 
6145 static struct intel_uncore_type spr_uncore_upi = {
6146 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6147 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
6148 	.format_group		= &spr_uncore_raw_format_group,
6149 	.ops			= &spr_uncore_pci_ops,
6150 	.name			= "upi",
6151 	.attr_update		= spr_upi_attr_update,
6152 	.get_topology		= spr_upi_get_topology,
6153 	.set_mapping		= spr_upi_set_mapping,
6154 	.cleanup_mapping	= spr_upi_cleanup_mapping,
6155 };
6156 
6157 static struct intel_uncore_type spr_uncore_m3upi = {
6158 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6159 	.name			= "m3upi",
6160 	.constraints		= icx_uncore_m3upi_constraints,
6161 };
6162 
6163 static struct intel_uncore_type spr_uncore_mdf = {
6164 	SPR_UNCORE_COMMON_FORMAT(),
6165 	.name			= "mdf",
6166 };
6167 
6168 #define UNCORE_SPR_NUM_UNCORE_TYPES		12
6169 #define UNCORE_SPR_IIO				1
6170 #define UNCORE_SPR_IMC				6
6171 
6172 static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
6173 	&spr_uncore_chabox,
6174 	&spr_uncore_iio,
6175 	&spr_uncore_irp,
6176 	&spr_uncore_m2pcie,
6177 	&spr_uncore_pcu,
6178 	NULL,
6179 	&spr_uncore_imc,
6180 	&spr_uncore_m2m,
6181 	&spr_uncore_upi,
6182 	&spr_uncore_m3upi,
6183 	NULL,
6184 	&spr_uncore_mdf,
6185 };
6186 
6187 enum perf_uncore_spr_iio_freerunning_type_id {
6188 	SPR_IIO_MSR_IOCLK,
6189 	SPR_IIO_MSR_BW_IN,
6190 	SPR_IIO_MSR_BW_OUT,
6191 
6192 	SPR_IIO_FREERUNNING_TYPE_MAX,
6193 };
6194 
6195 static struct freerunning_counters spr_iio_freerunning[] = {
6196 	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
6197 	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
6198 	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
6199 };
6200 
6201 static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
6202 	/* Free-Running IIO CLOCKS Counter */
6203 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
6204 	/* Free-Running IIO BANDWIDTH IN Counters */
6205 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
6206 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
6207 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
6208 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
6209 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
6210 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
6211 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
6212 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
6213 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
6214 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
6215 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
6216 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
6217 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
6218 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
6219 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
6220 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
6221 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
6222 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
6223 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
6224 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
6225 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
6226 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
6227 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
6228 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
6229 	/* Free-Running IIO BANDWIDTH OUT Counters */
6230 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
6231 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
6232 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
6233 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
6234 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
6235 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
6236 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
6237 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
6238 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
6239 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
6240 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
6241 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
6242 	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
6243 	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
6244 	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
6245 	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
6246 	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
6247 	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
6248 	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
6249 	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
6250 	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
6251 	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
6252 	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
6253 	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
6254 	{ /* end: all zeroes */ },
6255 };
6256 
6257 static struct intel_uncore_type spr_uncore_iio_free_running = {
6258 	.name			= "iio_free_running",
6259 	.num_counters		= 17,
6260 	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
6261 	.freerunning		= spr_iio_freerunning,
6262 	.ops			= &skx_uncore_iio_freerunning_ops,
6263 	.event_descs		= spr_uncore_iio_freerunning_events,
6264 	.format_group		= &skx_uncore_iio_freerunning_format_group,
6265 };
6266 
6267 enum perf_uncore_spr_imc_freerunning_type_id {
6268 	SPR_IMC_DCLK,
6269 	SPR_IMC_PQ_CYCLES,
6270 
6271 	SPR_IMC_FREERUNNING_TYPE_MAX,
6272 };
6273 
6274 static struct freerunning_counters spr_imc_freerunning[] = {
6275 	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
6276 	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
6277 };
6278 
6279 static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
6280 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
6281 
6282 	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
6283 	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
6284 	{ /* end: all zeroes */ },
6285 };
6286 
6287 #define SPR_MC_DEVICE_ID	0x3251
6288 
6289 static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
6290 {
6291 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
6292 
6293 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
6294 			    mem_offset, SPR_MC_DEVICE_ID);
6295 }
6296 
6297 static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
6298 	.init_box	= spr_uncore_imc_freerunning_init_box,
6299 	.exit_box	= uncore_mmio_exit_box,
6300 	.read_counter	= uncore_mmio_read_counter,
6301 	.hw_config	= uncore_freerunning_hw_config,
6302 };
6303 
6304 static struct intel_uncore_type spr_uncore_imc_free_running = {
6305 	.name			= "imc_free_running",
6306 	.num_counters		= 3,
6307 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
6308 	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
6309 	.freerunning		= spr_imc_freerunning,
6310 	.ops			= &spr_uncore_imc_freerunning_ops,
6311 	.event_descs		= spr_uncore_imc_freerunning_events,
6312 	.format_group		= &skx_uncore_iio_freerunning_format_group,
6313 };
6314 
6315 #define UNCORE_SPR_MSR_EXTRA_UNCORES		1
6316 #define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
6317 
6318 static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
6319 	&spr_uncore_iio_free_running,
6320 };
6321 
6322 static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
6323 	&spr_uncore_imc_free_running,
6324 };
6325 
6326 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6327 					struct intel_uncore_type *from_type)
6328 {
6329 	if (!to_type || !from_type)
6330 		return;
6331 
6332 	if (from_type->name)
6333 		to_type->name = from_type->name;
6334 	if (from_type->fixed_ctr_bits)
6335 		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6336 	if (from_type->event_mask)
6337 		to_type->event_mask = from_type->event_mask;
6338 	if (from_type->event_mask_ext)
6339 		to_type->event_mask_ext = from_type->event_mask_ext;
6340 	if (from_type->fixed_ctr)
6341 		to_type->fixed_ctr = from_type->fixed_ctr;
6342 	if (from_type->fixed_ctl)
6343 		to_type->fixed_ctl = from_type->fixed_ctl;
6346 	if (from_type->num_shared_regs)
6347 		to_type->num_shared_regs = from_type->num_shared_regs;
6348 	if (from_type->constraints)
6349 		to_type->constraints = from_type->constraints;
6350 	if (from_type->ops)
6351 		to_type->ops = from_type->ops;
6352 	if (from_type->event_descs)
6353 		to_type->event_descs = from_type->event_descs;
6354 	if (from_type->format_group)
6355 		to_type->format_group = from_type->format_group;
6356 	if (from_type->attr_update)
6357 		to_type->attr_update = from_type->attr_update;
6358 	if (from_type->set_mapping)
6359 		to_type->set_mapping = from_type->set_mapping;
6360 	if (from_type->get_topology)
6361 		to_type->get_topology = from_type->get_topology;
6362 	if (from_type->cleanup_mapping)
6363 		to_type->cleanup_mapping = from_type->cleanup_mapping;
6364 }
6365 
6366 static struct intel_uncore_type **
6367 uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
6368 		    struct intel_uncore_type **extra)
6369 {
6370 	struct intel_uncore_type **types, **start_types;
6371 	int i;
6372 
6373 	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
6374 
6375 	/* Only copy the customized features */
6376 	for (; *types; types++) {
6377 		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
6378 			continue;
6379 		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
6380 	}
6381 
6382 	for (i = 0; i < num_extra; i++, types++)
6383 		*types = extra[i];
6384 
6385 	return start_types;
6386 }
6387 
6388 static struct intel_uncore_type *
6389 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6390 {
6391 	for (; *types; types++) {
6392 		if (type_id == (*types)->type_id)
6393 			return *types;
6394 	}
6395 
6396 	return NULL;
6397 }
6398 
6399 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6400 				 int type_id)
6401 {
6402 	struct intel_uncore_type *type;
6403 	int i, max = 0;
6404 
6405 	type = uncore_find_type_by_id(types, type_id);
6406 	if (!type)
6407 		return 0;
6408 
6409 	for (i = 0; i < type->num_boxes; i++) {
6410 		if (type->box_ids[i] > max)
6411 			max = type->box_ids[i];
6412 	}
6413 
6414 	return max + 1;
6415 }
6416 
6417 void spr_uncore_cpu_init(void)
6418 {
6419 	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6420 						UNCORE_SPR_MSR_EXTRA_UNCORES,
6421 						spr_msr_uncores);
6422 
6423 	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6424 }
6425 
6426 int spr_uncore_pci_init(void)
6427 {
6428 	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
6429 	return 0;
6430 }
6431 
6432 void spr_uncore_mmio_init(void)
6433 {
6434 	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6435 
	if (ret) {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
	} else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
6445 }
6446 
6447 /* end of SPR uncore support */
6448