xref: /linux/arch/x86/events/intel/uncore_snb.c (revision 10d6d2416db2137a5a0ef9162662e5b7fee56dd4)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
3 #include <asm/msr.h>
4 #include "uncore.h"
5 #include "uncore_discovery.h"
6 
7 /* Uncore IMC PCI IDs */
8 #define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
9 #define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
10 #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
11 #define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
12 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
13 #define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
14 #define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
15 #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
16 #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
17 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
18 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
19 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
20 #define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
21 #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
22 #define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
23 #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
24 #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
25 #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
26 #define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
27 #define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
28 #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
29 #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
30 #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
31 #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
32 #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
33 #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
34 #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
35 #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
36 #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
37 #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
38 #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
39 #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
40 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
41 #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
42 #define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
43 #define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
44 #define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
45 #define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
46 #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
47 #define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
48 #define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
49 #define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
50 #define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
51 #define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
52 #define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
53 #define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
54 #define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
55 #define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
56 #define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
57 #define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
58 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
59 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
60 #define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
61 #define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
62 #define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
63 #define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
64 #define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
65 #define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
66 #define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
67 #define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
68 #define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
69 #define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
70 #define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
71 #define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
72 #define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
73 #define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
74 #define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
75 #define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
76 #define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
77 #define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
78 #define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
79 #define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
80 #define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
81 #define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
82 #define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
83 #define PCI_DEVICE_ID_INTEL_ADL_17_IMC		0x4614
84 #define PCI_DEVICE_ID_INTEL_ADL_18_IMC		0x4617
85 #define PCI_DEVICE_ID_INTEL_ADL_19_IMC		0x4618
86 #define PCI_DEVICE_ID_INTEL_ADL_20_IMC		0x461B
87 #define PCI_DEVICE_ID_INTEL_ADL_21_IMC		0x461C
88 #define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
89 #define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
90 #define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
91 #define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
92 #define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
93 #define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
94 #define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
95 #define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
96 #define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
97 #define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
98 #define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
99 #define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
100 #define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
101 #define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
102 #define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
103 #define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
104 #define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
105 #define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
106 #define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
107 #define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
108 #define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
109 #define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
110 #define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
111 #define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
112 #define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
113 #define PCI_DEVICE_ID_INTEL_MTL_1_IMC		0x7d00
114 #define PCI_DEVICE_ID_INTEL_MTL_2_IMC		0x7d01
115 #define PCI_DEVICE_ID_INTEL_MTL_3_IMC		0x7d02
116 #define PCI_DEVICE_ID_INTEL_MTL_4_IMC		0x7d05
117 #define PCI_DEVICE_ID_INTEL_MTL_5_IMC		0x7d10
118 #define PCI_DEVICE_ID_INTEL_MTL_6_IMC		0x7d14
119 #define PCI_DEVICE_ID_INTEL_MTL_7_IMC		0x7d15
120 #define PCI_DEVICE_ID_INTEL_MTL_8_IMC		0x7d16
121 #define PCI_DEVICE_ID_INTEL_MTL_9_IMC		0x7d21
122 #define PCI_DEVICE_ID_INTEL_MTL_10_IMC		0x7d22
123 #define PCI_DEVICE_ID_INTEL_MTL_11_IMC		0x7d23
124 #define PCI_DEVICE_ID_INTEL_MTL_12_IMC		0x7d24
125 #define PCI_DEVICE_ID_INTEL_MTL_13_IMC		0x7d28
126 
127 
128 #define IMC_UNCORE_DEV(a)						\
129 {									\
130 	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
131 	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
132 }
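/*
 * IMC_UNCORE_DEV(X) expands to a pci_device_id entry that matches the
 * Intel IMC device X and carries the SNB_PCI_UNCORE_IMC type index in
 * driver_data, so the generic PCI probe code can find the matching
 * entry in snb_pci_uncores[] below.
 */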
133 
134 /* SNB event control */
135 #define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
136 #define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
137 #define SNB_UNC_CTL_EDGE_DET			(1 << 18)
138 #define SNB_UNC_CTL_EN				(1 << 22)
139 #define SNB_UNC_CTL_INVERT			(1 << 23)
140 #define SNB_UNC_CTL_CMASK_MASK			0x1f000000
141 #define NHM_UNC_CTL_CMASK_MASK			0xff000000
142 #define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)
143 
144 #define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
145 						 SNB_UNC_CTL_UMASK_MASK | \
146 						 SNB_UNC_CTL_EDGE_DET | \
147 						 SNB_UNC_CTL_INVERT | \
148 						 SNB_UNC_CTL_CMASK_MASK)
149 
150 #define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
151 						 SNB_UNC_CTL_UMASK_MASK | \
152 						 SNB_UNC_CTL_EDGE_DET | \
153 						 SNB_UNC_CTL_INVERT | \
154 						 NHM_UNC_CTL_CMASK_MASK)
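/*
 * Raw event layout in the control register, matching the
 * DEFINE_UNCORE_FORMAT_ATTR() entries below: event select in bits 0-7,
 * umask in bits 8-15, edge detect in bit 18, invert in bit 23, counter
 * mask in bits 24-28 (SNB, 5 bits) or 24-31 (NHM, 8 bits). For example,
 * event=0x80 with cmask=0x1 encodes as 0x80 | (0x1 << 24) = 0x01000080.
 */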
155 
156 /* SNB global control register */
157 #define SNB_UNC_PERF_GLOBAL_CTL                 0x391
158 #define SNB_UNC_FIXED_CTR_CTRL                  0x394
159 #define SNB_UNC_FIXED_CTR                       0x395
160 
161 /* SNB uncore global control */
162 #define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
163 #define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)
164 
165 /* SNB Cbo register */
166 #define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
167 #define SNB_UNC_CBO_0_PER_CTR0                  0x706
168 #define SNB_UNC_CBO_MSR_OFFSET                  0x10
169 
170 /* SNB ARB register */
171 #define SNB_UNC_ARB_PER_CTR0			0x3b0
172 #define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
173 #define SNB_UNC_ARB_MSR_OFFSET			0x10
174 
175 /* NHM global control register */
176 #define NHM_UNC_PERF_GLOBAL_CTL                 0x391
177 #define NHM_UNC_FIXED_CTR                       0x394
178 #define NHM_UNC_FIXED_CTR_CTRL                  0x395
179 
180 /* NHM uncore global control */
181 #define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
182 #define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)
183 
184 /* NHM uncore register */
185 #define NHM_UNC_PERFEVTSEL0                     0x3c0
186 #define NHM_UNC_UNCORE_PMC0                     0x3b0
187 
188 /* SKL uncore global control */
189 #define SKL_UNC_PERF_GLOBAL_CTL			0xe01
190 #define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)
191 
192 /* ICL Cbo register */
193 #define ICL_UNC_CBO_CONFIG			0x396
194 #define ICL_UNC_NUM_CBO_MASK			0xf
195 #define ICL_UNC_CBO_0_PER_CTR0			0x702
196 #define ICL_UNC_CBO_MSR_OFFSET			0x8
197 
198 /* ICL ARB register */
199 #define ICL_UNC_ARB_PER_CTR			0x3b1
200 #define ICL_UNC_ARB_PERFEVTSEL			0x3b3
201 
202 /* ADL uncore global control */
203 #define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
204 #define ADL_UNC_FIXED_CTR_CTRL                  0x2fde
205 #define ADL_UNC_FIXED_CTR                       0x2fdf
206 
207 /* ADL Cbo register */
208 #define ADL_UNC_CBO_0_PER_CTR0			0x2002
209 #define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
210 #define ADL_UNC_CTL_THRESHOLD			0x3f000000
211 #define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
212 						 SNB_UNC_CTL_UMASK_MASK | \
213 						 SNB_UNC_CTL_EDGE_DET | \
214 						 SNB_UNC_CTL_INVERT | \
215 						 ADL_UNC_CTL_THRESHOLD)
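/*
 * ADL replaces the 5-bit SNB counter mask with a 6-bit threshold field
 * in bits 24-29 (see format_attr_threshold below); the other fields
 * keep the SNB layout.
 */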
216 
217 /* ADL ARB register */
218 #define ADL_UNC_ARB_PER_CTR0			0x2FD2
219 #define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
220 #define ADL_UNC_ARB_MSR_OFFSET			0x8
221 
222 /* MTL Cbo register */
223 #define MTL_UNC_CBO_0_PER_CTR0			0x2448
224 #define MTL_UNC_CBO_0_PERFEVTSEL0		0x2442
225 
226 /* MTL HAC_ARB register */
227 #define MTL_UNC_HAC_ARB_CTR			0x2018
228 #define MTL_UNC_HAC_ARB_CTRL			0x2012
229 
230 /* MTL ARB register */
231 #define MTL_UNC_ARB_CTR				0x2418
232 #define MTL_UNC_ARB_CTRL			0x2412
233 
234 /* MTL cNCU register */
235 #define MTL_UNC_CNCU_FIXED_CTR			0x2408
236 #define MTL_UNC_CNCU_FIXED_CTRL			0x2402
237 #define MTL_UNC_CNCU_BOX_CTL			0x240e
238 
239 /* MTL sNCU register */
240 #define MTL_UNC_SNCU_FIXED_CTR			0x2008
241 #define MTL_UNC_SNCU_FIXED_CTRL			0x2002
242 #define MTL_UNC_SNCU_BOX_CTL			0x200e
243 
244 /* MTL HAC_CBO register */
245 #define MTL_UNC_HBO_CTR				0x2048
246 #define MTL_UNC_HBO_CTRL			0x2042
247 
248 /* PTL Low Power Bridge register */
249 #define PTL_UNC_IA_CORE_BRIDGE_PER_CTR0		0x2028
250 #define PTL_UNC_IA_CORE_BRIDGE_PERFEVTSEL0	0x2022
251 
252 /* PTL Santa register */
253 #define PTL_UNC_SANTA_CTR0			0x2418
254 #define PTL_UNC_SANTA_CTRL0			0x2412
255 
256 /* PTL cNCU register */
257 #define PTL_UNC_CNCU_MSR_OFFSET			0x140
258 
259 /* NVL cNCU register */
260 #define NVL_UNC_CNCU_BOX_CTL			0x202e
261 #define NVL_UNC_CNCU_FIXED_CTR			0x2028
262 #define NVL_UNC_CNCU_FIXED_CTRL			0x2022
263 
264 /* NVL SANTA register */
265 #define NVL_UNC_SANTA_CTR0			0x2048
266 #define NVL_UNC_SANTA_CTRL0			0x2042
267 
268 /* NVL CBOX register */
269 #define NVL_UNC_CBOX_PER_CTR0			0x2108
270 #define NVL_UNC_CBOX_PERFEVTSEL0		0x2102
271 
272 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
273 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
274 DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
275 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
276 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
277 DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
278 DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
279 DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
280 DEFINE_UNCORE_FORMAT_ATTR(threshold2, threshold, "config:24-31");
281 
282 /* Sandy Bridge uncore support */
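/*
 * A general-purpose counter (hwc->idx < UNCORE_PMC_IDX_FIXED) takes the
 * full event encoding plus the enable bit; the fixed counter control
 * register only takes the enable bit.
 */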
283 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
284 {
285 	struct hw_perf_event *hwc = &event->hw;
286 
287 	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
288 		wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
289 	else
290 		wrmsrq(hwc->config_base, SNB_UNC_CTL_EN);
291 }
292 
293 static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
294 {
295 	wrmsrq(event->hw.config_base, 0);
296 }
297 
298 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
299 {
300 	if (box->pmu->pmu_idx == 0) {
301 		wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
302 			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
303 	}
304 }
305 
306 static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
307 {
308 	wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,
309 		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
310 }
311 
312 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
313 {
314 	if (box->pmu->pmu_idx == 0)
315 		wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0);
316 }
317 
318 static struct uncore_event_desc snb_uncore_events[] = {
319 	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
320 	{ /* end: all zeroes */ },
321 };
322 
323 static struct attribute *snb_uncore_formats_attr[] = {
324 	&format_attr_event.attr,
325 	&format_attr_umask.attr,
326 	&format_attr_edge.attr,
327 	&format_attr_inv.attr,
328 	&format_attr_cmask5.attr,
329 	NULL,
330 };
331 
332 static const struct attribute_group snb_uncore_format_group = {
333 	.name		= "format",
334 	.attrs		= snb_uncore_formats_attr,
335 };
336 
337 static struct intel_uncore_ops snb_uncore_msr_ops = {
338 	.init_box	= snb_uncore_msr_init_box,
339 	.enable_box	= snb_uncore_msr_enable_box,
340 	.exit_box	= snb_uncore_msr_exit_box,
341 	.disable_event	= snb_uncore_msr_disable_event,
342 	.enable_event	= snb_uncore_msr_enable_event,
343 	.read_counter	= uncore_msr_read_counter,
344 };
345 
346 static struct event_constraint snb_uncore_arb_constraints[] = {
347 	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
348 	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
349 	EVENT_CONSTRAINT_END
350 };
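/*
 * Per-box MSR addressing: box N uses the box 0 registers plus
 * N * msr_offset, e.g. the first counter of Cbox 2 lives at
 * SNB_UNC_CBO_0_PER_CTR0 + 2 * SNB_UNC_CBO_MSR_OFFSET = 0x726.
 */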
351 
352 static struct intel_uncore_type snb_uncore_cbox = {
353 	.name		= "cbox",
354 	.num_counters   = 2,
355 	.num_boxes	= 4,
356 	.perf_ctr_bits	= 44,
357 	.fixed_ctr_bits	= 48,
358 	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
359 	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
360 	.fixed_ctr	= SNB_UNC_FIXED_CTR,
361 	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
362 	.single_fixed	= 1,
363 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
364 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
365 	.ops		= &snb_uncore_msr_ops,
366 	.format_group	= &snb_uncore_format_group,
367 	.event_descs	= snb_uncore_events,
368 };
369 
370 static struct intel_uncore_type snb_uncore_arb = {
371 	.name		= "arb",
372 	.num_counters   = 2,
373 	.num_boxes	= 1,
374 	.perf_ctr_bits	= 44,
375 	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
376 	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
377 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
378 	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
379 	.constraints	= snb_uncore_arb_constraints,
380 	.ops		= &snb_uncore_msr_ops,
381 	.format_group	= &snb_uncore_format_group,
382 };
383 
384 static struct intel_uncore_type *snb_msr_uncores[] = {
385 	&snb_uncore_cbox,
386 	&snb_uncore_arb,
387 	NULL,
388 };
389 
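/*
 * Each physical core has its own Cbox, so clamp the compile-time
 * default of four boxes to the actual core count of the package.
 */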
390 void snb_uncore_cpu_init(void)
391 {
392 	uncore_msr_uncores = snb_msr_uncores;
393 	if (snb_uncore_cbox.num_boxes > topology_num_cores_per_package())
394 		snb_uncore_cbox.num_boxes = topology_num_cores_per_package();
395 }
396 
397 static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
398 {
399 	if (box->pmu->pmu_idx == 0) {
400 		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
401 			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
402 	}
403 
404 	/* The 8th CBOX has different MSR space */
405 	if (box->pmu->pmu_idx == 7)
406 		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
407 }
408 
409 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
410 {
411 	wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,
412 		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
413 }
414 
415 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
416 {
417 	if (box->pmu->pmu_idx == 0)
418 		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0);
419 }
420 
421 static struct intel_uncore_ops skl_uncore_msr_ops = {
422 	.init_box	= skl_uncore_msr_init_box,
423 	.enable_box	= skl_uncore_msr_enable_box,
424 	.exit_box	= skl_uncore_msr_exit_box,
425 	.disable_event	= snb_uncore_msr_disable_event,
426 	.enable_event	= snb_uncore_msr_enable_event,
427 	.read_counter	= uncore_msr_read_counter,
428 };
429 
430 static struct intel_uncore_type skl_uncore_cbox = {
431 	.name		= "cbox",
432 	.num_counters   = 4,
433 	.num_boxes	= 8,
434 	.perf_ctr_bits	= 44,
435 	.fixed_ctr_bits	= 48,
436 	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
437 	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
438 	.fixed_ctr	= SNB_UNC_FIXED_CTR,
439 	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
440 	.single_fixed	= 1,
441 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
442 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
443 	.ops		= &skl_uncore_msr_ops,
444 	.format_group	= &snb_uncore_format_group,
445 	.event_descs	= snb_uncore_events,
446 };
447 
448 static struct intel_uncore_type *skl_msr_uncores[] = {
449 	&skl_uncore_cbox,
450 	&snb_uncore_arb,
451 	NULL,
452 };
453 
454 void skl_uncore_cpu_init(void)
455 {
456 	uncore_msr_uncores = skl_msr_uncores;
457 	if (skl_uncore_cbox.num_boxes > topology_num_cores_per_package())
458 		skl_uncore_cbox.num_boxes = topology_num_cores_per_package();
459 	snb_uncore_arb.ops = &skl_uncore_msr_ops;
460 }
461 
462 static struct intel_uncore_ops icl_uncore_msr_ops = {
463 	.disable_event	= snb_uncore_msr_disable_event,
464 	.enable_event	= snb_uncore_msr_enable_event,
465 	.read_counter	= uncore_msr_read_counter,
466 };
467 
468 static struct intel_uncore_type icl_uncore_cbox = {
469 	.name		= "cbox",
470 	.num_counters   = 2,
471 	.perf_ctr_bits	= 44,
472 	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
473 	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
474 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
475 	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
476 	.ops		= &icl_uncore_msr_ops,
477 	.format_group	= &snb_uncore_format_group,
478 };
479 
480 static struct uncore_event_desc icl_uncore_events[] = {
481 	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
482 	{ /* end: all zeroes */ },
483 };
484 
485 static struct attribute *icl_uncore_clock_formats_attr[] = {
486 	&format_attr_event.attr,
487 	NULL,
488 };
489 
490 static struct attribute_group icl_uncore_clock_format_group = {
491 	.name = "format",
492 	.attrs = icl_uncore_clock_formats_attr,
493 };
494 
495 static struct intel_uncore_type icl_uncore_clockbox = {
496 	.name		= "clock",
497 	.num_counters	= 1,
498 	.num_boxes	= 1,
499 	.fixed_ctr_bits	= 48,
500 	.fixed_ctr	= SNB_UNC_FIXED_CTR,
501 	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
502 	.single_fixed	= 1,
503 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
504 	.format_group	= &icl_uncore_clock_format_group,
505 	.ops		= &icl_uncore_msr_ops,
506 	.event_descs	= icl_uncore_events,
507 };
508 
509 static struct intel_uncore_type icl_uncore_arb = {
510 	.name		= "arb",
511 	.num_counters   = 1,
512 	.num_boxes	= 1,
513 	.perf_ctr_bits	= 44,
514 	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
515 	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
516 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
517 	.ops		= &icl_uncore_msr_ops,
518 	.format_group	= &snb_uncore_format_group,
519 };
520 
521 static struct intel_uncore_type *icl_msr_uncores[] = {
522 	&icl_uncore_cbox,
523 	&icl_uncore_arb,
524 	&icl_uncore_clockbox,
525 	NULL,
526 };
527 
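/*
 * From ICL onwards the number of C-boxes is enumerated at run time:
 * the low four bits of the CBO_CONFIG MSR report the count.
 */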
528 static int icl_get_cbox_num(void)
529 {
530 	u64 num_boxes;
531 
532 	rdmsrq(ICL_UNC_CBO_CONFIG, num_boxes);
533 
534 	return num_boxes & ICL_UNC_NUM_CBO_MASK;
535 }
536 
537 void icl_uncore_cpu_init(void)
538 {
539 	uncore_msr_uncores = icl_msr_uncores;
540 	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
541 }
542 
543 static struct intel_uncore_type *tgl_msr_uncores[] = {
544 	&icl_uncore_cbox,
545 	&snb_uncore_arb,
546 	&icl_uncore_clockbox,
547 	NULL,
548 };
549 
550 static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
551 {
552 	if (box->pmu->pmu_idx == 0)
553 		wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
554 }
555 
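/*
 * TGL reuses the SKL MSR ops but repoints init_box at the RKL/TGL
 * variant above, which writes only the global enable bit and no
 * per-core enable mask.
 */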
556 void tgl_uncore_cpu_init(void)
557 {
558 	uncore_msr_uncores = tgl_msr_uncores;
559 	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
560 	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
561 	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
562 	snb_uncore_arb.ops = &skl_uncore_msr_ops;
563 	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
564 }
565 
566 static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
567 {
568 	if (box->pmu->pmu_idx == 0)
569 		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
570 }
571 
572 static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
573 {
574 	wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
575 }
576 
577 static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
578 {
579 	if (box->pmu->pmu_idx == 0)
580 		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
581 }
582 
583 static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
584 {
585 	if (box->pmu->pmu_idx == 0)
586 		wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);
587 }
588 
589 static struct intel_uncore_ops adl_uncore_msr_ops = {
590 	.init_box	= adl_uncore_msr_init_box,
591 	.enable_box	= adl_uncore_msr_enable_box,
592 	.disable_box	= adl_uncore_msr_disable_box,
593 	.exit_box	= adl_uncore_msr_exit_box,
594 	.disable_event	= snb_uncore_msr_disable_event,
595 	.enable_event	= snb_uncore_msr_enable_event,
596 	.read_counter	= uncore_msr_read_counter,
597 };
598 
599 static struct attribute *adl_uncore_formats_attr[] = {
600 	&format_attr_event.attr,
601 	&format_attr_umask.attr,
602 	&format_attr_edge.attr,
603 	&format_attr_inv.attr,
604 	&format_attr_threshold.attr,
605 	NULL,
606 };
607 
608 static const struct attribute_group adl_uncore_format_group = {
609 	.name		= "format",
610 	.attrs		= adl_uncore_formats_attr,
611 };
612 
613 static struct intel_uncore_type adl_uncore_cbox = {
614 	.name		= "cbox",
615 	.num_counters   = 2,
616 	.perf_ctr_bits	= 44,
617 	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
618 	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
619 	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
620 	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
621 	.ops		= &adl_uncore_msr_ops,
622 	.format_group	= &adl_uncore_format_group,
623 };
624 
625 static struct intel_uncore_type adl_uncore_arb = {
626 	.name		= "arb",
627 	.num_counters   = 2,
628 	.num_boxes	= 2,
629 	.perf_ctr_bits	= 44,
630 	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
631 	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
632 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
633 	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
634 	.constraints	= snb_uncore_arb_constraints,
635 	.ops		= &adl_uncore_msr_ops,
636 	.format_group	= &snb_uncore_format_group,
637 };
638 
639 static struct intel_uncore_type adl_uncore_clockbox = {
640 	.name		= "clock",
641 	.num_counters	= 1,
642 	.num_boxes	= 1,
643 	.fixed_ctr_bits	= 48,
644 	.fixed_ctr	= ADL_UNC_FIXED_CTR,
645 	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
646 	.single_fixed	= 1,
647 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
648 	.format_group	= &icl_uncore_clock_format_group,
649 	.ops		= &adl_uncore_msr_ops,
650 	.event_descs	= icl_uncore_events,
651 };
652 
653 static struct intel_uncore_type *adl_msr_uncores[] = {
654 	&adl_uncore_cbox,
655 	&adl_uncore_arb,
656 	&adl_uncore_clockbox,
657 	NULL,
658 };
659 
660 void adl_uncore_cpu_init(void)
661 {
662 	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
663 	uncore_msr_uncores = adl_msr_uncores;
664 }
665 
666 static struct intel_uncore_type mtl_uncore_cbox = {
667 	.name		= "cbox",
668 	.num_counters   = 2,
669 	.perf_ctr_bits	= 48,
670 	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
671 	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
672 	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
673 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
674 	.ops		= &icl_uncore_msr_ops,
675 	.format_group	= &adl_uncore_format_group,
676 };
677 
678 static struct intel_uncore_type mtl_uncore_hac_arb = {
679 	.name		= "hac_arb",
680 	.num_counters   = 2,
681 	.num_boxes	= 2,
682 	.perf_ctr_bits	= 48,
683 	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
684 	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
685 	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
686 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
687 	.ops		= &icl_uncore_msr_ops,
688 	.format_group	= &adl_uncore_format_group,
689 };
690 
691 static struct intel_uncore_type mtl_uncore_arb = {
692 	.name		= "arb",
693 	.num_counters   = 2,
694 	.num_boxes	= 2,
695 	.perf_ctr_bits	= 48,
696 	.perf_ctr	= MTL_UNC_ARB_CTR,
697 	.event_ctl	= MTL_UNC_ARB_CTRL,
698 	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
699 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
700 	.ops		= &icl_uncore_msr_ops,
701 	.format_group	= &adl_uncore_format_group,
702 };
703 
704 static struct intel_uncore_type mtl_uncore_hac_cbox = {
705 	.name		= "hac_cbox",
706 	.num_counters   = 2,
707 	.num_boxes	= 2,
708 	.perf_ctr_bits	= 48,
709 	.perf_ctr	= MTL_UNC_HBO_CTR,
710 	.event_ctl	= MTL_UNC_HBO_CTRL,
711 	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
712 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
713 	.ops		= &icl_uncore_msr_ops,
714 	.format_group	= &adl_uncore_format_group,
715 };
716 
717 static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
718 {
719 	wrmsrq(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
720 }
721 
722 static struct intel_uncore_ops mtl_uncore_msr_ops = {
723 	.init_box	= mtl_uncore_msr_init_box,
724 	.disable_event	= snb_uncore_msr_disable_event,
725 	.enable_event	= snb_uncore_msr_enable_event,
726 	.read_counter	= uncore_msr_read_counter,
727 };
728 
729 static struct intel_uncore_type mtl_uncore_cncu = {
730 	.name		= "cncu",
731 	.num_counters   = 1,
732 	.num_boxes	= 1,
733 	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
734 	.fixed_ctr_bits = 48,
735 	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
736 	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
737 	.single_fixed	= 1,
738 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
739 	.format_group	= &icl_uncore_clock_format_group,
740 	.ops		= &mtl_uncore_msr_ops,
741 	.event_descs	= icl_uncore_events,
742 };
743 
744 static struct intel_uncore_type mtl_uncore_sncu = {
745 	.name		= "sncu",
746 	.num_counters   = 1,
747 	.num_boxes	= 1,
748 	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
749 	.fixed_ctr_bits	= 48,
750 	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
751 	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
752 	.single_fixed	= 1,
753 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
754 	.format_group	= &icl_uncore_clock_format_group,
755 	.ops		= &mtl_uncore_msr_ops,
756 	.event_descs	= icl_uncore_events,
757 };
758 
759 static struct intel_uncore_type *mtl_msr_uncores[] = {
760 	&mtl_uncore_cbox,
761 	&mtl_uncore_hac_arb,
762 	&mtl_uncore_arb,
763 	&mtl_uncore_hac_cbox,
764 	&mtl_uncore_cncu,
765 	&mtl_uncore_sncu,
766 	NULL
767 };
768 
769 void mtl_uncore_cpu_init(void)
770 {
771 	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
772 	uncore_msr_uncores = mtl_msr_uncores;
773 }
774 
775 static struct intel_uncore_type *lnl_msr_uncores[] = {
776 	&mtl_uncore_cbox,
777 	&mtl_uncore_arb,
778 	NULL
779 };
780 
781 #define LNL_UNC_MSR_GLOBAL_CTL			0x240e
782 
783 static void lnl_uncore_msr_init_box(struct intel_uncore_box *box)
784 {
785 	if (box->pmu->pmu_idx == 0)
786 		wrmsrq(LNL_UNC_MSR_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
787 }
788 
789 static struct intel_uncore_ops lnl_uncore_msr_ops = {
790 	.init_box	= lnl_uncore_msr_init_box,
791 	.disable_event	= snb_uncore_msr_disable_event,
792 	.enable_event	= snb_uncore_msr_enable_event,
793 	.read_counter	= uncore_msr_read_counter,
794 };
795 
796 void lnl_uncore_cpu_init(void)
797 {
798 	mtl_uncore_cbox.num_boxes = 4;
799 	mtl_uncore_cbox.ops = &lnl_uncore_msr_ops;
800 	uncore_msr_uncores = lnl_msr_uncores;
801 }
802 
803 enum {
804 	SNB_PCI_UNCORE_IMC,
805 };
806 
807 static struct uncore_event_desc snb_uncore_imc_events[] = {
808 	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
809 	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
810 	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
811 
812 	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
813 	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
814 	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
815 
816 	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
817 	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
818 	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),
819 
820 	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
821 	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
822 	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),
823 
824 	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
825 	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
826 	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),
827 
828 	{ /* end: all zeroes */ },
829 };
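/*
 * Example usage from user space, assuming the PMU registers under the
 * name "uncore_imc":
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 *
 * The 6.103515625e-5 scale (64/2^20) converts the 64-byte-granular
 * counts to MiB.
 */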
830 
831 #define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
832 #define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48
833 
834 /* page size multiple covering all config regs */
835 #define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000
836 
837 #define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
838 #define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
839 #define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
840 #define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
841 #define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
842 
843 /* BW breakdown - legacy counters */
844 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
845 #define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
846 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
847 #define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
848 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
849 #define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048
850 
851 enum perf_snb_uncore_imc_freerunning_types {
852 	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
853 	SNB_PCI_UNCORE_IMC_DATA_WRITES,
854 	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
855 	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
856 	SNB_PCI_UNCORE_IMC_IO_REQUESTS,
857 
858 	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
859 };
860 
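/*
 * Each entry is { counter_base, counter_offset, box_offset,
 * num_counters, bits }: a single 32-bit free running counter per
 * type at the MMIO offsets defined above.
 */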
861 static struct freerunning_counters snb_uncore_imc_freerunning[] = {
862 	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
863 							0x0, 0x0, 1, 32 },
864 	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
865 							0x0, 0x0, 1, 32 },
866 	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
867 							0x0, 0x0, 1, 32 },
868 	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
869 							0x0, 0x0, 1, 32 },
870 	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
871 							0x0, 0x0, 1, 32 },
872 };
873 
874 static struct attribute *snb_uncore_imc_formats_attr[] = {
875 	&format_attr_event.attr,
876 	NULL,
877 };
878 
879 static const struct attribute_group snb_uncore_imc_format_group = {
880 	.name = "format",
881 	.attrs = snb_uncore_imc_formats_attr,
882 };
883 
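/*
 * Map the IMC counters: read the (optionally 64-bit) BAR at config
 * offset SNB_UNCORE_PCI_IMC_BAR_OFFSET, page-align it to strip the
 * low flag bits and ioremap mmio_map_size bytes of MMIO space.
 */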
884 static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
885 {
886 	struct intel_uncore_type *type = box->pmu->type;
887 	struct pci_dev *pdev = box->pci_dev;
888 	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
889 	resource_size_t addr;
890 	u32 pci_dword;
891 
892 	pci_read_config_dword(pdev, where, &pci_dword);
893 	addr = pci_dword;
894 
895 #ifdef CONFIG_PHYS_ADDR_T_64BIT
896 	pci_read_config_dword(pdev, where + 4, &pci_dword);
897 	addr |= ((resource_size_t)pci_dword << 32);
898 #endif
899 
900 	addr &= ~(PAGE_SIZE - 1);
901 
902 	box->io_addr = ioremap(addr, type->mmio_map_size);
903 	if (!box->io_addr)
904 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
905 
906 	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
907 }
908 
909 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
910 {}
911 
912 static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
913 {}
914 
915 static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
916 {}
917 
918 static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
919 {}
920 
921 /*
922  * Keep the custom event_init() function compatible with old event
923  * encoding for free running counters.
924  */
925 static int snb_uncore_imc_event_init(struct perf_event *event)
926 {
927 	struct intel_uncore_pmu *pmu;
928 	struct intel_uncore_box *box;
929 	struct hw_perf_event *hwc = &event->hw;
930 	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
931 	int idx, base;
932 
933 	if (event->attr.type != event->pmu->type)
934 		return -ENOENT;
935 
936 	pmu = uncore_event_to_pmu(event);
937 	/* no device found for this pmu */
938 	if (!pmu->registered)
939 		return -ENOENT;
940 
941 	/* Sampling not supported yet */
942 	if (hwc->sample_period)
943 		return -EINVAL;
944 
945 	/* unsupported modes and filters */
946 	if (event->attr.sample_period) /* no sampling */
947 		return -EINVAL;
948 
949 	/*
950 	 * Place all uncore events for a particular physical package
951 	 * onto a single cpu
952 	 */
953 	if (event->cpu < 0)
954 		return -EINVAL;
955 
956 	/* check only supported bits are set */
957 	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
958 		return -EINVAL;
959 
960 	box = uncore_pmu_to_box(pmu, event->cpu);
961 	if (!box || box->cpu < 0)
962 		return -EINVAL;
963 
964 	event->cpu = box->cpu;
965 	event->pmu_private = box;
966 
967 	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
968 
969 	event->hw.idx = -1;
970 	event->hw.last_tag = ~0ULL;
971 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
972 	event->hw.branch_reg.idx = EXTRA_REG_NONE;
973 	/*
974 	 * check event is known (whitelist, determines counter)
975 	 */
976 	switch (cfg) {
977 	case SNB_UNCORE_PCI_IMC_DATA_READS:
978 		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
979 		idx = UNCORE_PMC_IDX_FREERUNNING;
980 		break;
981 	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
982 		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
983 		idx = UNCORE_PMC_IDX_FREERUNNING;
984 		break;
985 	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
986 		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
987 		idx = UNCORE_PMC_IDX_FREERUNNING;
988 		break;
989 	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
990 		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
991 		idx = UNCORE_PMC_IDX_FREERUNNING;
992 		break;
993 	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
994 		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
995 		idx = UNCORE_PMC_IDX_FREERUNNING;
996 		break;
997 	default:
998 		return -EINVAL;
999 	}
1000 
1001 	/* must be done before validate_group */
1002 	event->hw.event_base = base;
1003 	event->hw.idx = idx;
1004 
1005 	/* Convert to standard encoding format for freerunning counters */
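	/*
	 * e.g. cfg 0x1 (data_reads) becomes 0x10ff and cfg 0x2
	 * (data_writes) becomes 0x11ff: event 0xff with a umask at or
	 * above 0x10 is the encoding the generic free running code
	 * expects.
	 */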
1006 	event->hw.config = ((cfg - 1) << 8) | 0x10ff;
1007 
1008 	/* no group validation needed, we have free running counters */
1009 
1010 	return 0;
1011 }
1012 
1013 static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1014 {
1015 	return 0;
1016 }
1017 
1018 int snb_pci2phy_map_init(int devid)
1019 {
1020 	struct pci_dev *dev = NULL;
1021 	struct pci2phy_map *map;
1022 	int bus, segment;
1023 
1024 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
1025 	if (!dev)
1026 		return -ENOTTY;
1027 
1028 	bus = dev->bus->number;
1029 	segment = pci_domain_nr(dev->bus);
1030 
1031 	raw_spin_lock(&pci2phy_map_lock);
1032 	map = __find_pci2phy_map(segment);
1033 	if (!map) {
1034 		raw_spin_unlock(&pci2phy_map_lock);
1035 		pci_dev_put(dev);
1036 		return -ENOMEM;
1037 	}
1038 	map->pbus_to_dieid[bus] = 0;
1039 	raw_spin_unlock(&pci2phy_map_lock);
1040 
1041 	pci_dev_put(dev);
1042 
1043 	return 0;
1044 }
1045 
1046 static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1047 {
1048 	struct hw_perf_event *hwc = &event->hw;
1049 
1050 	/*
1051 	 * SNB IMC counters are 32-bit and are laid out back to back
1052 	 * in MMIO space. Therefore we must use a 32-bit accessor;
1053 	 * using readq() from uncore_mmio_read_counter() causes problems
1054 	 * because it reads 64 bits at a time. This is okay for
1055 	 * uncore_perf_event_update(), which drops the upper 32 bits, but
1056 	 * not for plain uncore_read_counter() as invoked in
1057 	 * uncore_pmu_event_start().
1058 	 */
1059 	return (u64)readl(box->io_addr + hwc->event_base);
1060 }
1061 
1062 static struct pmu snb_uncore_imc_pmu = {
1063 	.task_ctx_nr	= perf_invalid_context,
1064 	.event_init	= snb_uncore_imc_event_init,
1065 	.add		= uncore_pmu_event_add,
1066 	.del		= uncore_pmu_event_del,
1067 	.start		= uncore_pmu_event_start,
1068 	.stop		= uncore_pmu_event_stop,
1069 	.read		= uncore_pmu_event_read,
1070 	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
1071 };
1072 
1073 static struct intel_uncore_ops snb_uncore_imc_ops = {
1074 	.init_box	= snb_uncore_imc_init_box,
1075 	.exit_box	= uncore_mmio_exit_box,
1076 	.enable_box	= snb_uncore_imc_enable_box,
1077 	.disable_box	= snb_uncore_imc_disable_box,
1078 	.disable_event	= snb_uncore_imc_disable_event,
1079 	.enable_event	= snb_uncore_imc_enable_event,
1080 	.hw_config	= snb_uncore_imc_hw_config,
1081 	.read_counter	= snb_uncore_imc_read_counter,
1082 };
1083 
1084 static struct intel_uncore_type snb_uncore_imc = {
1085 	.name		= "imc",
1086 	.num_counters   = 5,
1087 	.num_boxes	= 1,
1088 	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
1089 	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
1090 	.freerunning	= snb_uncore_imc_freerunning,
1091 	.event_descs	= snb_uncore_imc_events,
1092 	.format_group	= &snb_uncore_imc_format_group,
1093 	.ops		= &snb_uncore_imc_ops,
1094 	.pmu		= &snb_uncore_imc_pmu,
1095 };
1096 
1097 static struct intel_uncore_type *snb_pci_uncores[] = {
1098 	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
1099 	NULL,
1100 };
1101 
1102 static const struct pci_device_id snb_uncore_pci_ids[] = {
1103 	IMC_UNCORE_DEV(SNB),
1104 	{ /* end: all zeroes */ },
1105 };
1106 
1107 static const struct pci_device_id ivb_uncore_pci_ids[] = {
1108 	IMC_UNCORE_DEV(IVB),
1109 	IMC_UNCORE_DEV(IVB_E3),
1110 	{ /* end: all zeroes */ },
1111 };
1112 
1113 static const struct pci_device_id hsw_uncore_pci_ids[] = {
1114 	IMC_UNCORE_DEV(HSW),
1115 	IMC_UNCORE_DEV(HSW_U),
1116 	{ /* end: all zeroes */ },
1117 };
1118 
1119 static const struct pci_device_id bdw_uncore_pci_ids[] = {
1120 	IMC_UNCORE_DEV(BDW),
1121 	{ /* end: all zeroes */ },
1122 };
1123 
1124 static const struct pci_device_id skl_uncore_pci_ids[] = {
1125 	IMC_UNCORE_DEV(SKL_Y),
1126 	IMC_UNCORE_DEV(SKL_U),
1127 	IMC_UNCORE_DEV(SKL_HD),
1128 	IMC_UNCORE_DEV(SKL_HQ),
1129 	IMC_UNCORE_DEV(SKL_SD),
1130 	IMC_UNCORE_DEV(SKL_SQ),
1131 	IMC_UNCORE_DEV(SKL_E3),
1132 	IMC_UNCORE_DEV(KBL_Y),
1133 	IMC_UNCORE_DEV(KBL_U),
1134 	IMC_UNCORE_DEV(KBL_UQ),
1135 	IMC_UNCORE_DEV(KBL_SD),
1136 	IMC_UNCORE_DEV(KBL_SQ),
1137 	IMC_UNCORE_DEV(KBL_HQ),
1138 	IMC_UNCORE_DEV(KBL_WQ),
1139 	IMC_UNCORE_DEV(CFL_2U),
1140 	IMC_UNCORE_DEV(CFL_4U),
1141 	IMC_UNCORE_DEV(CFL_4H),
1142 	IMC_UNCORE_DEV(CFL_6H),
1143 	IMC_UNCORE_DEV(CFL_2S_D),
1144 	IMC_UNCORE_DEV(CFL_4S_D),
1145 	IMC_UNCORE_DEV(CFL_6S_D),
1146 	IMC_UNCORE_DEV(CFL_8S_D),
1147 	IMC_UNCORE_DEV(CFL_4S_W),
1148 	IMC_UNCORE_DEV(CFL_6S_W),
1149 	IMC_UNCORE_DEV(CFL_8S_W),
1150 	IMC_UNCORE_DEV(CFL_4S_S),
1151 	IMC_UNCORE_DEV(CFL_6S_S),
1152 	IMC_UNCORE_DEV(CFL_8S_S),
1153 	IMC_UNCORE_DEV(AML_YD),
1154 	IMC_UNCORE_DEV(AML_YQ),
1155 	IMC_UNCORE_DEV(WHL_UQ),
1156 	IMC_UNCORE_DEV(WHL_4_UQ),
1157 	IMC_UNCORE_DEV(WHL_UD),
1158 	IMC_UNCORE_DEV(CML_H1),
1159 	IMC_UNCORE_DEV(CML_H2),
1160 	IMC_UNCORE_DEV(CML_H3),
1161 	IMC_UNCORE_DEV(CML_U1),
1162 	IMC_UNCORE_DEV(CML_U2),
1163 	IMC_UNCORE_DEV(CML_U3),
1164 	IMC_UNCORE_DEV(CML_S1),
1165 	IMC_UNCORE_DEV(CML_S2),
1166 	IMC_UNCORE_DEV(CML_S3),
1167 	IMC_UNCORE_DEV(CML_S4),
1168 	IMC_UNCORE_DEV(CML_S5),
1169 	{ /* end: all zeroes */ },
1170 };
1171 
1172 static const struct pci_device_id icl_uncore_pci_ids[] = {
1173 	IMC_UNCORE_DEV(ICL_U),
1174 	IMC_UNCORE_DEV(ICL_U2),
1175 	IMC_UNCORE_DEV(RKL_1),
1176 	IMC_UNCORE_DEV(RKL_2),
1177 	{ /* end: all zeroes */ },
1178 };
1179 
1180 static struct pci_driver snb_uncore_pci_driver = {
1181 	.name		= "snb_uncore",
1182 	.id_table	= snb_uncore_pci_ids,
1183 };
1184 
1185 static struct pci_driver ivb_uncore_pci_driver = {
1186 	.name		= "ivb_uncore",
1187 	.id_table	= ivb_uncore_pci_ids,
1188 };
1189 
1190 static struct pci_driver hsw_uncore_pci_driver = {
1191 	.name		= "hsw_uncore",
1192 	.id_table	= hsw_uncore_pci_ids,
1193 };
1194 
1195 static struct pci_driver bdw_uncore_pci_driver = {
1196 	.name		= "bdw_uncore",
1197 	.id_table	= bdw_uncore_pci_ids,
1198 };
1199 
1200 static struct pci_driver skl_uncore_pci_driver = {
1201 	.name		= "skl_uncore",
1202 	.id_table	= skl_uncore_pci_ids,
1203 };
1204 
1205 static struct pci_driver icl_uncore_pci_driver = {
1206 	.name		= "icl_uncore",
1207 	.id_table	= icl_uncore_pci_ids,
1208 };
1209 
1210 struct imc_uncore_pci_dev {
1211 	__u32 pci_id;
1212 	struct pci_driver *driver;
1213 };
1214 #define IMC_DEV(a, d) \
1215 	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
1216 
1217 static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
1218 	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
1219 	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
1220 	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
1221 	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
1222 	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
1223 	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
1224 	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
1225 	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
1226 	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
1227 	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
1228 	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
1229 	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
1230 	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
1231 	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
1232 	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
1233 	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
1234 	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
1235 	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
1236 	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
1237 	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
1238 	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
1239 	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
1240 	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
1241 	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
1242 	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
1243 	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
1244 	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
1245 	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
1246 	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
1247 	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
1248 	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
1249 	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
1250 	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
1251 	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
1252 	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
1253 	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
1254 	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
1255 	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
1256 	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
1257 	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
1258 	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
1259 	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
1260 	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
1261 	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
1262 	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
1263 	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
1264 	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
1265 	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
1266 	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
1267 	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
1268 	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
1269 	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
1270 	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
1271 	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
1272 	{  /* end marker */ }
1273 };
1274 
1275 
1276 #define for_each_imc_pci_id(x, t) \
1277 	for (x = (t); (x)->pci_id; x++)
1278 
1279 static struct pci_driver *imc_uncore_find_dev(void)
1280 {
1281 	const struct imc_uncore_pci_dev *p;
1282 	int ret;
1283 
1284 	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
1285 		ret = snb_pci2phy_map_init(p->pci_id);
1286 		if (ret == 0)
1287 			return p->driver;
1288 	}
1289 	return NULL;
1290 }
1291 
1292 static int imc_uncore_pci_init(void)
1293 {
1294 	struct pci_driver *imc_drv = imc_uncore_find_dev();
1295 
1296 	if (!imc_drv)
1297 		return -ENODEV;
1298 
1299 	uncore_pci_uncores = snb_pci_uncores;
1300 	uncore_pci_driver = imc_drv;
1301 
1302 	return 0;
1303 }
1304 
1305 int snb_uncore_pci_init(void)
1306 {
1307 	return imc_uncore_pci_init();
1308 }
1309 
1310 int ivb_uncore_pci_init(void)
1311 {
1312 	return imc_uncore_pci_init();
1313 }
1314 int hsw_uncore_pci_init(void)
1315 {
1316 	return imc_uncore_pci_init();
1317 }
1318 
1319 int bdw_uncore_pci_init(void)
1320 {
1321 	return imc_uncore_pci_init();
1322 }
1323 
1324 int skl_uncore_pci_init(void)
1325 {
1326 	return imc_uncore_pci_init();
1327 }
1328 
1329 /* end of Sandy Bridge uncore support */
1330 
1331 /* Nehalem uncore support */
1332 static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
1333 {
1334 	wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, 0);
1335 }
1336 
1337 static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
1338 {
1339 	wrmsrq(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
1340 }
1341 
1342 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1343 {
1344 	struct hw_perf_event *hwc = &event->hw;
1345 
1346 	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
1347 		wrmsrq(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
1348 	else
1349 		wrmsrq(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
1350 }
1351 
1352 static struct attribute *nhm_uncore_formats_attr[] = {
1353 	&format_attr_event.attr,
1354 	&format_attr_umask.attr,
1355 	&format_attr_edge.attr,
1356 	&format_attr_inv.attr,
1357 	&format_attr_cmask8.attr,
1358 	NULL,
1359 };
1360 
1361 static const struct attribute_group nhm_uncore_format_group = {
1362 	.name = "format",
1363 	.attrs = nhm_uncore_formats_attr,
1364 };
1365 
1366 static struct uncore_event_desc nhm_uncore_events[] = {
1367 	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
1368 	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
1369 	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
1370 	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
1371 	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
1372 	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
1373 	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
1374 	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
1375 	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
1376 	{ /* end: all zeroes */ },
1377 };
1378 
1379 static struct intel_uncore_ops nhm_uncore_msr_ops = {
1380 	.disable_box	= nhm_uncore_msr_disable_box,
1381 	.enable_box	= nhm_uncore_msr_enable_box,
1382 	.disable_event	= snb_uncore_msr_disable_event,
1383 	.enable_event	= nhm_uncore_msr_enable_event,
1384 	.read_counter	= uncore_msr_read_counter,
1385 };
1386 
1387 static struct intel_uncore_type nhm_uncore = {
1388 	.name		= "",
1389 	.num_counters   = 8,
1390 	.num_boxes	= 1,
1391 	.perf_ctr_bits	= 48,
1392 	.fixed_ctr_bits	= 48,
1393 	.event_ctl	= NHM_UNC_PERFEVTSEL0,
1394 	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
1395 	.fixed_ctr	= NHM_UNC_FIXED_CTR,
1396 	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
1397 	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
1398 	.event_descs	= nhm_uncore_events,
1399 	.ops		= &nhm_uncore_msr_ops,
1400 	.format_group	= &nhm_uncore_format_group,
1401 };
1402 
1403 static struct intel_uncore_type *nhm_msr_uncores[] = {
1404 	&nhm_uncore,
1405 	NULL,
1406 };
1407 
1408 void nhm_uncore_cpu_init(void)
1409 {
1410 	uncore_msr_uncores = nhm_msr_uncores;
1411 }
1412 
1413 /* end of Nehalem uncore support */
1414 
1415 /* Tiger Lake MMIO uncore support */
1416 
1417 static const struct pci_device_id tgl_uncore_pci_ids[] = {
1418 	IMC_UNCORE_DEV(TGL_U1),
1419 	IMC_UNCORE_DEV(TGL_U2),
1420 	IMC_UNCORE_DEV(TGL_U3),
1421 	IMC_UNCORE_DEV(TGL_U4),
1422 	IMC_UNCORE_DEV(TGL_H),
1423 	IMC_UNCORE_DEV(ADL_1),
1424 	IMC_UNCORE_DEV(ADL_2),
1425 	IMC_UNCORE_DEV(ADL_3),
1426 	IMC_UNCORE_DEV(ADL_4),
1427 	IMC_UNCORE_DEV(ADL_5),
1428 	IMC_UNCORE_DEV(ADL_6),
1429 	IMC_UNCORE_DEV(ADL_7),
1430 	IMC_UNCORE_DEV(ADL_8),
1431 	IMC_UNCORE_DEV(ADL_9),
1432 	IMC_UNCORE_DEV(ADL_10),
1433 	IMC_UNCORE_DEV(ADL_11),
1434 	IMC_UNCORE_DEV(ADL_12),
1435 	IMC_UNCORE_DEV(ADL_13),
1436 	IMC_UNCORE_DEV(ADL_14),
1437 	IMC_UNCORE_DEV(ADL_15),
1438 	IMC_UNCORE_DEV(ADL_16),
1439 	IMC_UNCORE_DEV(ADL_17),
1440 	IMC_UNCORE_DEV(ADL_18),
1441 	IMC_UNCORE_DEV(ADL_19),
1442 	IMC_UNCORE_DEV(ADL_20),
1443 	IMC_UNCORE_DEV(ADL_21),
1444 	IMC_UNCORE_DEV(RPL_1),
1445 	IMC_UNCORE_DEV(RPL_2),
1446 	IMC_UNCORE_DEV(RPL_3),
1447 	IMC_UNCORE_DEV(RPL_4),
1448 	IMC_UNCORE_DEV(RPL_5),
1449 	IMC_UNCORE_DEV(RPL_6),
1450 	IMC_UNCORE_DEV(RPL_7),
1451 	IMC_UNCORE_DEV(RPL_8),
1452 	IMC_UNCORE_DEV(RPL_9),
1453 	IMC_UNCORE_DEV(RPL_10),
1454 	IMC_UNCORE_DEV(RPL_11),
1455 	IMC_UNCORE_DEV(RPL_12),
1456 	IMC_UNCORE_DEV(RPL_13),
1457 	IMC_UNCORE_DEV(RPL_14),
1458 	IMC_UNCORE_DEV(RPL_15),
1459 	IMC_UNCORE_DEV(RPL_16),
1460 	IMC_UNCORE_DEV(RPL_17),
1461 	IMC_UNCORE_DEV(RPL_18),
1462 	IMC_UNCORE_DEV(RPL_19),
1463 	IMC_UNCORE_DEV(RPL_20),
1464 	IMC_UNCORE_DEV(RPL_21),
1465 	IMC_UNCORE_DEV(RPL_22),
1466 	IMC_UNCORE_DEV(RPL_23),
1467 	IMC_UNCORE_DEV(RPL_24),
1468 	IMC_UNCORE_DEV(RPL_25),
1469 	IMC_UNCORE_DEV(MTL_1),
1470 	IMC_UNCORE_DEV(MTL_2),
1471 	IMC_UNCORE_DEV(MTL_3),
1472 	IMC_UNCORE_DEV(MTL_4),
1473 	IMC_UNCORE_DEV(MTL_5),
1474 	IMC_UNCORE_DEV(MTL_6),
1475 	IMC_UNCORE_DEV(MTL_7),
1476 	IMC_UNCORE_DEV(MTL_8),
1477 	IMC_UNCORE_DEV(MTL_9),
1478 	IMC_UNCORE_DEV(MTL_10),
1479 	IMC_UNCORE_DEV(MTL_11),
1480 	IMC_UNCORE_DEV(MTL_12),
1481 	IMC_UNCORE_DEV(MTL_13),
1482 	{ /* end: all zeroes */ }
1483 };
1484 
1485 enum perf_tgl_uncore_imc_freerunning_types {
1486 	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
1487 	TGL_MMIO_UNCORE_IMC_DATA_READ,
1488 	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
1489 	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
1490 };
1491 
1492 static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
1493 	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
1494 	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
1495 	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
1496 };
1497 
1498 static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
1499 	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
1500 	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
1501 	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
1502 };
1503 
1504 static struct uncore_event_desc tgl_uncore_imc_events[] = {
1505 	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
1506 	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
1507 	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),
1508 
1509 	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
1510 	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
1511 	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),
1512 
1513 	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
1514 	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
1515 	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),
1516 
1517 	{ /* end: all zeroes */ }
1518 };
1519 
1520 static struct pci_dev *tgl_uncore_get_mc_dev(void)
1521 {
1522 	const struct pci_device_id *ids = tgl_uncore_pci_ids;
1523 	struct pci_dev *mc_dev = NULL;
1524 
1525 	while (ids && ids->vendor) {
1526 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
1527 		if (mc_dev)
1528 			return mc_dev;
1529 		ids++;
1530 	}
1531 
1532 	/* Just try to grab 00:00.0 device */
1533 	if (!mc_dev)
1534 		mc_dev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
1535 
1536 	return mc_dev;
1537 }
1538 
1539 #define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
1540 #define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000
1541 
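/*
 * Common helper for the client MMIO IMCs: read the BAR, check its
 * enable bit (BIT(0)), step the address by "step" per PMU instance so
 * multiple boxes behind one BAR can be mapped, then ioremap the
 * counters at base_offset.
 */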
1542 static void
1543 uncore_get_box_mmio_addr(struct intel_uncore_box *box,
1544 			 unsigned int base_offset,
1545 			 int bar_offset, int step)
1546 {
1547 	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
1548 	struct intel_uncore_pmu *pmu = box->pmu;
1549 	struct intel_uncore_type *type = pmu->type;
1550 	resource_size_t addr;
1551 	u32 bar;
1552 
1553 	if (!pdev) {
1554 		pr_warn("perf uncore: Cannot find matched IMC device.\n");
1555 		return;
1556 	}
1557 
1558 	pci_read_config_dword(pdev, bar_offset, &bar);
1559 	if (!(bar & BIT(0))) {
1560 		pr_warn("perf uncore: BAR 0x%x is disabled. Failed to map %s counters.\n",
1561 			bar_offset, type->name);
1562 		pci_dev_put(pdev);
1563 		return;
1564 	}
1565 	bar &= ~BIT(0);
1566 	addr = (resource_size_t)(bar + step * pmu->pmu_idx);
1567 
1568 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1569 	pci_read_config_dword(pdev, bar_offset + 4, &bar);
1570 	addr |= ((resource_size_t)bar << 32);
1571 #endif
1572 
1573 	addr += base_offset;
1574 	box->io_addr = ioremap(addr, type->mmio_map_size);
1575 	if (!box->io_addr)
1576 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
1577 
1578 	pci_dev_put(pdev);
1579 }
1580 
1581 static void __uncore_imc_init_box(struct intel_uncore_box *box,
1582 				  unsigned int base_offset)
1583 {
1584 	uncore_get_box_mmio_addr(box, base_offset,
1585 				 SNB_UNCORE_PCI_IMC_BAR_OFFSET,
1586 				 TGL_UNCORE_MMIO_IMC_MEM_OFFSET);
1587 }
1588 
1589 static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
1590 {
1591 	__uncore_imc_init_box(box, 0);
1592 }
1593 
1594 static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
1595 	.init_box	= tgl_uncore_imc_freerunning_init_box,
1596 	.exit_box	= uncore_mmio_exit_box,
1597 	.read_counter	= uncore_mmio_read_counter,
1598 	.hw_config	= uncore_freerunning_hw_config,
1599 };
1600 
1601 static struct attribute *tgl_uncore_imc_formats_attr[] = {
1602 	&format_attr_event.attr,
1603 	&format_attr_umask.attr,
1604 	NULL
1605 };
1606 
1607 static const struct attribute_group tgl_uncore_imc_format_group = {
1608 	.name = "format",
1609 	.attrs = tgl_uncore_imc_formats_attr,
1610 };
1611 
1612 static struct intel_uncore_type tgl_uncore_imc_free_running = {
1613 	.name			= "imc_free_running",
1614 	.num_counters		= 3,
1615 	.num_boxes		= 2,
1616 	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
1617 	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
1618 	.freerunning		= tgl_uncore_imc_freerunning,
1619 	.ops			= &tgl_uncore_imc_freerunning_ops,
1620 	.event_descs		= tgl_uncore_imc_events,
1621 	.format_group		= &tgl_uncore_imc_format_group,
1622 };
1623 
static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)

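/* _INT resets both the control and the counter registers, i.e. (re)initializes a box. */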
static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

#define MMIO_UNCORE_COMMON_OPS()				\
	.exit_box	= uncore_mmio_exit_box,		\
	.disable_box	= adl_uncore_mmio_disable_box,	\
	.enable_box	= adl_uncore_mmio_enable_box,	\
	.disable_event	= intel_generic_uncore_mmio_disable_event,	\
	.enable_event	= intel_generic_uncore_mmio_enable_event,	\
	.read_counter	= uncore_mmio_read_counter,

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

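/* Initializer fields: counter_base, counter_offset, box_offset, num_counters, bits. */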
static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */

/* Lunar Lake MMIO uncore support */
#define LNL_UNCORE_PCI_SAFBAR_OFFSET		0x68
#define LNL_UNCORE_MAP_SIZE			0x1000
#define LNL_UNCORE_SNCU_BASE			0xE4B000
#define LNL_UNCORE_SNCU_CTR			0x390
#define LNL_UNCORE_SNCU_CTRL			0x398
#define LNL_UNCORE_SNCU_BOX_CTL			0x380
#define LNL_UNCORE_GLOBAL_CTL			0x700
#define LNL_UNCORE_HBO_BASE			0xE54000
#define LNL_UNCORE_HBO_OFFSET			-4096
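/* Negative per-box stride: HBO box 1 is mapped 0x1000 below HBO box 0. */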
#define LNL_UNCORE_HBO_CTR			0x570
#define LNL_UNCORE_HBO_CTRL			0x550
#define LNL_UNCORE_HBO_BOX_CTL			0x548

#define LNL_UNC_CTL_THRESHOLD			0xff000000
#define LNL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 LNL_UNC_CTL_THRESHOLD)

static struct attribute *lnl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold2.attr,
	NULL
};

static const struct attribute_group lnl_uncore_format_group = {
	.name		= "format",
	.attrs		= lnl_uncore_formats_attr,
};

static void lnl_uncore_hbo_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_HBO_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 LNL_UNCORE_HBO_OFFSET);
}

static struct intel_uncore_ops lnl_uncore_hbo_ops = {
	.init_box	= lnl_uncore_hbo_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_hbo = {
	.name		= "hbo",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_HBO_CTR,
	.event_ctl	= LNL_UNCORE_HBO_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_HBO_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_hbo_ops,
	.format_group	= &lnl_uncore_format_group,
};

static void lnl_uncore_sncu_init_box(struct intel_uncore_box *box)
{
	uncore_get_box_mmio_addr(box, LNL_UNCORE_SNCU_BASE,
				 LNL_UNCORE_PCI_SAFBAR_OFFSET,
				 0);

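	/* Reset control and counters via the global control shared by the sNCU. */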
	if (box->io_addr)
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + LNL_UNCORE_GLOBAL_CTL);
}

static struct intel_uncore_ops lnl_uncore_sncu_ops = {
	.init_box	= lnl_uncore_sncu_init_box,
	MMIO_UNCORE_COMMON_OPS()
};

static struct intel_uncore_type lnl_uncore_sncu = {
	.name		= "sncu",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 64,
	.perf_ctr	= LNL_UNCORE_SNCU_CTR,
	.event_ctl	= LNL_UNCORE_SNCU_CTRL,
	.event_mask	= LNL_UNC_RAW_EVENT_MASK,
	.box_ctl	= LNL_UNCORE_SNCU_BOX_CTL,
	.mmio_map_size	= LNL_UNCORE_MAP_SIZE,
	.ops		= &lnl_uncore_sncu_ops,
	.format_group	= &lnl_uncore_format_group,
};

static struct intel_uncore_type *lnl_mmio_uncores[] = {
	&adl_uncore_imc,
	&lnl_uncore_hbo,
	&lnl_uncore_sncu,
	&adl_uncore_imc_free_running,
	NULL
};

void lnl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = lnl_mmio_uncores;
}

/* end of Lunar Lake MMIO uncore support */

/* Panther Lake uncore support */

#define UNCORE_PTL_MAX_NUM_UNCORE_TYPES		42
#define UNCORE_PTL_TYPE_IMC			6
#define UNCORE_PTL_TYPE_SNCU			34
#define UNCORE_PTL_TYPE_HBO			41

#define PTL_UNCORE_GLOBAL_CTL_OFFSET		0x380

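/*
 * PTL uncore units are enumerated via the uncore discovery table; only the
 * type name and MMIO map size are fixed here, the remaining fields are
 * filled in by uncore_get_uncores().
 */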
static struct intel_uncore_type ptl_uncore_imc = {
	.name			= "imc",
	.mmio_map_size		= 0xf00,
};

static void ptl_uncore_sncu_init_box(struct intel_uncore_box *box)
{
	intel_generic_uncore_mmio_init_box(box);

	/* Clear the global freeze bit */
	if (box->io_addr)
		writel(0, box->io_addr + PTL_UNCORE_GLOBAL_CTL_OFFSET);
}

static struct intel_uncore_ops ptl_uncore_sncu_ops = {
	.init_box		= ptl_uncore_sncu_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= intel_generic_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};

static struct intel_uncore_type ptl_uncore_sncu = {
	.name			= "sncu",
	.ops			= &ptl_uncore_sncu_ops,
	.mmio_map_size		= 0xf00,
};

static struct intel_uncore_type ptl_uncore_hbo = {
	.name			= "hbo",
	.mmio_map_size		= 0xf00,
};

static struct intel_uncore_type *ptl_uncores[UNCORE_PTL_MAX_NUM_UNCORE_TYPES] = {
	[UNCORE_PTL_TYPE_IMC] = &ptl_uncore_imc,
	[UNCORE_PTL_TYPE_SNCU] = &ptl_uncore_sncu,
	[UNCORE_PTL_TYPE_HBO] = &ptl_uncore_hbo,
};

#define UNCORE_PTL_MMIO_EXTRA_UNCORES		1

static struct intel_uncore_type *ptl_mmio_extra_uncores[UNCORE_PTL_MMIO_EXTRA_UNCORES] = {
	&adl_uncore_imc_free_running,
};

void ptl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
						 UNCORE_PTL_MMIO_EXTRA_UNCORES,
						 ptl_mmio_extra_uncores,
						 UNCORE_PTL_MAX_NUM_UNCORE_TYPES,
						 ptl_uncores);
}

static struct intel_uncore_type ptl_uncore_ia_core_bridge = {
	.name		= "ia_core_bridge",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= PTL_UNC_IA_CORE_BRIDGE_PER_CTR0,
	.event_ctl	= PTL_UNC_IA_CORE_BRIDGE_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type ptl_uncore_santa = {
	.name		= "santa",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= PTL_UNC_SANTA_CTR0,
	.event_ctl	= PTL_UNC_SANTA_CTRL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type *ptl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&ptl_uncore_ia_core_bridge,
	&ptl_uncore_santa,
	&mtl_uncore_cncu,
	NULL
};

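/* PTL reuses the MTL cbox and CNCU types; their geometry and MSR layout are adjusted below. */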
void ptl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = 6;
	mtl_uncore_cbox.ops = &lnl_uncore_msr_ops;

	mtl_uncore_cncu.num_counters = 2;
	mtl_uncore_cncu.num_boxes = 2;
	mtl_uncore_cncu.msr_offset = PTL_UNC_CNCU_MSR_OFFSET;
	mtl_uncore_cncu.single_fixed = 0;

	uncore_msr_uncores = ptl_msr_uncores;
}

/* end of Panther Lake uncore support */

/* Nova Lake uncore support */

static struct intel_uncore_type *nvl_msr_uncores[] = {
	&mtl_uncore_cbox,
	&ptl_uncore_santa,
	&mtl_uncore_cncu,
	NULL
};

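/* NVL keeps the PTL/MTL unit types but relocates their counter and control registers. */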
void nvl_uncore_cpu_init(void)
{
	mtl_uncore_cbox.num_boxes = 12;
	mtl_uncore_cbox.perf_ctr = NVL_UNC_CBOX_PER_CTR0;
	mtl_uncore_cbox.event_ctl = NVL_UNC_CBOX_PERFEVTSEL0;

	ptl_uncore_santa.perf_ctr = NVL_UNC_SANTA_CTR0;
	ptl_uncore_santa.event_ctl = NVL_UNC_SANTA_CTRL0;

	mtl_uncore_cncu.box_ctl = NVL_UNC_CNCU_BOX_CTL;
	mtl_uncore_cncu.fixed_ctr = NVL_UNC_CNCU_FIXED_CTR;
	mtl_uncore_cncu.fixed_ctl = NVL_UNC_CNCU_FIXED_CTRL;

	uncore_msr_uncores = nvl_msr_uncores;
}

/* end of Nova Lake uncore support */
2022