xref: /linux/drivers/perf/apple_m1_cpu_pmu.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * CPU PMU driver for the Apple M1 and derivatives
4  *
5  * Copyright (C) 2021 Google LLC
6  *
7  * Author: Marc Zyngier <maz@kernel.org>
8  *
9  * Most of the information used in this driver was provided by the
10  * Asahi Linux project. The rest was experimentally discovered.
11  */
12 
13 #include <linux/of.h>
14 #include <linux/perf/arm_pmu.h>
15 #include <linux/platform_device.h>
16 
17 #include <asm/apple_m1_pmu.h>
18 #include <asm/irq_regs.h>
19 #include <asm/perf_event.h>
20 
/* Number of implemented event counters (PMC0..PMC9) */
#define M1_PMU_NR_COUNTERS		10

/* The raw event number lives in the low byte of the event config */
#define M1_PMU_CFG_EVENT		GENMASK(7, 0)

/*
 * Counter affinity masks: bit N set means the event can be counted
 * on hardware counter N.
 */
#define ANY_BUT_0_1			GENMASK(9, 2)
#define ONLY_2_TO_7			GENMASK(7, 2)
#define ONLY_2_4_6			(BIT(2) | BIT(4) | BIT(6))
#define ONLY_5_6_7			(BIT(5) | BIT(6) | BIT(7))
29 
30 /*
31  * Description of the events we actually know about, as well as those with
32  * a specific counter affinity. Yes, this is a grand total of two known
33  * counters, and the rest is anybody's guess.
34  *
35  * Not all counters can count all events. Counters #0 and #1 are wired to
36  * count cycles and instructions respectively, and some events have
37  * bizarre mappings (every other counter, or even *one* counter). These
38  * restrictions equally apply to both P and E cores.
39  *
40  * It is worth noting that the PMUs attached to P and E cores are likely
41  * to be different because the underlying uarches are different. At the
42  * moment, we don't really need to distinguish between the two because we
43  * know next to nothing about the events themselves, and we already have
44  * per cpu-type PMU abstractions.
45  *
46  * If we eventually find out that the events are different across
47  * implementations, we'll have to introduce per cpu-type tables.
48  */
enum m1_pmu_events {
	M1_PMU_PERFCTR_RETIRE_UOP				= 0x1,
	M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE			= 0x2,
	M1_PMU_PERFCTR_L1I_TLB_FILL				= 0x4,
	M1_PMU_PERFCTR_L1D_TLB_FILL				= 0x5,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION		= 0x7,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA			= 0x8,
	M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION			= 0xa,
	M1_PMU_PERFCTR_L2_TLB_MISS_DATA				= 0xb,
	M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC		= 0xd,
	M1_PMU_PERFCTR_SCHEDULE_UOP				= 0x52,
	M1_PMU_PERFCTR_INTERRUPT_PENDING			= 0x6c,
	M1_PMU_PERFCTR_MAP_STALL_DISPATCH			= 0x70,
	M1_PMU_PERFCTR_MAP_REWIND				= 0x75,
	M1_PMU_PERFCTR_MAP_STALL				= 0x76,
	M1_PMU_PERFCTR_MAP_INT_UOP				= 0x7c,
	M1_PMU_PERFCTR_MAP_LDST_UOP				= 0x7d,
	M1_PMU_PERFCTR_MAP_SIMD_UOP				= 0x7e,
	M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC		= 0x84,
	M1_PMU_PERFCTR_INST_ALL					= 0x8c,
	M1_PMU_PERFCTR_INST_BRANCH				= 0x8d,
	M1_PMU_PERFCTR_INST_BRANCH_CALL				= 0x8e,
	M1_PMU_PERFCTR_INST_BRANCH_RET				= 0x8f,
	M1_PMU_PERFCTR_INST_BRANCH_TAKEN			= 0x90,
	M1_PMU_PERFCTR_INST_BRANCH_INDIR			= 0x93,
	M1_PMU_PERFCTR_INST_BRANCH_COND				= 0x94,
	M1_PMU_PERFCTR_INST_INT_LD				= 0x95,
	M1_PMU_PERFCTR_INST_INT_ST				= 0x96,
	M1_PMU_PERFCTR_INST_INT_ALU				= 0x97,
	M1_PMU_PERFCTR_INST_SIMD_LD				= 0x98,
	M1_PMU_PERFCTR_INST_SIMD_ST				= 0x99,
	M1_PMU_PERFCTR_INST_SIMD_ALU				= 0x9a,
	M1_PMU_PERFCTR_INST_LDST				= 0x9b,
	M1_PMU_PERFCTR_INST_BARRIER				= 0x9c,
	/* UNKNOWN_* events count something real, but what is unidentified */
	M1_PMU_PERFCTR_UNKNOWN_9f				= 0x9f,
	M1_PMU_PERFCTR_L1D_TLB_ACCESS				= 0xa0,
	M1_PMU_PERFCTR_L1D_TLB_MISS				= 0xa1,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST			= 0xa2,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD			= 0xa3,
	M1_PMU_PERFCTR_LD_UNIT_UOP				= 0xa6,
	M1_PMU_PERFCTR_ST_UNIT_UOP				= 0xa7,
	M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK			= 0xa8,
	M1_PMU_PERFCTR_LDST_X64_UOP				= 0xb1,
	M1_PMU_PERFCTR_LDST_XPG_UOP				= 0xb2,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC			= 0xb3,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL			= 0xb4,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC		= 0xbf,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC		= 0xc0,
	M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC			= 0xc1,
	M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC	= 0xc4,
	M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC		= 0xc5,
	M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC		= 0xc6,
	M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC		= 0xc8,
	M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC	= 0xca,
	M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC			= 0xcb,
	M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND			= 0xd4,
	M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE			= 0xd6,
	M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND			= 0xdb,
	M1_PMU_PERFCTR_FETCH_RESTART				= 0xde,
	M1_PMU_PERFCTR_ST_NT_UOP				= 0xe5,
	M1_PMU_PERFCTR_LD_NT_UOP				= 0xe6,
	M1_PMU_PERFCTR_UNKNOWN_f5				= 0xf5,
	M1_PMU_PERFCTR_UNKNOWN_f6				= 0xf6,
	M1_PMU_PERFCTR_UNKNOWN_f7				= 0xf7,
	M1_PMU_PERFCTR_UNKNOWN_f8				= 0xf8,
	M1_PMU_PERFCTR_UNKNOWN_fd				= 0xfd,
	/* Highest possible event number: all bits of M1_PMU_CFG_EVENT */
	M1_PMU_PERFCTR_LAST					= M1_PMU_CFG_EVENT,

	/*
	 * From this point onwards, these are not actual HW events,
	 * but attributes that get stored in hw->config_base.
	 */
	M1_PMU_CFG_COUNT_USER					= BIT(8),
	M1_PMU_CFG_COUNT_KERNEL					= BIT(9),
};
124 
125 /*
126  * Per-event affinity table. Most events can be installed on counter
127  * 2-9, but there are a number of exceptions. Note that this table
128  * has been created experimentally, and I wouldn't be surprised if more
129  * counters had strange affinities.
130  */
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
	/* Default: any general-purpose counter (2-9) */
	[0 ... M1_PMU_PERFCTR_LAST]				= ANY_BUT_0_1,
	[M1_PMU_PERFCTR_RETIRE_UOP]				= BIT(7),
	/* Counter 0 is hardwired to count cycles */
	[M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE]			= ANY_BUT_0_1 | BIT(0),
	/* Counter 1 is hardwired to count instructions */
	[M1_PMU_PERFCTR_INST_ALL]				= BIT(7) | BIT(1),
	[M1_PMU_PERFCTR_INST_BRANCH]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_CALL]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_RET]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_TAKEN]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_INDIR]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_COND]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_LD]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_ST]				= BIT(7),
	[M1_PMU_PERFCTR_INST_INT_ALU]				= BIT(7),
	[M1_PMU_PERFCTR_INST_SIMD_LD]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ST]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ALU]				= BIT(7),
	[M1_PMU_PERFCTR_INST_LDST]				= BIT(7),
	[M1_PMU_PERFCTR_INST_BARRIER]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9f]				= BIT(7),
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_f5]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f6]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f7]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f8]				= ONLY_2_TO_7,
	[M1_PMU_PERFCTR_UNKNOWN_fd]				= ONLY_2_4_6,
};
166 
/* Mapping of generic perf events to M1 PMU event numbers */
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]		= M1_PMU_PERFCTR_INST_ALL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= M1_PMU_PERFCTR_INST_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
174 
175 /* sysfs definitions */
176 static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
177 					struct device_attribute *attr,
178 					char *page)
179 {
180 	struct perf_pmu_events_attr *pmu_attr;
181 
182 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
183 
184 	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
185 }
186 
/* Declare one sysfs event attribute backed by m1_pmu_events_sysfs_show() */
#define M1_PMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)

/* Named events exposed under events/ in sysfs */
static struct attribute *m1_pmu_event_attrs[] = {
	M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
	M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
	NULL,
};

static const struct attribute_group m1_pmu_events_attr_group = {
	.name = "events",
	.attrs = m1_pmu_event_attrs,
};

/* "event" spans config bits [7:0], matching M1_PMU_CFG_EVENT */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *m1_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group m1_pmu_format_attr_group = {
	.name = "format",
	.attrs = m1_pmu_format_attrs,
};
212 
213 /* Low level accessors. No synchronisation. */
/*
 * Each counter is a distinct sysreg (SYS_IMP_APL_PMCn_EL1), so the
 * register name must be built at compile time; these macros expand to
 * one switch case per counter index.
 */
#define PMU_READ_COUNTER(_idx)						\
	case _idx:	return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)

#define PMU_WRITE_COUNTER(_val, _idx)					\
	case _idx:							\
		write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1);	\
		return
221 
/* Read the raw value of hardware counter @index (0-9) */
static u64 m1_pmu_read_hw_counter(unsigned int index)
{
	switch (index) {
		PMU_READ_COUNTER(0);
		PMU_READ_COUNTER(1);
		PMU_READ_COUNTER(2);
		PMU_READ_COUNTER(3);
		PMU_READ_COUNTER(4);
		PMU_READ_COUNTER(5);
		PMU_READ_COUNTER(6);
		PMU_READ_COUNTER(7);
		PMU_READ_COUNTER(8);
		PMU_READ_COUNTER(9);
	}

	/* Only reached with an out-of-range index */
	BUG();
}
239 
/* Write @val to hardware counter @index (0-9) */
static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
	switch (index) {
		PMU_WRITE_COUNTER(val, 0);
		PMU_WRITE_COUNTER(val, 1);
		PMU_WRITE_COUNTER(val, 2);
		PMU_WRITE_COUNTER(val, 3);
		PMU_WRITE_COUNTER(val, 4);
		PMU_WRITE_COUNTER(val, 5);
		PMU_WRITE_COUNTER(val, 6);
		PMU_WRITE_COUNTER(val, 7);
		PMU_WRITE_COUNTER(val, 8);
		PMU_WRITE_COUNTER(val, 9);
	}

	/* Only reached with an out-of-range index */
	BUG();
}
257 
258 #define get_bit_offset(index, mask)	(__ffs(mask) + (index))
259 
/*
 * Enable (@en = true) or disable (@en = false) counter @index by
 * toggling its enable bit in PMCR0. Counters 0-7 and 8-9 live in two
 * separate bit fields of the register.
 */
static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
284 
/* Convenience wrappers around __m1_pmu_enable_counter() */
static void m1_pmu_enable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, true);
}

static void m1_pmu_disable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, false);
}
294 
/*
 * Enable or disable the overflow interrupt (PMI) for counter @index,
 * again split across the 0-7 and 8-9 bit fields of PMCR0.
 */
static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
	u64 val, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	if (en)
		val |= bit;
	else
		val &= ~bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
}
319 
/* Convenience wrappers around __m1_pmu_enable_counter_interrupt() */
static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, true);
}

static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, false);
}
329 
/*
 * Program counter @index to count @event, filtered by privilege level:
 * @user selects EL0 counting, @kernel selects EL1 counting (both may be
 * set). The filter bits live in PMCR1, the event selection in
 * PMESR0/PMESR1.
 */
static void m1_pmu_configure_counter(unsigned int index, u8 event,
				     bool user, bool kernel)
{
	u64 val, user_bit, kernel_bit;
	int shift;

	switch (index) {
	case 0 ... 7:
		user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
		kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
		break;
	case 8 ... 9:
		user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
		kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	if (user)
		val |= user_bit;
	else
		val &= ~user_bit;

	if (kernel)
		val |= kernel_bit;
	else
		val &= ~kernel_bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);

	/*
	 * Counters 0 and 1 have fixed events. For anything else,
	 * place the event at the expected location in the relevant
	 * register (PMESR0 holds the event configuration for counters
	 * 2-5, resp. PMESR1 for counters 6-9).
	 */
	switch (index) {
	case 0 ... 1:
		break;
	case 2 ... 5:
		/* 8 bits of event selection per counter */
		shift = (index - 2) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
		break;
	case 6 ... 9:
		shift = (index - 6) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
		break;
	}
}
388 
389 /* arm_pmu backend */
/*
 * Configure and start counting for @event on its assigned counter
 * (hw.idx). The counter is fully quiesced before being reprogrammed.
 */
static void m1_pmu_enable_event(struct perf_event *event)
{
	bool user, kernel;
	u8 evt;

	/* Unpack event number and EL filter bits from config_base */
	evt = event->hw.config_base & M1_PMU_CFG_EVENT;
	user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
	kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;

	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();

	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
	m1_pmu_enable_counter(event->hw.idx);
	m1_pmu_enable_counter_interrupt(event->hw.idx);
	isb();
}
408 
/* Stop counting for @event: mask its PMI and disable its counter */
static void m1_pmu_disable_event(struct perf_event *event)
{
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();
}
415 
/*
 * Counter overflow interrupt handler. Reads the overflow status from
 * PMSR, updates and re-programs each active event, and pushes samples
 * to perf.
 */
static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	u64 overflow, state;
	int idx;

	overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
	if (!overflow) {
		/* Spurious interrupt? Ack it by clearing PMCR0_IACT. */
		state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
		state &= ~PMCR0_IACT;
		write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
		isb();
		return IRQ_NONE;
	}

	/* Stop the PMU while we update the events */
	cpu_pmu->stop(cpu_pmu);

	regs = get_irq_regs();

	for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_sample_data data;

		if (!event)
			continue;

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/* perf asked us to throttle: keep the event disabled */
		if (perf_event_overflow(event, &data, regs))
			m1_pmu_disable_event(event);
	}

	cpu_pmu->start(cpu_pmu);

	return IRQ_HANDLED;
}
457 
/* arm_pmu counter accessors, dispatching on the event's counter index */
static u64 m1_pmu_read_counter(struct perf_event *event)
{
	return m1_pmu_read_hw_counter(event->hw.idx);
}

static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
	m1_pmu_write_hw_counter(value, event->hw.idx);
	isb();
}
468 
/*
 * Allocate a hardware counter for @event, honouring the per-event
 * affinity table. Returns the counter index, or -EAGAIN if no suitable
 * counter is free.
 */
static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
	unsigned long affinity = m1_pmu_event_affinity[evtype];
	int idx;

	/*
	 * Place the event on the first free counter that can count
	 * this event.
	 *
	 * We could do a better job if we had a view of all the events
	 * counting on the PMU at any given time, and by placing the
	 * most constraining events first.
	 */
	for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	return -EAGAIN;
}
491 
/* Release the counter previously allocated to @event */
static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
497 
/*
 * Set the PMU interrupt mode (PMCR0_IMODE field) and clear any pending
 * interrupt (PMCR0_IACT).
 */
static void __m1_pmu_set_mode(u8 mode)
{
	u64 val;

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	val &= ~(PMCR0_IMODE | PMCR0_IACT);
	val |= FIELD_PREP(PMCR0_IMODE, mode);
	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
	isb();
}
508 
/* Start the PMU, delivering overflow interrupts as FIQs */
static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}

/* Stop the PMU by turning interrupt generation off */
static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}
518 
static int m1_pmu_map_event(struct perf_event *event)
{
	/*
	 * Although the counters are 48bit wide, bit 47 is what
	 * triggers the overflow interrupt. Advertise the counters
	 * being 47bit wide to mimic the behaviour of the ARM PMU.
	 */
	event->hw.flags |= ARMPMU_EVT_47BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
529 
static int m2_pmu_map_event(struct perf_event *event)
{
	/*
	 * Same deal as the above, except that M2 has 64bit counters.
	 * Which, as far as we're concerned, actually means 63 bits.
	 * Yes, this is getting awkward.
	 */
	event->hw.flags |= ARMPMU_EVT_63BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
540 
/*
 * Bring the PMU to a known-quiet state: interrupts off, all counters
 * disabled, masked and zeroed. Runs on each CPU via the arm_pmu core.
 */
static void m1_pmu_reset(void *info)
{
	int i;

	__m1_pmu_set_mode(PMCR0_IMODE_OFF);

	for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
		m1_pmu_disable_counter(i);
		m1_pmu_disable_counter_interrupt(i);
		m1_pmu_write_hw_counter(0, i);
	}

	isb();
}
555 
/*
 * Translate perf exclude_* attributes into M1_PMU_CFG_COUNT_* bits in
 * config_base. Guest exclusion is not supported by this PMU.
 */
static int m1_pmu_set_event_filter(struct hw_perf_event *event,
				   struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (!attr->exclude_guest) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}
	if (!attr->exclude_kernel)
		config_base |= M1_PMU_CFG_COUNT_KERNEL;
	if (!attr->exclude_user)
		config_base |= M1_PMU_CFG_COUNT_USER;

	event->config_base = config_base;

	return 0;
}
574 
575 static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
576 {
577 	cpu_pmu->handle_irq	  = m1_pmu_handle_irq;
578 	cpu_pmu->enable		  = m1_pmu_enable_event;
579 	cpu_pmu->disable	  = m1_pmu_disable_event;
580 	cpu_pmu->read_counter	  = m1_pmu_read_counter;
581 	cpu_pmu->write_counter	  = m1_pmu_write_counter;
582 	cpu_pmu->get_event_idx	  = m1_pmu_get_event_idx;
583 	cpu_pmu->clear_event_idx  = m1_pmu_clear_event_idx;
584 	cpu_pmu->start		  = m1_pmu_start;
585 	cpu_pmu->stop		  = m1_pmu_stop;
586 
587 	if (flags & ARMPMU_EVT_47BIT)
588 		cpu_pmu->map_event = m1_pmu_map_event;
589 	else if (flags & ARMPMU_EVT_63BIT)
590 		cpu_pmu->map_event = m2_pmu_map_event;
591 	else
592 		return WARN_ON(-EINVAL);
593 
594 	cpu_pmu->reset		  = m1_pmu_reset;
595 	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
596 
597 	bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
598 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
599 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
600 	return 0;
601 }
602 
603 /* Device driver gunk */
/* Per-microarchitecture init: set the PMU name and counter width */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_icestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_firestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}

static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_avalanche_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}

static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_blizzard_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
627 
/* Device-tree compatibles, one per supported core microarchitecture */
static const struct of_device_id m1_pmu_of_device_ids[] = {
	{ .compatible = "apple,avalanche-pmu",	.data = m2_pmu_avalanche_init, },
	{ .compatible = "apple,blizzard-pmu",	.data = m2_pmu_blizzard_init, },
	{ .compatible = "apple,icestorm-pmu",	.data = m1_pmu_ice_init, },
	{ .compatible = "apple,firestorm-pmu",	.data = m1_pmu_fire_init, },
	{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
636 
/* Defer all probing to the common arm_pmu platform glue */
static int m1_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}
641 
static struct platform_driver m1_pmu_driver = {
	.driver		= {
		.name			= "apple-m1-cpu-pmu",
		.of_match_table		= m1_pmu_of_device_ids,
		/* Unbinding a CPU PMU makes no sense; disallow it */
		.suppress_bind_attrs	= true,
	},
	.probe		= m1_pmu_device_probe,
};

module_platform_driver(m1_pmu_driver);
652