xref: /linux/drivers/perf/apple_m1_cpu_pmu.c (revision 114143a595895c03fbefccfd8346fc51fb4908ed)
1a639027aSMarc Zyngier // SPDX-License-Identifier: GPL-2.0
2a639027aSMarc Zyngier /*
3a639027aSMarc Zyngier  * CPU PMU driver for the Apple M1 and derivatives
4a639027aSMarc Zyngier  *
5a639027aSMarc Zyngier  * Copyright (C) 2021 Google LLC
6a639027aSMarc Zyngier  *
7a639027aSMarc Zyngier  * Author: Marc Zyngier <maz@kernel.org>
8a639027aSMarc Zyngier  *
9a639027aSMarc Zyngier  * Most of the information used in this driver was provided by the
10a639027aSMarc Zyngier  * Asahi Linux project. The rest was experimentally discovered.
11a639027aSMarc Zyngier  */
12a639027aSMarc Zyngier 
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

#include <asm/apple_m1_pmu.h>
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
20a639027aSMarc Zyngier 
21a639027aSMarc Zyngier #define M1_PMU_NR_COUNTERS		10
22a639027aSMarc Zyngier 
23a639027aSMarc Zyngier #define M1_PMU_CFG_EVENT		GENMASK(7, 0)
24a639027aSMarc Zyngier 
25a639027aSMarc Zyngier #define ANY_BUT_0_1			GENMASK(9, 2)
26a639027aSMarc Zyngier #define ONLY_2_TO_7			GENMASK(7, 2)
27a639027aSMarc Zyngier #define ONLY_2_4_6			(BIT(2) | BIT(4) | BIT(6))
28a639027aSMarc Zyngier #define ONLY_5_6_7			(BIT(5) | BIT(6) | BIT(7))
29a639027aSMarc Zyngier 
30a639027aSMarc Zyngier /*
31a639027aSMarc Zyngier  * Description of the events we actually know about, as well as those with
32a639027aSMarc Zyngier  * a specific counter affinity. Yes, this is a grand total of two known
33a639027aSMarc Zyngier  * counters, and the rest is anybody's guess.
34a639027aSMarc Zyngier  *
35a639027aSMarc Zyngier  * Not all counters can count all events. Counters #0 and #1 are wired to
36a639027aSMarc Zyngier  * count cycles and instructions respectively, and some events have
37a639027aSMarc Zyngier  * bizarre mappings (every other counter, or even *one* counter). These
38a639027aSMarc Zyngier  * restrictions equally apply to both P and E cores.
39a639027aSMarc Zyngier  *
40a639027aSMarc Zyngier  * It is worth noting that the PMUs attached to P and E cores are likely
41a639027aSMarc Zyngier  * to be different because the underlying uarches are different. At the
42a639027aSMarc Zyngier  * moment, we don't really need to distinguish between the two because we
43a639027aSMarc Zyngier  * know next to nothing about the events themselves, and we already have
44a639027aSMarc Zyngier  * per cpu-type PMU abstractions.
45a639027aSMarc Zyngier  *
46a639027aSMarc Zyngier  * If we eventually find out that the events are different across
47a639027aSMarc Zyngier  * implementations, we'll have to introduce per cpu-type tables.
48a639027aSMarc Zyngier  */
/*
 * Raw hardware event numbers, as programmed into PMESR0/1. Named
 * entries come from the Asahi Linux reverse-engineering effort; the
 * M1_PMU_PERFCTR_UNKNOWN_* entries are events with observed counter
 * affinities but no known semantics.
 */
enum m1_pmu_events {
	M1_PMU_PERFCTR_RETIRE_UOP				= 0x1,
	M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE			= 0x2,
	M1_PMU_PERFCTR_L1I_TLB_FILL				= 0x4,
	M1_PMU_PERFCTR_L1D_TLB_FILL				= 0x5,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION		= 0x7,
	M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA			= 0x8,
	M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION			= 0xa,
	M1_PMU_PERFCTR_L2_TLB_MISS_DATA				= 0xb,
	M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC		= 0xd,
	M1_PMU_PERFCTR_SCHEDULE_UOP				= 0x52,
	M1_PMU_PERFCTR_INTERRUPT_PENDING			= 0x6c,
	M1_PMU_PERFCTR_MAP_STALL_DISPATCH			= 0x70,
	M1_PMU_PERFCTR_MAP_REWIND				= 0x75,
	M1_PMU_PERFCTR_MAP_STALL				= 0x76,
	M1_PMU_PERFCTR_MAP_INT_UOP				= 0x7c,
	M1_PMU_PERFCTR_MAP_LDST_UOP				= 0x7d,
	M1_PMU_PERFCTR_MAP_SIMD_UOP				= 0x7e,
	M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC		= 0x84,
	M1_PMU_PERFCTR_INST_ALL					= 0x8c,
	M1_PMU_PERFCTR_INST_BRANCH				= 0x8d,
	M1_PMU_PERFCTR_INST_BRANCH_CALL				= 0x8e,
	M1_PMU_PERFCTR_INST_BRANCH_RET				= 0x8f,
	M1_PMU_PERFCTR_INST_BRANCH_TAKEN			= 0x90,
	M1_PMU_PERFCTR_INST_BRANCH_INDIR			= 0x93,
	M1_PMU_PERFCTR_INST_BRANCH_COND				= 0x94,
	M1_PMU_PERFCTR_INST_INT_LD				= 0x95,
	M1_PMU_PERFCTR_INST_INT_ST				= 0x96,
	M1_PMU_PERFCTR_INST_INT_ALU				= 0x97,
	M1_PMU_PERFCTR_INST_SIMD_LD				= 0x98,
	M1_PMU_PERFCTR_INST_SIMD_ST				= 0x99,
	M1_PMU_PERFCTR_INST_SIMD_ALU				= 0x9a,
	M1_PMU_PERFCTR_INST_LDST				= 0x9b,
	M1_PMU_PERFCTR_INST_BARRIER				= 0x9c,
	M1_PMU_PERFCTR_UNKNOWN_9f				= 0x9f,
	M1_PMU_PERFCTR_L1D_TLB_ACCESS				= 0xa0,
	M1_PMU_PERFCTR_L1D_TLB_MISS				= 0xa1,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST			= 0xa2,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD			= 0xa3,
	M1_PMU_PERFCTR_LD_UNIT_UOP				= 0xa6,
	M1_PMU_PERFCTR_ST_UNIT_UOP				= 0xa7,
	M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK			= 0xa8,
	M1_PMU_PERFCTR_LDST_X64_UOP				= 0xb1,
	M1_PMU_PERFCTR_LDST_XPG_UOP				= 0xb2,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC			= 0xb3,
	M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL			= 0xb4,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC		= 0xbf,
	M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC		= 0xc0,
	M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC			= 0xc1,
	M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC	= 0xc4,
	M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC		= 0xc5,
	M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC		= 0xc6,
	M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC		= 0xc8,
	M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC	= 0xca,
	M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC			= 0xcb,
	M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND			= 0xd4,
	M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE			= 0xd6,
	M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND			= 0xdb,
	M1_PMU_PERFCTR_FETCH_RESTART				= 0xde,
	M1_PMU_PERFCTR_ST_NT_UOP				= 0xe5,
	M1_PMU_PERFCTR_LD_NT_UOP				= 0xe6,
	M1_PMU_PERFCTR_UNKNOWN_f5				= 0xf5,
	M1_PMU_PERFCTR_UNKNOWN_f6				= 0xf6,
	M1_PMU_PERFCTR_UNKNOWN_f7				= 0xf7,
	M1_PMU_PERFCTR_UNKNOWN_f8				= 0xf8,
	M1_PMU_PERFCTR_UNKNOWN_fd				= 0xfd,
	/* Highest possible event number: the config field is 8 bits wide */
	M1_PMU_PERFCTR_LAST					= M1_PMU_CFG_EVENT,

	/*
	 * From this point onwards, these are not actual HW events,
	 * but attributes that get stored in hw->config_base.
	 */
	M1_PMU_CFG_COUNT_USER					= BIT(8),
	M1_PMU_CFG_COUNT_KERNEL					= BIT(9),
};
124a639027aSMarc Zyngier 
125a639027aSMarc Zyngier /*
126a639027aSMarc Zyngier  * Per-event affinity table. Most events can be installed on counter
127a639027aSMarc Zyngier  * 2-9, but there are a number of exceptions. Note that this table
128a639027aSMarc Zyngier  * has been created experimentally, and I wouldn't be surprised if more
129a639027aSMarc Zyngier  * counters had strange affinities.
130a639027aSMarc Zyngier  */
/*
 * Per-event affinity table: for each event number, a bitmap of the
 * counters (0-9) that can count it. Most events can be installed on
 * counters 2-9, but there are a number of exceptions. Note that this
 * table has been created experimentally, and I wouldn't be surprised
 * if more counters had strange affinities.
 */
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
	[0 ... M1_PMU_PERFCTR_LAST]				= ANY_BUT_0_1,
	[M1_PMU_PERFCTR_RETIRE_UOP]				= BIT(7),
	[M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE]			= ANY_BUT_0_1 | BIT(0),
	[M1_PMU_PERFCTR_INST_ALL]				= BIT(7) | BIT(1),
	[M1_PMU_PERFCTR_INST_BRANCH]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_CALL]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_RET]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_TAKEN]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_INDIR]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_BRANCH_COND]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_LD]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_INT_ST]				= BIT(7),
	[M1_PMU_PERFCTR_INST_INT_ALU]				= BIT(7),
	[M1_PMU_PERFCTR_INST_SIMD_LD]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ST]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_INST_SIMD_ALU]				= BIT(7),
	[M1_PMU_PERFCTR_INST_LDST]				= BIT(7),
	[M1_PMU_PERFCTR_INST_BARRIER]				= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_9f]				= BIT(7),
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC]		= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC]	= ONLY_5_6_7,
	[M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC]			= ONLY_5_6_7,
	[M1_PMU_PERFCTR_UNKNOWN_f5]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f6]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f7]				= ONLY_2_4_6,
	[M1_PMU_PERFCTR_UNKNOWN_f8]				= ONLY_2_TO_7,
	[M1_PMU_PERFCTR_UNKNOWN_fd]				= ONLY_2_4_6,
};
166a639027aSMarc Zyngier 
/*
 * Mapping of the generic PERF_COUNT_HW_* events to M1 hardware event
 * numbers. Only cycles and instructions are known; everything else is
 * unsupported.
 */
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]		= M1_PMU_PERFCTR_INST_ALL,
};
172a639027aSMarc Zyngier 
173a639027aSMarc Zyngier /* sysfs definitions */
m1_pmu_events_sysfs_show(struct device * dev,struct device_attribute * attr,char * page)174a639027aSMarc Zyngier static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
175a639027aSMarc Zyngier 					struct device_attribute *attr,
176a639027aSMarc Zyngier 					char *page)
177a639027aSMarc Zyngier {
178a639027aSMarc Zyngier 	struct perf_pmu_events_attr *pmu_attr;
179a639027aSMarc Zyngier 
180a639027aSMarc Zyngier 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
181a639027aSMarc Zyngier 
182a639027aSMarc Zyngier 	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
183a639027aSMarc Zyngier }
184a639027aSMarc Zyngier 
/* Helper to declare one named entry in the sysfs "events" group */
#define M1_PMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)

/* The two events we can name with confidence: cycles and instructions */
static struct attribute *m1_pmu_event_attrs[] = {
	M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
	M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
	NULL,
};

static const struct attribute_group m1_pmu_events_attr_group = {
	.name = "events",
	.attrs = m1_pmu_event_attrs,
};
198a639027aSMarc Zyngier 
/* "format" group: the event number lives in config[7:0] */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *m1_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group m1_pmu_format_attr_group = {
	.name = "format",
	.attrs = m1_pmu_format_attrs,
};
210a639027aSMarc Zyngier 
/*
 * Low level accessors. No synchronisation.
 *
 * The ten counters live in ten distinct sysregs (PMC0..PMC9_EL1), so
 * access has to be by literal index; these macros generate the switch
 * cases in the read/write helpers below.
 */
#define PMU_READ_COUNTER(_idx)						\
	case _idx:	return read_sysreg_s(SYS_IMP_APL_PMC## _idx ##_EL1)

#define PMU_WRITE_COUNTER(_val, _idx)					\
	case _idx:							\
		write_sysreg_s(_val, SYS_IMP_APL_PMC## _idx ##_EL1);	\
		return
219a639027aSMarc Zyngier 
/*
 * Read the raw value of HW counter @index (0-9). Any other index is a
 * driver bug, hence BUG().
 */
static u64 m1_pmu_read_hw_counter(unsigned int index)
{
	switch (index) {
		PMU_READ_COUNTER(0);
		PMU_READ_COUNTER(1);
		PMU_READ_COUNTER(2);
		PMU_READ_COUNTER(3);
		PMU_READ_COUNTER(4);
		PMU_READ_COUNTER(5);
		PMU_READ_COUNTER(6);
		PMU_READ_COUNTER(7);
		PMU_READ_COUNTER(8);
		PMU_READ_COUNTER(9);
	}

	BUG();
}
237a639027aSMarc Zyngier 
/*
 * Write @val to HW counter @index (0-9). Each case returns directly,
 * so BUG() is only reached for an out-of-range index.
 */
static void m1_pmu_write_hw_counter(u64 val, unsigned int index)
{
	switch (index) {
		PMU_WRITE_COUNTER(val, 0);
		PMU_WRITE_COUNTER(val, 1);
		PMU_WRITE_COUNTER(val, 2);
		PMU_WRITE_COUNTER(val, 3);
		PMU_WRITE_COUNTER(val, 4);
		PMU_WRITE_COUNTER(val, 5);
		PMU_WRITE_COUNTER(val, 6);
		PMU_WRITE_COUNTER(val, 7);
		PMU_WRITE_COUNTER(val, 8);
		PMU_WRITE_COUNTER(val, 9);
	}

	BUG();
}
255a639027aSMarc Zyngier 
256a639027aSMarc Zyngier #define get_bit_offset(index, mask)	(__ffs(mask) + (index))
257a639027aSMarc Zyngier 
/*
 * Set (@en == true) or clear the enable bit for counter @index in
 * PMCR0. Counters 0-7 and 8-9 sit in two separate bitfields of the
 * register; any other index is a driver bug.
 */
static void __m1_pmu_enable_counter(unsigned int index, bool en)
{
	u64 pmcr0, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_CNT_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_CNT_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	pmcr0 = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	pmcr0 = en ? (pmcr0 | bit) : (pmcr0 & ~bit);
	write_sysreg_s(pmcr0, SYS_IMP_APL_PMCR0_EL1);
}
282a639027aSMarc Zyngier 
/* Enable counting on counter @index */
static void m1_pmu_enable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, true);
}

/* Disable counting on counter @index */
static void m1_pmu_disable_counter(unsigned int index)
{
	__m1_pmu_enable_counter(index, false);
}
292a639027aSMarc Zyngier 
/*
 * Set (@en == true) or clear the overflow-interrupt (PMI) enable bit
 * for counter @index in PMCR0. Same split bitfield layout as the
 * counter-enable bits; any other index is a driver bug.
 */
static void __m1_pmu_enable_counter_interrupt(unsigned int index, bool en)
{
	u64 pmcr0, bit;

	switch (index) {
	case 0 ... 7:
		bit = BIT(get_bit_offset(index, PMCR0_PMI_ENABLE_0_7));
		break;
	case 8 ... 9:
		bit = BIT(get_bit_offset(index - 8, PMCR0_PMI_ENABLE_8_9));
		break;
	default:
		BUG();
	}

	pmcr0 = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	pmcr0 = en ? (pmcr0 | bit) : (pmcr0 & ~bit);
	write_sysreg_s(pmcr0, SYS_IMP_APL_PMCR0_EL1);
}
317a639027aSMarc Zyngier 
/* Enable the overflow interrupt for counter @index */
static void m1_pmu_enable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, true);
}

/* Disable the overflow interrupt for counter @index */
static void m1_pmu_disable_counter_interrupt(unsigned int index)
{
	__m1_pmu_enable_counter_interrupt(index, false);
}
327a639027aSMarc Zyngier 
/*
 * Program counter @index to count @event, restricted to EL0 (@user)
 * and/or EL1 (@kernel).
 *
 * The EL0/EL1 filter bits live in PMCR1, in two split bitfields
 * (counters 0-7 and 8-9), mirroring the PMCR0 layout. The event
 * selector itself is an 8-bit field in PMESR0/PMESR1.
 */
static void m1_pmu_configure_counter(unsigned int index, u8 event,
				     bool user, bool kernel)
{
	u64 val, user_bit, kernel_bit;
	int shift;

	switch (index) {
	case 0 ... 7:
		user_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL0_0_7));
		kernel_bit = BIT(get_bit_offset(index, PMCR1_COUNT_A64_EL1_0_7));
		break;
	case 8 ... 9:
		user_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL0_8_9));
		kernel_bit = BIT(get_bit_offset(index - 8, PMCR1_COUNT_A64_EL1_8_9));
		break;
	default:
		BUG();
	}

	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);

	if (user)
		val |= user_bit;
	else
		val &= ~user_bit;

	if (kernel)
		val |= kernel_bit;
	else
		val &= ~kernel_bit;

	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);

	/*
	 * Counters 0 and 1 have fixed events. For anything else,
	 * place the event at the expected location in the relevant
	 * register (PMESR0 holds the event configuration for counters
	 * 2-5, resp. PMESR1 for counters 6-9).
	 */
	switch (index) {
	case 0 ... 1:
		break;
	case 2 ... 5:
		shift = (index - 2) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
		break;
	case 6 ... 9:
		shift = (index - 6) * 8;
		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
		val &= ~((u64)0xff << shift);
		val |= (u64)event << shift;
		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
		break;
	}
}
386a639027aSMarc Zyngier 
387a639027aSMarc Zyngier /* arm_pmu backend */
/* arm_pmu backend */
/*
 * arm_pmu ->enable callback: program and start the counter backing
 * @event. The event number and the EL0/EL1 filter bits were packed
 * into hw.config_base by m1_pmu_set_event_filter()/map_event.
 */
static void m1_pmu_enable_event(struct perf_event *event)
{
	bool user, kernel;
	u8 evt;

	evt = event->hw.config_base & M1_PMU_CFG_EVENT;
	user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
	kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;

	/*
	 * Quiesce the counter (and sync with an isb()) before
	 * reprogramming it, then bring it back up.
	 */
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();

	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
	m1_pmu_enable_counter(event->hw.idx);
	m1_pmu_enable_counter_interrupt(event->hw.idx);
	isb();
}
406a639027aSMarc Zyngier 
/* arm_pmu ->disable callback: stop the counter and its interrupt */
static void m1_pmu_disable_event(struct perf_event *event)
{
	m1_pmu_disable_counter_interrupt(event->hw.idx);
	m1_pmu_disable_counter(event->hw.idx);
	isb();
}
413a639027aSMarc Zyngier 
/*
 * PMU overflow interrupt handler.
 *
 * PMSR has one bit set per overflowed counter. If nothing overflowed,
 * ack the (spurious?) interrupt by clearing PMCR0.IACT and bail out.
 * Otherwise stop the PMU, fold the counts into the perf events,
 * restart each event's sampling period, and hand overflows to the
 * perf core (disabling an event if perf asks us to throttle it).
 *
 * Note that all active counters are updated here, not just those
 * flagged in PMSR.
 */
static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	u64 overflow, state;
	int idx;

	overflow = read_sysreg_s(SYS_IMP_APL_PMSR_EL1);
	if (!overflow) {
		/* Spurious interrupt? */
		state = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
		state &= ~PMCR0_IACT;
		write_sysreg_s(state, SYS_IMP_APL_PMCR0_EL1);
		isb();
		return IRQ_NONE;
	}

	cpu_pmu->stop(cpu_pmu);

	regs = get_irq_regs();

	for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
		struct perf_event *event = cpuc->events[idx];
		struct perf_sample_data data;

		if (!event)
			continue;

		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			m1_pmu_disable_event(event);
	}

	cpu_pmu->start(cpu_pmu);

	return IRQ_HANDLED;
}
455a639027aSMarc Zyngier 
/* arm_pmu ->read_counter callback */
static u64 m1_pmu_read_counter(struct perf_event *event)
{
	return m1_pmu_read_hw_counter(event->hw.idx);
}

/* arm_pmu ->write_counter callback; isb() makes the write visible */
static void m1_pmu_write_counter(struct perf_event *event, u64 value)
{
	m1_pmu_write_hw_counter(value, event->hw.idx);
	isb();
}
466a639027aSMarc Zyngier 
/*
 * arm_pmu ->get_event_idx callback: pick a free counter for @event,
 * honouring the per-event affinity table. Returns the counter index,
 * or -EAGAIN when no compatible counter is free.
 */
static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & M1_PMU_CFG_EVENT;
	unsigned long affinity = m1_pmu_event_affinity[evtype];
	int idx;

	/*
	 * Place the event on the first free counter that can count
	 * this event.
	 *
	 * We could do a better job if we had a view of all the events
	 * counting on the PMU at any given time, and by placing the
	 * most constraining events first.
	 */
	for_each_set_bit(idx, &affinity, M1_PMU_NR_COUNTERS) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	return -EAGAIN;
}
489a639027aSMarc Zyngier 
/* arm_pmu ->clear_event_idx callback: release the event's counter */
static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
495a639027aSMarc Zyngier 
/*
 * Select the PMU interrupt delivery mode (PMCR0.IMODE), clearing any
 * pending interrupt (PMCR0.IACT) in the process.
 */
static void __m1_pmu_set_mode(u8 mode)
{
	u64 val;

	val = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);
	val &= ~(PMCR0_IMODE | PMCR0_IACT);
	val |= FIELD_PREP(PMCR0_IMODE, mode);
	write_sysreg_s(val, SYS_IMP_APL_PMCR0_EL1);
	isb();
}
506a639027aSMarc Zyngier 
/* arm_pmu ->start callback: route PMU interrupts to the FIQ line */
static void m1_pmu_start(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_FIQ);
}

/* arm_pmu ->stop callback: mask PMU interrupt delivery entirely */
static void m1_pmu_stop(struct arm_pmu *cpu_pmu)
{
	__m1_pmu_set_mode(PMCR0_IMODE_OFF);
}
516a639027aSMarc Zyngier 
/* arm_pmu ->map_event callback for the M1 flavour of the PMU */
static int m1_pmu_map_event(struct perf_event *event)
{
	/*
	 * Although the counters are 48bit wide, bit 47 is what
	 * triggers the overflow interrupt. Advertise the counters
	 * being 47bit wide to mimick the behaviour of the ARM PMU.
	 */
	event->hw.flags |= ARMPMU_EVT_47BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}

/* arm_pmu ->map_event callback for the M2 flavour of the PMU */
static int m2_pmu_map_event(struct perf_event *event)
{
	/*
	 * Same deal as the above, except that M2 has 64bit counters.
	 * Which, as far as we're concerned, actually means 63 bits.
	 * Yes, this is getting awkward.
	 */
	event->hw.flags |= ARMPMU_EVT_63BIT;
	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
5388be3593bSMarc Zyngier 
/*
 * arm_pmu ->reset callback (runs on the target CPU): mask interrupts
 * first, then disable and zero every counter.
 */
static void m1_pmu_reset(void *info)
{
	int i;

	__m1_pmu_set_mode(PMCR0_IMODE_OFF);

	for (i = 0; i < M1_PMU_NR_COUNTERS; i++) {
		m1_pmu_disable_counter(i);
		m1_pmu_disable_counter_interrupt(i);
		m1_pmu_write_hw_counter(0, i);
	}

	isb();
}
553a639027aSMarc Zyngier 
m1_pmu_set_event_filter(struct hw_perf_event * event,struct perf_event_attr * attr)554a639027aSMarc Zyngier static int m1_pmu_set_event_filter(struct hw_perf_event *event,
555a639027aSMarc Zyngier 				   struct perf_event_attr *attr)
556a639027aSMarc Zyngier {
557a639027aSMarc Zyngier 	unsigned long config_base = 0;
558a639027aSMarc Zyngier 
559186c91aaSJames Clark 	if (!attr->exclude_guest) {
560186c91aaSJames Clark 		pr_debug("ARM performance counters do not support mode exclusion\n");
561186c91aaSJames Clark 		return -EOPNOTSUPP;
562186c91aaSJames Clark 	}
563a639027aSMarc Zyngier 	if (!attr->exclude_kernel)
564a639027aSMarc Zyngier 		config_base |= M1_PMU_CFG_COUNT_KERNEL;
565a639027aSMarc Zyngier 	if (!attr->exclude_user)
566a639027aSMarc Zyngier 		config_base |= M1_PMU_CFG_COUNT_USER;
567a639027aSMarc Zyngier 
568a639027aSMarc Zyngier 	event->config_base = config_base;
569a639027aSMarc Zyngier 
570a639027aSMarc Zyngier 	return 0;
571a639027aSMarc Zyngier }
572a639027aSMarc Zyngier 
/*
 * Common setup for all supported PMU flavours. @flags selects the
 * effective counter width (ARMPMU_EVT_47BIT for M1, ARMPMU_EVT_63BIT
 * for M2), which in turn picks the matching map_event callback; any
 * other value is rejected.
 */
static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
{
	cpu_pmu->handle_irq	  = m1_pmu_handle_irq;
	cpu_pmu->enable		  = m1_pmu_enable_event;
	cpu_pmu->disable	  = m1_pmu_disable_event;
	cpu_pmu->read_counter	  = m1_pmu_read_counter;
	cpu_pmu->write_counter	  = m1_pmu_write_counter;
	cpu_pmu->get_event_idx	  = m1_pmu_get_event_idx;
	cpu_pmu->clear_event_idx  = m1_pmu_clear_event_idx;
	cpu_pmu->start		  = m1_pmu_start;
	cpu_pmu->stop		  = m1_pmu_stop;

	if (flags & ARMPMU_EVT_47BIT)
		cpu_pmu->map_event = m1_pmu_map_event;
	else if (flags & ARMPMU_EVT_63BIT)
		cpu_pmu->map_event = m2_pmu_map_event;
	else
		return WARN_ON(-EINVAL);

	cpu_pmu->reset		  = m1_pmu_reset;
	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;

	/* All ten counters are usable */
	bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
	return 0;
}
600a639027aSMarc Zyngier 
601a639027aSMarc Zyngier /* Device driver gunk */
/* M1 efficiency cores (Icestorm): 47-bit counters. */
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_icestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
607a639027aSMarc Zyngier 
/* M1 performance cores (Firestorm): 47-bit counters. */
static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_firestorm_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
613a639027aSMarc Zyngier 
/* M2 performance cores (Avalanche): 63-bit counters. */
static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_avalanche_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
6197d0bfb7cSJanne Grunau 
/* M2 efficiency cores (Blizzard): 63-bit counters. */
static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "apple_blizzard_pmu";
	return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
6257d0bfb7cSJanne Grunau 
626a639027aSMarc Zyngier static const struct of_device_id m1_pmu_of_device_ids[] = {
6277d0bfb7cSJanne Grunau 	{ .compatible = "apple,avalanche-pmu",	.data = m2_pmu_avalanche_init, },
6287d0bfb7cSJanne Grunau 	{ .compatible = "apple,blizzard-pmu",	.data = m2_pmu_blizzard_init, },
629a639027aSMarc Zyngier 	{ .compatible = "apple,icestorm-pmu",	.data = m1_pmu_ice_init, },
630a639027aSMarc Zyngier 	{ .compatible = "apple,firestorm-pmu",	.data = m1_pmu_fire_init, },
631a639027aSMarc Zyngier 	{ },
632a639027aSMarc Zyngier };
633a639027aSMarc Zyngier MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
634a639027aSMarc Zyngier 
/*
 * Probe: defer entirely to the common ARM PMU probe path, which picks
 * the per-core-type init function out of the OF match table above.
 */
static int m1_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, m1_pmu_of_device_ids, NULL);
}
639a639027aSMarc Zyngier 
640a639027aSMarc Zyngier static struct platform_driver m1_pmu_driver = {
641a639027aSMarc Zyngier 	.driver		= {
642a639027aSMarc Zyngier 		.name			= "apple-m1-cpu-pmu",
643a639027aSMarc Zyngier 		.of_match_table		= m1_pmu_of_device_ids,
644a639027aSMarc Zyngier 		.suppress_bind_attrs	= true,
645a639027aSMarc Zyngier 	},
646a639027aSMarc Zyngier 	.probe		= m1_pmu_device_probe,
647a639027aSMarc Zyngier };
648a639027aSMarc Zyngier 
649a639027aSMarc Zyngier module_platform_driver(m1_pmu_driver);
650