xref: /linux/drivers/perf/riscv_pmu_sbi.c (revision 7dd646cf745c34d31e7ed2a52265e9ca8308f58f)
// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */

#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <linux/soc/andes/irq.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>

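/*
 * ALTERNATIVE_2() patches the CSR read below at boot so that the overflow
 * status comes from the standard Sscofpmf scountovf CSR, the T-Head C9xx
 * errata CSR, or the Andes custom CSR, depending on the detected platform.
 */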
#define ALT_SBI_PMU_OVERFLOW(__ovl)					\
asm volatile(ALTERNATIVE_2(						\
	"csrr %0, " __stringify(CSR_SCOUNTOVF),				\
	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
		THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
		CONFIG_ERRATA_THEAD_PMU,				\
	"csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF),			\
		0, RISCV_ISA_EXT_XANDESPMU,				\
		CONFIG_ANDES_CUSTOM_PMU)				\
	: "=r" (__ovl) :						\
	: "memory")

#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask)			\
asm volatile(ALTERNATIVE(						\
	"csrc " __stringify(CSR_IP) ", %0\n\t",				\
	"csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t",			\
		0, RISCV_ISA_EXT_XANDESPMU,				\
		CONFIG_ANDES_CUSTOM_PMU)				\
	: : "r"(__irq_mask)						\
	: "memory")

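/*
 * Modes for the perf_user_access sysctl: 0 disables direct user-space counter
 * reads, 1 allows them only for events that request user access, and 2 keeps
 * the legacy behaviour of exposing cycle/instret to all user-space readers.
 */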
#define SYSCTL_NO_USER_ACCESS	0
#define SYSCTL_USER_ACCESS	1
#define SYSCTL_LEGACY		2

#define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)

PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
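
/*
 * Illustrative usage (assuming the perf PMU device is registered as "cpu"):
 * the format attributes above let userspace request a raw hardware event as
 *
 *	perf stat -e cpu/event=0xNNNN/ ...
 *
 * and a firmware event by also setting config bit 63 (firmware=1).
 */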

static bool sbi_v2_available;
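
/*
 * With the SBI PMU snapshot extension (SBI v2.0), the firmware publishes
 * counter values and the overflow mask in a per-CPU shared-memory page,
 * letting stopped counters be read without one ecall per counter.
 */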
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
#define sbi_pmu_snapshot_available() \
	static_branch_unlikely(&sbi_pmu_snapshot_available)

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};

/* Allow user mode access by default */
static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;

/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
 * per_cpu in case of harts with different PMU counters.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;

/* Cache the available counters in a bitmask */
static unsigned long cmask;

struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};

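/*
 * The bitfields in sbi_pmu_event_data mirror the SBI PMU event_idx encoding:
 * the low 16 bits hold the event code (or cache id/op/result for cache
 * events) and bits 16-19 the event type, so event_idx can be handed straight
 * to the SBI firmware when matching counters.
 */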
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};

static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];
	if (!info)
		return false;

	return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
}

/*
 * Returns the counter width of a programmable counter and number of hardware
 * counters. As we don't support heterogeneous CPUs yet, it is okay to just
 * return the counter width of the first programmable counter.
 */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
{
	int i;
	union sbi_pmu_ctr_info *info;
	u32 hpm_width = 0, hpm_count = 0;

	if (!cmask)
		return -EINVAL;

	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
		info = &pmu_ctr_list[i];
		if (!info)
			continue;
		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
			hpm_width = info->width;
		if (info->type == SBI_PMU_CTR_TYPE_HW)
			hpm_count++;
	}

	*hw_ctr_width = hpm_width;
	*num_hw_ctr = hpm_count;

	return 0;
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
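/* Exported for use outside this driver, e.g. by the RISC-V KVM vPMU code. */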

static uint8_t pmu_sbi_csr_index(struct perf_event *event)
{
	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}

static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
	unsigned long cflags = 0;
	bool guest_events = false;

	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
		guest_events = true;
	if (event->attr.exclude_kernel)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
	if (guest_events && event->attr.exclude_hv)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_host)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_guest)
		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;

	return cflags;
}

static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0, cmask = rvpmu->cmask;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);

	/*
	 * In legacy mode, we have to force the fixed counters for those events
	 * but not in user access mode, as we want to use the other counters
	 * that support sampling/filtering.
	 */
	if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1;
		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = BIT(CSR_INSTRET - CSR_CYCLE);
		}
	}

	/* Retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}

static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}

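/*
 * perf encodes generic cache events as config = type | (op << 8) |
 * (result << 16); e.g. an L1D read miss arrives as
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */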
static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}

static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
		return true;
	else
		return false;
}

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	int bSoftware;
	u64 raw_config_val;
	int ret;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between a
		 * hardware raw event and firmware events.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			ret = (raw_config_val & 0xFFFF) |
				(SBI_PMU_EVENT_TYPE_FW << 16);
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

513e9991434SAtish Patra 
514a8625217SAtish Patra static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu)
515a8625217SAtish Patra {
516a8625217SAtish Patra 	int cpu;
517a8625217SAtish Patra 
518a8625217SAtish Patra 	for_each_possible_cpu(cpu) {
519a8625217SAtish Patra 		struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
520a8625217SAtish Patra 
521a8625217SAtish Patra 		if (!cpu_hw_evt->snapshot_addr)
522a8625217SAtish Patra 			continue;
523a8625217SAtish Patra 
524a8625217SAtish Patra 		free_page((unsigned long)cpu_hw_evt->snapshot_addr);
525a8625217SAtish Patra 		cpu_hw_evt->snapshot_addr = NULL;
526a8625217SAtish Patra 		cpu_hw_evt->snapshot_addr_phys = 0;
527a8625217SAtish Patra 	}
528a8625217SAtish Patra }
529a8625217SAtish Patra 
530a8625217SAtish Patra static int pmu_sbi_snapshot_alloc(struct riscv_pmu *pmu)
531a8625217SAtish Patra {
532a8625217SAtish Patra 	int cpu;
533a8625217SAtish Patra 	struct page *snapshot_page;
534a8625217SAtish Patra 
535a8625217SAtish Patra 	for_each_possible_cpu(cpu) {
536a8625217SAtish Patra 		struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
537a8625217SAtish Patra 
538a8625217SAtish Patra 		snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
539a8625217SAtish Patra 		if (!snapshot_page) {
540a8625217SAtish Patra 			pmu_sbi_snapshot_free(pmu);
541a8625217SAtish Patra 			return -ENOMEM;
542a8625217SAtish Patra 		}
543a8625217SAtish Patra 		cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page);
544a8625217SAtish Patra 		cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page);
545a8625217SAtish Patra 	}
546a8625217SAtish Patra 
547a8625217SAtish Patra 	return 0;
548a8625217SAtish Patra }
549a8625217SAtish Patra 
550a8625217SAtish Patra static int pmu_sbi_snapshot_disable(void)
551a8625217SAtish Patra {
552a8625217SAtish Patra 	struct sbiret ret;
553a8625217SAtish Patra 
554a8625217SAtish Patra 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, SBI_SHMEM_DISABLE,
555a8625217SAtish Patra 			SBI_SHMEM_DISABLE, 0, 0, 0, 0);
556a8625217SAtish Patra 	if (ret.error) {
557a8625217SAtish Patra 		pr_warn("failed to disable snapshot shared memory\n");
558a8625217SAtish Patra 		return sbi_err_map_linux_errno(ret.error);
559a8625217SAtish Patra 	}
560a8625217SAtish Patra 
561a8625217SAtish Patra 	return 0;
562a8625217SAtish Patra }
563a8625217SAtish Patra 
564a8625217SAtish Patra static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu)
565a8625217SAtish Patra {
566a8625217SAtish Patra 	struct cpu_hw_events *cpu_hw_evt;
567a8625217SAtish Patra 	struct sbiret ret = {0};
568a8625217SAtish Patra 
569a8625217SAtish Patra 	cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
570a8625217SAtish Patra 	if (!cpu_hw_evt->snapshot_addr_phys)
571a8625217SAtish Patra 		return -EINVAL;
572a8625217SAtish Patra 
573a8625217SAtish Patra 	if (cpu_hw_evt->snapshot_set_done)
574a8625217SAtish Patra 		return 0;
575a8625217SAtish Patra 
576a8625217SAtish Patra 	if (IS_ENABLED(CONFIG_32BIT))
577a8625217SAtish Patra 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
578a8625217SAtish Patra 				cpu_hw_evt->snapshot_addr_phys,
579a8625217SAtish Patra 				(u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0);
580a8625217SAtish Patra 	else
581a8625217SAtish Patra 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
582a8625217SAtish Patra 				cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0);
583a8625217SAtish Patra 
584a8625217SAtish Patra 	/* Free up the snapshot area memory and fall back to SBI PMU calls without snapshot */
585a8625217SAtish Patra 	if (ret.error) {
586a8625217SAtish Patra 		if (ret.error != SBI_ERR_NOT_SUPPORTED)
587a8625217SAtish Patra 			pr_warn("pmu snapshot setup failed with error %ld\n", ret.error);
588a8625217SAtish Patra 		return sbi_err_map_linux_errno(ret.error);
589a8625217SAtish Patra 	}
590a8625217SAtish Patra 
591a8625217SAtish Patra 	memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
592a8625217SAtish Patra 	cpu_hw_evt->snapshot_set_done = true;
593a8625217SAtish Patra 
594a8625217SAtish Patra 	return 0;
595a8625217SAtish Patra }
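
/*
 * Note: on rv32 the SBI calling convention passes 64-bit values as low/high
 * 32-bit pairs, which is why pmu_sbi_snapshot_setup() splits
 * snapshot_addr_phys across two arguments.
 */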

static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	u64 val = 0;
	struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
	union sbi_pmu_ctr_info info = pmu_ctr_list[idx];

	/* Read the value from the shared memory directly only if the counter is stopped */
	if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) {
		val = sdata->ctr_values[idx];
		return val;
	}

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (ret.error)
			return 0;

		val = ret.value;
		if (IS_ENABLED(CONFIG_32BIT) && sbi_v2_available && info.width >= 32) {
			ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ_HI,
					hwc->idx, 0, 0, 0, 0, 0);
			if (!ret.error)
				val |= ((u64)ret.value << 32);
			else
				WARN_ONCE(1, "Unable to read upper 32 bits of firmware counter error: %ld\n",
					  ret.error);
		}
	} else {
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			val |= ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32;
	}

	return val;
}
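
/*
 * On rv32 the upper half of a hardware counter lives in a separate CSR at
 * offset +0x80 (e.g. cycleh at 0xc80 vs cycle at 0xc00), hence the second
 * CSR read in pmu_sbi_ctr_read() shifted into the top 32 bits.
 */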

static void pmu_sbi_set_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_reset_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
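
/*
 * The two helpers above toggle the event's bit in scounteren, which gates
 * U-mode access to the corresponding counter CSR and thus backs the perf
 * user-space direct counter read path.
 */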

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

	/* There is no benefit in setting the SNAPSHOT flag for a single counter */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
			hwc->idx, sbi_err_map_linux_errno(ret.error));

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_set_scounteren((void *)event);
}

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_reset_scounteren((void *)event);

	if (sbi_pmu_snapshot_available())
		flag |= SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (!ret.error && sbi_pmu_snapshot_available()) {
		/*
		 * The counter snapshot is based on the index base specified by hwc->idx.
		 * The actual counter value is updated in shared memory at index 0 when the
		 * counter mask is 0x01. To ensure accurate counter values, it's necessary
		 * to transfer the counter value to shared memory. However, if hwc->idx is
		 * zero, the counter value is already correctly updated in shared memory,
		 * requiring no further adjustment.
		 */
		if (hwc->idx > 0) {
			sdata->ctr_values[hwc->idx] = sdata->ctr_values[0];
			sdata->ctr_values[0] = 0;
		}
	} else if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
		flag != SBI_PMU_STOP_FLAG_RESET) {
		pr_err("Stopping counter idx %d failed with error %d\n",
			hwc->idx, sbi_err_map_linux_errno(ret.error));
	}
}

static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;

		*mask |= BIT(i);

		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
	unsigned long flag = 0;
	int i, idx;
	struct sbiret ret;
	u64 temp_ctr_overflow_mask = 0;

	if (sbi_pmu_snapshot_available())
		flag = SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;

	/* Reset the shadow copy to avoid saving/restoring any value from a previous overflow */
	memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		/* No need to check the error here as we can't do anything about the error */
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG,
				cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
		if (!ret.error && sbi_pmu_snapshot_available()) {
			/* Save the counter values to avoid clobbering */
			for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
				cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] =
							sdata->ctr_values[idx];
			/* Save the overflow mask to avoid clobbering */
			temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG);
		}
	}

	/* Restore the counter values to the shared memory for used hw counters */
	if (sbi_pmu_snapshot_available()) {
		for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS)
			sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx];
		if (temp_ctr_overflow_mask)
			sdata->ctr_overflow_mask = temp_ctr_overflow_mask;
	}
}

/*
 * This function starts all the used counters in a two-step approach. Any
 * counter that did not overflow can be started in a single step, while the
 * overflowed counters need to be started with an updated initialization
 * value.
 */
static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
					      u64 ctr_ovf_mask)
{
	int idx = 0, i;
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
		/* Start all the counters that did not overflow in a single shot */
		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG, ctr_start_mask,
			0, 0, 0, 0);
	}

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}

static inline void pmu_sbi_start_ovf_ctrs_snapshot(struct cpu_hw_events *cpu_hw_evt,
						   u64 ctr_ovf_mask)
{
	int i, idx = 0;
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_INIT_SNAPSHOT;
	u64 max_period, init_val = 0;
	struct hw_perf_event *hwc;
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		if (ctr_ovf_mask & BIT(idx)) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
			cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val;
		}
		/*
		 * We do not need to update the non-overflow counters; the
		 * previous value should already be there.
		 */
	}

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		/* Restore the counter values to relative indices for used hw counters */
		for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
			sdata->ctr_values[idx] =
					cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG];
		/* Start all the counters in a single shot */
		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG,
			  cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
	}
}

static void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					u64 ctr_ovf_mask)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	if (sbi_pmu_snapshot_available())
		pmu_sbi_start_ovf_ctrs_snapshot(cpu_hw_evt, ctr_ovf_mask);
	else
		pmu_sbi_start_ovf_ctrs_sbi(cpu_hw_evt, ctr_ovf_mask);
}

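/*
 * Overflow IRQ flow: stop this hart's hardware counters, read the overflow
 * status (scountovf CSR or snapshot shared memory), clear the pending bit,
 * emit a perf sample for each overflowed sampling event, then restart the
 * counters with updated initial values.
 */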
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	u64 overflow;
	u64 overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;
	u64 start_clock = sched_clock();
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	event = cpu_hw_evt->events[fidx];
	if (!event) {
		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after counters are stopped */
	if (sbi_pmu_snapshot_available())
		overflow = sdata->ctr_overflow_mask;
	else
		ALT_SBI_PMU_OVERFLOW(overflow);

	/*
	 * The overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if invalid event or user did not request sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		if (sbi_pmu_snapshot_available())
			/* The SBI implementation already updated the logical indices */
			hidx = lidx;
		else
			/* Compute the hardware counter index */
			hidx = info->csr - CSR_CYCLE;

		/* Check if the corresponding bit is set in scountovf or the overflow mask in shmem */
		if (!(overflow & BIT(hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= BIT(lidx);
		hw_evt = &event->hw;
		/* Update the event states here so that we know the state while reading */
		hw_evt->state |= PERF_HES_STOPPED;
		riscv_pmu_event_update(event);
		hw_evt->state |= PERF_HES_UPTODATE;
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
			 * to avoid throttling here. As per the specification, the
			 * interrupt remains disabled until the OF bit is set.
			 * Interrupts are enabled again only during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
		/* Reset the state as we are going to start the counter after the loop */
		hw_evt->state = 0;
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;
}
10034905ec2fSAtish Patra 
1004e9991434SAtish Patra static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
1005e9991434SAtish Patra {
1006e9991434SAtish Patra 	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
10074905ec2fSAtish Patra 	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
1008e9991434SAtish Patra 
10095a5294fbSPalmer Dabbelt 	/*
1010cc4c07c8SAlexandre Ghiti 	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
1011cc4c07c8SAlexandre Ghiti 	 * legacy option but that will be removed in the future.
10125a5294fbSPalmer Dabbelt 	 */
1013cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_LEGACY)
10145a5294fbSPalmer Dabbelt 		csr_write(CSR_SCOUNTEREN, 0x7);
1015cc4c07c8SAlexandre Ghiti 	else
1016cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x2);
1017e9991434SAtish Patra 
1018e9991434SAtish Patra 	/* Stop all the counters so that they can be enabled from perf */
10194905ec2fSAtish Patra 	pmu_sbi_stop_all(pmu);
10204905ec2fSAtish Patra 
102165e9fb08SHeiko Stuebner 	if (riscv_pmu_use_irq) {
10224905ec2fSAtish Patra 		cpu_hw_evt->irq = riscv_pmu_irq;
1023bc969d6cSYu Chien Peter Lin 		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
10244905ec2fSAtish Patra 		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
10254905ec2fSAtish Patra 	}
1026e9991434SAtish Patra 
1027a8625217SAtish Patra 	if (sbi_pmu_snapshot_available())
1028a8625217SAtish Patra 		return pmu_sbi_snapshot_setup(pmu, cpu);
1029a8625217SAtish Patra 
1030e9991434SAtish Patra 	return 0;
1031e9991434SAtish Patra }
1032e9991434SAtish Patra 
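/*
 * Illustrative sketch, never compiled: what the SCOUNTEREN writes above
 * mean for user space. Bit 1 (TM) is set in both modes, so rdtime works
 * from U-mode either way; bits 0 (CY) and 2 (IR) are only set with the
 * legacy option. A user space read of the time CSR would then look like:
 */
#if 0
static inline unsigned long example_user_rdtime(void)
{
	unsigned long t;

	/* Traps with an illegal-instruction exception if SCOUNTEREN.TM is clear */
	asm volatile ("rdtime %0" : "=r" (t));
	return t;
}
#endif
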
1033e9991434SAtish Patra static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
1034e9991434SAtish Patra {
103565e9fb08SHeiko Stuebner 	if (riscv_pmu_use_irq)
10364905ec2fSAtish Patra 		disable_percpu_irq(riscv_pmu_irq);
10384905ec2fSAtish Patra 
1039e9991434SAtish Patra 	/* Disable all counter access for user mode now */
1040e9991434SAtish Patra 	csr_write(CSR_SCOUNTEREN, 0x0);
1041e9991434SAtish Patra 
1042a8625217SAtish Patra 	if (sbi_pmu_snapshot_available())
1043a8625217SAtish Patra 		return pmu_sbi_snapshot_disable();
1044a8625217SAtish Patra 
1045e9991434SAtish Patra 	return 0;
1046e9991434SAtish Patra }
1047e9991434SAtish Patra 
10484905ec2fSAtish Patra static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
10494905ec2fSAtish Patra {
10504905ec2fSAtish Patra 	int ret;
10514905ec2fSAtish Patra 	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
10524905ec2fSAtish Patra 	struct irq_domain *domain = NULL;
10534905ec2fSAtish Patra 
105465e9fb08SHeiko Stuebner 	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
105565e9fb08SHeiko Stuebner 		riscv_pmu_irq_num = RV_IRQ_PMU;
105665e9fb08SHeiko Stuebner 		riscv_pmu_use_irq = true;
105765e9fb08SHeiko Stuebner 	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
105865e9fb08SHeiko Stuebner 		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
105965e9fb08SHeiko Stuebner 		   riscv_cached_marchid(0) == 0 &&
106065e9fb08SHeiko Stuebner 		   riscv_cached_mimpid(0) == 0) {
106165e9fb08SHeiko Stuebner 		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
106265e9fb08SHeiko Stuebner 		riscv_pmu_use_irq = true;
1063bc969d6cSYu Chien Peter Lin 	} else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
1064bc969d6cSYu Chien Peter Lin 		   IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
1065bc969d6cSYu Chien Peter Lin 		riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
1066bc969d6cSYu Chien Peter Lin 		riscv_pmu_use_irq = true;
106765e9fb08SHeiko Stuebner 	}
106865e9fb08SHeiko Stuebner 
1069bc969d6cSYu Chien Peter Lin 	riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
1070bc969d6cSYu Chien Peter Lin 
107165e9fb08SHeiko Stuebner 	if (!riscv_pmu_use_irq)
10724905ec2fSAtish Patra 		return -EOPNOTSUPP;
10734905ec2fSAtish Patra 
1074ca7473cbSSunil V L 	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
1075ca7473cbSSunil V L 					  DOMAIN_BUS_ANY);
10764905ec2fSAtish Patra 	if (!domain) {
10774905ec2fSAtish Patra 		pr_err("Failed to find INTC IRQ root domain\n");
10784905ec2fSAtish Patra 		return -ENODEV;
10794905ec2fSAtish Patra 	}
10804905ec2fSAtish Patra 
108165e9fb08SHeiko Stuebner 	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
10824905ec2fSAtish Patra 	if (!riscv_pmu_irq) {
10834905ec2fSAtish Patra 		pr_err("Failed to map PMU interrupt for node\n");
10844905ec2fSAtish Patra 		return -ENODEV;
10854905ec2fSAtish Patra 	}
10864905ec2fSAtish Patra 
10874905ec2fSAtish Patra 	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
10884905ec2fSAtish Patra 	if (ret) {
10894905ec2fSAtish Patra 		pr_err("registering percpu irq failed [%d]\n", ret);
10904905ec2fSAtish Patra 		return ret;
10914905ec2fSAtish Patra 	}
10924905ec2fSAtish Patra 
10934905ec2fSAtish Patra 	return 0;
10944905ec2fSAtish Patra }
10954905ec2fSAtish Patra 
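/*
 * Hedged sketch, never compiled: the riscv_pmu_irq_mask computation in
 * pmu_sbi_setup_irqs(). For the standard Sscofpmf overflow interrupt the
 * cause number is below BITS_PER_LONG, so the modulo is a no-op and the
 * mask is simply BIT(cause), as consumed by
 * ALT_SBI_PMU_OVF_CLEAR_PENDING(). The modulo only matters for vendor
 * interrupt numbers, such as the Andes SLI causes, that are offset past
 * the XLEN-sized CSR.
 */
#if 0
static inline unsigned long example_irq_pending_mask(int cause)
{
	return BIT(cause % BITS_PER_LONG);
}
#endif
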
1096e9a023f2SEric Lin #ifdef CONFIG_CPU_PM
1097e9a023f2SEric Lin static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
1098e9a023f2SEric Lin 				void *v)
1099e9a023f2SEric Lin {
1100e9a023f2SEric Lin 	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
1101e9a023f2SEric Lin 	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
1102e9a023f2SEric Lin 	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
1103e9a023f2SEric Lin 	struct perf_event *event;
1104e9a023f2SEric Lin 	int idx;
1105e9a023f2SEric Lin 
1106e9a023f2SEric Lin 	if (!enabled)
1107e9a023f2SEric Lin 		return NOTIFY_OK;
1108e9a023f2SEric Lin 
1109e9a023f2SEric Lin 	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
1110e9a023f2SEric Lin 		event = cpuc->events[idx];
1111e9a023f2SEric Lin 		if (!event)
1112e9a023f2SEric Lin 			continue;
1113e9a023f2SEric Lin 
1114e9a023f2SEric Lin 		switch (cmd) {
1115e9a023f2SEric Lin 		case CPU_PM_ENTER:
1116e9a023f2SEric Lin 			/*
1117e9a023f2SEric Lin 			 * Stop and update the counter
1118e9a023f2SEric Lin 			 */
1119e9a023f2SEric Lin 			riscv_pmu_stop(event, PERF_EF_UPDATE);
1120e9a023f2SEric Lin 			break;
1121e9a023f2SEric Lin 		case CPU_PM_EXIT:
1122e9a023f2SEric Lin 		case CPU_PM_ENTER_FAILED:
1123e9a023f2SEric Lin 			/*
1124e9a023f2SEric Lin 			 * Restore and enable the counter.
1125e9a023f2SEric Lin 			 */
11261c38b061SPeter Zijlstra 			riscv_pmu_start(event, PERF_EF_RELOAD);
1127e9a023f2SEric Lin 			break;
1128e9a023f2SEric Lin 		default:
1129e9a023f2SEric Lin 			break;
1130e9a023f2SEric Lin 		}
1131e9a023f2SEric Lin 	}
1132e9a023f2SEric Lin 
1133e9a023f2SEric Lin 	return NOTIFY_OK;
1134e9a023f2SEric Lin }
1135e9a023f2SEric Lin 
1136e9a023f2SEric Lin static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
1137e9a023f2SEric Lin {
1138e9a023f2SEric Lin 	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
1139e9a023f2SEric Lin 	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
1140e9a023f2SEric Lin }
1141e9a023f2SEric Lin 
1142e9a023f2SEric Lin static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
1143e9a023f2SEric Lin {
1144e9a023f2SEric Lin 	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
1145e9a023f2SEric Lin }
1146e9a023f2SEric Lin #else
1147e9a023f2SEric Lin static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
1148e9a023f2SEric Lin static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
1149e9a023f2SEric Lin #endif
1150e9a023f2SEric Lin 
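/*
 * Hedged sketch, never compiled: how the notifier above is reached.
 * A platform's low-power path brackets the power-down with
 * cpu_pm_enter()/cpu_pm_exit() (see <linux/cpu_pm.h>), which deliver
 * CPU_PM_ENTER and CPU_PM_EXIT to registered notifiers such as
 * riscv_pm_pmu_notify(). example_enter_idle() is a hypothetical
 * idle-driver helper, not part of this file.
 */
#if 0
static int example_enter_idle(void)
{
	int ret;

	ret = cpu_pm_enter();	/* PMU counters stopped and saved here */
	if (ret)
		return ret;	/* CPU_PM_ENTER_FAILED already delivered */

	/* ... architectural suspend, e.g. wfi or an SBI HSM call ... */

	cpu_pm_exit();		/* PMU counters restored and restarted here */
	return 0;
}
#endif
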
1151e9a023f2SEric Lin static void riscv_pmu_destroy(struct riscv_pmu *pmu)
1152e9a023f2SEric Lin {
1153a8625217SAtish Patra 	if (sbi_v2_available) {
1154a8625217SAtish Patra 		if (sbi_pmu_snapshot_available()) {
1155a8625217SAtish Patra 			pmu_sbi_snapshot_disable();
1156a8625217SAtish Patra 			pmu_sbi_snapshot_free(pmu);
1157a8625217SAtish Patra 		}
1158a8625217SAtish Patra 	}
1159e9a023f2SEric Lin 	riscv_pm_pmu_unregister(pmu);
1160e9a023f2SEric Lin 	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
1161e9a023f2SEric Lin }
1162e9a023f2SEric Lin 
1163cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_init(struct perf_event *event)
1164cc4c07c8SAlexandre Ghiti {
1165cc4c07c8SAlexandre Ghiti 	/*
1166cc4c07c8SAlexandre Ghiti 	 * The permissions are set at event_init so that we do not depend
1167cc4c07c8SAlexandre Ghiti 	 * on the sysctl value that can change.
1168cc4c07c8SAlexandre Ghiti 	 */
1169cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
1170cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
1171cc4c07c8SAlexandre Ghiti 	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
1172cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
1173cc4c07c8SAlexandre Ghiti 	else
1174cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
1175cc4c07c8SAlexandre Ghiti }
1176cc4c07c8SAlexandre Ghiti 
1177cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
1178cc4c07c8SAlexandre Ghiti {
1179cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
1180cc4c07c8SAlexandre Ghiti 		return;
1181cc4c07c8SAlexandre Ghiti 
1182cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
1183cc4c07c8SAlexandre Ghiti 		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
1184cc4c07c8SAlexandre Ghiti 		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
1185cc4c07c8SAlexandre Ghiti 			return;
1187cc4c07c8SAlexandre Ghiti 	}
1188cc4c07c8SAlexandre Ghiti 
1189cc4c07c8SAlexandre Ghiti 	/*
1190cc4c07c8SAlexandre Ghiti 	 * The user mmapped the event to directly access it: this is where
1191cc4c07c8SAlexandre Ghiti 	 * we determine, based on sysctl_perf_user_access, whether we grant
1192cc4c07c8SAlexandre Ghiti 	 * userspace direct access to this event. That means that within the
1193cc4c07c8SAlexandre Ghiti 	 * same task, some events may be directly accessible and some others
1194cc4c07c8SAlexandre Ghiti 	 * may not, if the user changes the value of sysctl_perf_user_access
1195cc4c07c8SAlexandre Ghiti 	 * in the meantime.
1196cc4c07c8SAlexandre Ghiti 	 */
1197cc4c07c8SAlexandre Ghiti 
1198cc4c07c8SAlexandre Ghiti 	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
1199cc4c07c8SAlexandre Ghiti 
1200cc4c07c8SAlexandre Ghiti 	/*
1201cc4c07c8SAlexandre Ghiti 	 * We must enable userspace access *before* advertising in the user page
1202cc4c07c8SAlexandre Ghiti 	 * that it is possible to do so to avoid any race.
1203cc4c07c8SAlexandre Ghiti 	 * We must also notify all cpus here because threads currently running
1204cc4c07c8SAlexandre Ghiti 	 * on other cpus will try to directly access the counter too, without
1205cc4c07c8SAlexandre Ghiti 	 * calling pmu_sbi_ctr_start.
1206cc4c07c8SAlexandre Ghiti 	 */
1207cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
1208cc4c07c8SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(mm),
1209cc4c07c8SAlexandre Ghiti 				 pmu_sbi_set_scounteren, (void *)event, 1);
1210cc4c07c8SAlexandre Ghiti }
1211cc4c07c8SAlexandre Ghiti 
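/*
 * Hedged user space sketch, never compiled: the self-monitoring reader
 * that the ordering above protects, following the documented
 * struct perf_event_mmap_page protocol from <linux/perf_event.h>.
 * read_csr_by_index() is a hypothetical helper mapping "index - 1"
 * (see pmu_sbi_csr_index()) to the matching counter CSR read.
 */
#if 0
extern __u64 read_csr_by_index(__u32 idx);	/* hypothetical */

static __u64 example_user_self_read(volatile struct perf_event_mmap_page *pc)
{
	__u32 seq, idx;
	__u64 count;

	do {
		seq = pc->lock;
		asm volatile ("" ::: "memory");	/* compiler barrier */
		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += read_csr_by_index(idx - 1);
		asm volatile ("" ::: "memory");
	} while (pc->lock != seq);	/* retry if the kernel updated the page */

	return count;
}
#endif
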
1212cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
1213cc4c07c8SAlexandre Ghiti {
1214cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
1215cc4c07c8SAlexandre Ghiti 		return;
1216cc4c07c8SAlexandre Ghiti 
1217cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
1218cc4c07c8SAlexandre Ghiti 		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
1219cc4c07c8SAlexandre Ghiti 		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
1220cc4c07c8SAlexandre Ghiti 			return;
1222cc4c07c8SAlexandre Ghiti 	}
1223cc4c07c8SAlexandre Ghiti 
1224cc4c07c8SAlexandre Ghiti 	/*
1225cc4c07c8SAlexandre Ghiti 	 * Here we can remove user access right away since the user no longer
1226cc4c07c8SAlexandre Ghiti 	 * has access to the user page. This avoids the racy window where the
1227cc4c07c8SAlexandre Ghiti 	 * user could have read cap_user_rdpmc as true right before we disable
1228cc4c07c8SAlexandre Ghiti 	 * it.
1229cc4c07c8SAlexandre Ghiti 	 */
1230cc4c07c8SAlexandre Ghiti 	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
1231cc4c07c8SAlexandre Ghiti 
1232cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
1233cc4c07c8SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(mm),
1234cc4c07c8SAlexandre Ghiti 				 pmu_sbi_reset_scounteren, (void *)event, 1);
1235cc4c07c8SAlexandre Ghiti }
1236cc4c07c8SAlexandre Ghiti 
1237cc4c07c8SAlexandre Ghiti static void riscv_pmu_update_counter_access(void *info)
1238cc4c07c8SAlexandre Ghiti {
1239cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_LEGACY)
1240cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x7);
1241cc4c07c8SAlexandre Ghiti 	else
1242cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x2);
1243cc4c07c8SAlexandre Ghiti }
1244cc4c07c8SAlexandre Ghiti 
1245cc4c07c8SAlexandre Ghiti static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
1246cc4c07c8SAlexandre Ghiti 					      int write, void *buffer,
1247cc4c07c8SAlexandre Ghiti 					      size_t *lenp, loff_t *ppos)
1248cc4c07c8SAlexandre Ghiti {
1249cc4c07c8SAlexandre Ghiti 	int prev = sysctl_perf_user_access;
1250cc4c07c8SAlexandre Ghiti 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1251cc4c07c8SAlexandre Ghiti 
1252cc4c07c8SAlexandre Ghiti 	/*
1253cc4c07c8SAlexandre Ghiti 	 * Test against the previous value since we clear SCOUNTEREN when
1254cc4c07c8SAlexandre Ghiti 	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
1255cc4c07c8SAlexandre Ghiti 	 * not do that if the value did not actually change.
1256cc4c07c8SAlexandre Ghiti 	 */
1257cc4c07c8SAlexandre Ghiti 	if (ret || !write || prev == sysctl_perf_user_access)
1258cc4c07c8SAlexandre Ghiti 		return ret;
1259cc4c07c8SAlexandre Ghiti 
1260cc4c07c8SAlexandre Ghiti 	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);
1261cc4c07c8SAlexandre Ghiti 
1262cc4c07c8SAlexandre Ghiti 	return 0;
1263cc4c07c8SAlexandre Ghiti }
1264cc4c07c8SAlexandre Ghiti 
1265cc4c07c8SAlexandre Ghiti static struct ctl_table sbi_pmu_sysctl_table[] = {
1266cc4c07c8SAlexandre Ghiti 	{
1267cc4c07c8SAlexandre Ghiti 		.procname       = "perf_user_access",
1268cc4c07c8SAlexandre Ghiti 		.data		= &sysctl_perf_user_access,
1269cc4c07c8SAlexandre Ghiti 		.maxlen		= sizeof(unsigned int),
1270cc4c07c8SAlexandre Ghiti 		.mode           = 0644,
1271cc4c07c8SAlexandre Ghiti 		.proc_handler	= riscv_pmu_proc_user_access_handler,
1272cc4c07c8SAlexandre Ghiti 		.extra1		= SYSCTL_ZERO,
1273cc4c07c8SAlexandre Ghiti 		.extra2		= SYSCTL_TWO,
1274cc4c07c8SAlexandre Ghiti 	},
1275cc4c07c8SAlexandre Ghiti };
1276cc4c07c8SAlexandre Ghiti 
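/*
 * Hedged user space sketch, never compiled: toggling the sysctl defined
 * above. Accepted values are 0 (SYSCTL_NO_USER_ACCESS),
 * 1 (SYSCTL_USER_ACCESS) and 2 (SYSCTL_LEGACY); equivalent to running
 * "sysctl kernel.perf_user_access=1".
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_set_perf_user_access(char val)	/* '0', '1' or '2' */
{
	int fd = open("/proc/sys/kernel/perf_user_access", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, &val, 1);
	close(fd);
	return ret == 1 ? 0 : -1;
}
#endif
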
1277e9991434SAtish Patra static int pmu_sbi_device_probe(struct platform_device *pdev)
1278e9991434SAtish Patra {
1279e9991434SAtish Patra 	struct riscv_pmu *pmu = NULL;
12804905ec2fSAtish Patra 	int ret = -ENODEV;
12811537bf26SSergey Matyukevich 	int num_counters;
1282e9991434SAtish Patra 
1283e9991434SAtish Patra 	pr_info("SBI PMU extension is available\n");
1284e9991434SAtish Patra 	pmu = riscv_pmu_alloc();
1285e9991434SAtish Patra 	if (!pmu)
1286e9991434SAtish Patra 		return -ENOMEM;
1287e9991434SAtish Patra 
1288e9991434SAtish Patra 	num_counters = pmu_sbi_find_num_ctrs();
1289e9991434SAtish Patra 	if (num_counters < 0) {
1290e9991434SAtish Patra 		pr_err("SBI PMU extension doesn't provide any counters\n");
12914905ec2fSAtish Patra 		goto out_free;
1292e9991434SAtish Patra 	}
1293e9991434SAtish Patra 
1294ee95b88dSViacheslav Mitrofanov 	/* It is possible for the SBI to report more counters than the driver maximum */
1295ee95b88dSViacheslav Mitrofanov 	if (num_counters > RISCV_MAX_COUNTERS) {
1296ee95b88dSViacheslav Mitrofanov 		num_counters = RISCV_MAX_COUNTERS;
1297ee95b88dSViacheslav Mitrofanov 		pr_info("SBI returned more than the maximum number of counters. Limiting the number of counters to %d\n", num_counters);
1298ee95b88dSViacheslav Mitrofanov 	}
1298ee95b88dSViacheslav Mitrofanov 	}
1299ee95b88dSViacheslav Mitrofanov 
1300e9991434SAtish Patra 	/* cache all the information about counters now */
13011537bf26SSergey Matyukevich 	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
13024905ec2fSAtish Patra 		goto out_free;
1303e9991434SAtish Patra 
13044905ec2fSAtish Patra 	ret = pmu_sbi_setup_irqs(pmu, pdev);
13054905ec2fSAtish Patra 	if (ret < 0) {
13064905ec2fSAtish Patra 		pr_info("Perf sampling/filtering is not supported as the Sscofpmf extension is not available\n");
13074905ec2fSAtish Patra 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
13084905ec2fSAtish Patra 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
13094905ec2fSAtish Patra 	}
13101537bf26SSergey Matyukevich 
131126fabd6dSNikita Shubin 	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
131250650e5fSJonathan Cameron 	pmu->pmu.parent = &pdev->dev;
13131537bf26SSergey Matyukevich 	pmu->cmask = cmask;
1314e9991434SAtish Patra 	pmu->ctr_start = pmu_sbi_ctr_start;
1315e9991434SAtish Patra 	pmu->ctr_stop = pmu_sbi_ctr_stop;
1316e9991434SAtish Patra 	pmu->event_map = pmu_sbi_event_map;
1317e9991434SAtish Patra 	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
1318e9991434SAtish Patra 	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
1319e9991434SAtish Patra 	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
1320e9991434SAtish Patra 	pmu->ctr_read = pmu_sbi_ctr_read;
1321cc4c07c8SAlexandre Ghiti 	pmu->event_init = pmu_sbi_event_init;
1322cc4c07c8SAlexandre Ghiti 	pmu->event_mapped = pmu_sbi_event_mapped;
1323cc4c07c8SAlexandre Ghiti 	pmu->event_unmapped = pmu_sbi_event_unmapped;
1324cc4c07c8SAlexandre Ghiti 	pmu->csr_index = pmu_sbi_csr_index;
1325e9991434SAtish Patra 
1326e9a023f2SEric Lin 	ret = riscv_pm_pmu_register(pmu);
1327e9a023f2SEric Lin 	if (ret)
1328e9a023f2SEric Lin 		goto out_unregister;
1329e9a023f2SEric Lin 
1330e9991434SAtish Patra 	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
1331e9a023f2SEric Lin 	if (ret)
1332e9a023f2SEric Lin 		goto out_unregister;
1333e9991434SAtish Patra 
1334a8625217SAtish Patra 	/* The SBI PMU snapshot interface is only available from SBI v2.0 */
1335a8625217SAtish Patra 	if (sbi_v2_available) {
1336a8625217SAtish Patra 		ret = pmu_sbi_snapshot_alloc(pmu);
1337a8625217SAtish Patra 		if (ret)
1338a8625217SAtish Patra 			goto out_unregister;
1339a8625217SAtish Patra 
1340a8625217SAtish Patra 		ret = pmu_sbi_snapshot_setup(pmu, smp_processor_id());
1341a8625217SAtish Patra 		if (ret) {
1342a8625217SAtish Patra 			/* Snapshot is an optional feature. Continue if not available */
1343a8625217SAtish Patra 			pmu_sbi_snapshot_free(pmu);
1344a8625217SAtish Patra 		} else {
1345a8625217SAtish Patra 			pr_info("SBI PMU snapshot detected\n");
1346a8625217SAtish Patra 			/*
1347a8625217SAtish Patra 			 * We enable it once here for the boot cpu. If snapshot shmem setup
1348a8625217SAtish Patra 			 * fails during the cpu hotplug process, starting the cpu will fail,
1349a8625217SAtish Patra 			 * as we cannot handle heterogeneous PMUs with different snapshot
1350a8625217SAtish Patra 			 * capabilities.
1351a8625217SAtish Patra 			 */
1352a8625217SAtish Patra 			static_branch_enable(&sbi_pmu_snapshot_available);
1353a8625217SAtish Patra 		}
1354a8625217SAtish Patra 	}
1355a8625217SAtish Patra 
1356cc4c07c8SAlexandre Ghiti 	register_sysctl("kernel", sbi_pmu_sysctl_table);
1357cc4c07c8SAlexandre Ghiti 
1358a8625217SAtish Patra 	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
1359a8625217SAtish Patra 	if (ret)
1360a8625217SAtish Patra 		goto out_unregister;
1361a8625217SAtish Patra 
1362e9991434SAtish Patra 	return 0;
13634905ec2fSAtish Patra 
1364e9a023f2SEric Lin out_unregister:
1365e9a023f2SEric Lin 	riscv_pmu_destroy(pmu);
1366e9a023f2SEric Lin 
13674905ec2fSAtish Patra out_free:
13684905ec2fSAtish Patra 	kfree(pmu);
13694905ec2fSAtish Patra 	return ret;
1370e9991434SAtish Patra }
1371e9991434SAtish Patra 
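/*
 * Hedged sketch, never compiled: the shape of the snapshot shared memory
 * set up by the probe above, per the SBI v2.0 PMU snapshot description.
 * The firmware publishes an overflow bitmap and the stopped counter
 * values in one 4 KiB region, so the overflow handler can read both
 * without per-counter SBI calls. The kernel's real definition lives in
 * <asm/sbi.h>.
 */
#if 0
struct example_snapshot_layout {
	u64 ctr_overflow_mask;	/* bitmap of overflowed counters */
	u64 ctr_values[64];	/* counter values at snapshot time */
	u64 reserved[447];	/* pads the region to 4096 bytes */
};
#endif
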
1372e9991434SAtish Patra static struct platform_driver pmu_sbi_driver = {
1373e9991434SAtish Patra 	.probe		= pmu_sbi_device_probe,
1374e9991434SAtish Patra 	.driver		= {
1375d5ac062dSAlexandre Ghiti 		.name	= RISCV_PMU_SBI_PDEV_NAME,
1376e9991434SAtish Patra 	},
1377e9991434SAtish Patra };
1378e9991434SAtish Patra 
1379e9991434SAtish Patra static int __init pmu_sbi_devinit(void)
1380e9991434SAtish Patra {
1381e9991434SAtish Patra 	int ret;
1382e9991434SAtish Patra 	struct platform_device *pdev;
1383e9991434SAtish Patra 
1384e9991434SAtish Patra 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
138541cad828SAndrew Jones 	    !sbi_probe_extension(SBI_EXT_PMU)) {
1386e9991434SAtish Patra 		return 0;
1387e9991434SAtish Patra 	}
1388e9991434SAtish Patra 
13897dda24baSAtish Patra 	if (sbi_spec_version >= sbi_mk_version(2, 0))
13907dda24baSAtish Patra 		sbi_v2_available = true;
13917dda24baSAtish Patra 
1392e9991434SAtish Patra 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
1393e9991434SAtish Patra 				      "perf/riscv/pmu:starting",
1394e9991434SAtish Patra 				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
1395e9991434SAtish Patra 	if (ret) {
1396e9991434SAtish Patra 		pr_err("CPU hotplug notifier could not be registered: %d\n",
1397e9991434SAtish Patra 		       ret);
1398e9991434SAtish Patra 		return ret;
1399e9991434SAtish Patra 	}
1400e9991434SAtish Patra 
1401e9991434SAtish Patra 	ret = platform_driver_register(&pmu_sbi_driver);
1402e9991434SAtish Patra 	if (ret)
1403e9991434SAtish Patra 		return ret;
1404e9991434SAtish Patra 
1405d5ac062dSAlexandre Ghiti 	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
1406e9991434SAtish Patra 	if (IS_ERR(pdev)) {
1407e9991434SAtish Patra 		platform_driver_unregister(&pmu_sbi_driver);
1408e9991434SAtish Patra 		return PTR_ERR(pdev);
1409e9991434SAtish Patra 	}
1410e9991434SAtish Patra 
1411e9991434SAtish Patra 	/* Notify the legacy implementation that the SBI PMU is available */
1412e9991434SAtish Patra 	riscv_pmu_legacy_skip_init();
1413e9991434SAtish Patra 
1414e9991434SAtish Patra 	return ret;
1415e9991434SAtish Patra }
1416e9991434SAtish Patra device_initcall(pmu_sbi_devinit)
1417