xref: /linux/drivers/perf/riscv_pmu_sbi.c (revision 3fec323339a4a9801a54e8b282eb571965b67b23)
1e9991434SAtish Patra // SPDX-License-Identifier: GPL-2.0
2e9991434SAtish Patra /*
3e9991434SAtish Patra  * RISC-V performance counter support.
4e9991434SAtish Patra  *
5e9991434SAtish Patra  * Copyright (C) 2021 Western Digital Corporation or its affiliates.
6e9991434SAtish Patra  *
7e9991434SAtish Patra  * This code is based on ARM perf event code which is in turn based on
8e9991434SAtish Patra  * sparc64 and x86 code.
9e9991434SAtish Patra  */
10e9991434SAtish Patra 
11e9991434SAtish Patra #define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
12e9991434SAtish Patra 
13e9991434SAtish Patra #include <linux/mod_devicetable.h>
14e9991434SAtish Patra #include <linux/perf/riscv_pmu.h>
15e9991434SAtish Patra #include <linux/platform_device.h>
164905ec2fSAtish Patra #include <linux/irq.h>
174905ec2fSAtish Patra #include <linux/irqdomain.h>
184905ec2fSAtish Patra #include <linux/of_irq.h>
194905ec2fSAtish Patra #include <linux/of.h>
20e9a023f2SEric Lin #include <linux/cpu_pm.h>
21096b52fdSSergey Matyukevich #include <linux/sched/clock.h>
22e9991434SAtish Patra 
2365e9fb08SHeiko Stuebner #include <asm/errata_list.h>
24e9991434SAtish Patra #include <asm/sbi.h>
254905ec2fSAtish Patra #include <asm/hwcap.h>
26e9991434SAtish Patra 
27cc4c07c8SAlexandre Ghiti #define SYSCTL_NO_USER_ACCESS	0
28cc4c07c8SAlexandre Ghiti #define SYSCTL_USER_ACCESS	1
29cc4c07c8SAlexandre Ghiti #define SYSCTL_LEGACY		2
30cc4c07c8SAlexandre Ghiti 
31cc4c07c8SAlexandre Ghiti #define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
32cc4c07c8SAlexandre Ghiti #define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
33cc4c07c8SAlexandre Ghiti #define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)
34cc4c07c8SAlexandre Ghiti 
3526fabd6dSNikita Shubin PMU_FORMAT_ATTR(event, "config:0-47");
3626fabd6dSNikita Shubin PMU_FORMAT_ATTR(firmware, "config:63");
3726fabd6dSNikita Shubin 
3826fabd6dSNikita Shubin static struct attribute *riscv_arch_formats_attr[] = {
3926fabd6dSNikita Shubin 	&format_attr_event.attr,
4026fabd6dSNikita Shubin 	&format_attr_firmware.attr,
4126fabd6dSNikita Shubin 	NULL,
4226fabd6dSNikita Shubin };
4326fabd6dSNikita Shubin 
4426fabd6dSNikita Shubin static struct attribute_group riscv_pmu_format_group = {
4526fabd6dSNikita Shubin 	.name = "format",
4626fabd6dSNikita Shubin 	.attrs = riscv_arch_formats_attr,
4726fabd6dSNikita Shubin };
4826fabd6dSNikita Shubin 
4926fabd6dSNikita Shubin static const struct attribute_group *riscv_pmu_attr_groups[] = {
5026fabd6dSNikita Shubin 	&riscv_pmu_format_group,
5126fabd6dSNikita Shubin 	NULL,
5226fabd6dSNikita Shubin };
5326fabd6dSNikita Shubin 
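/*
 * Illustrative note (editor-added, hedged): the "event" and "firmware" format
 * fields above are what the perf tool's raw-event syntax maps onto for this
 * PMU, so a hardware raw event can typically be requested as
 *
 *	perf stat -e cpu/event=0xNNNN/ ...
 *
 * and an SBI firmware event by also setting config bit 63:
 *
 *	perf stat -e cpu/event=0xN,firmware=0x1/ ...
 *
 * The actual event numbers are platform/SBI-implementation specific.
 */
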
54cc4c07c8SAlexandre Ghiti /* Allow user mode access by default */
55cc4c07c8SAlexandre Ghiti static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
56cc4c07c8SAlexandre Ghiti 
57c7a9dceaSPalmer Dabbelt /*
58585e351fSAtish Patra  * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
59e9991434SAtish Patra  * per_cpu in case of harts with different PMU counters.
60e9991434SAtish Patra  */
61e9991434SAtish Patra static union sbi_pmu_ctr_info *pmu_ctr_list;
6265e9fb08SHeiko Stuebner static bool riscv_pmu_use_irq;
6365e9fb08SHeiko Stuebner static unsigned int riscv_pmu_irq_num;
644905ec2fSAtish Patra static unsigned int riscv_pmu_irq;
65e9991434SAtish Patra 
66585e351fSAtish Patra /* Cache the available counters in a bitmask */
67585e351fSAtish Patra static unsigned long cmask;
68585e351fSAtish Patra 
69e9991434SAtish Patra struct sbi_pmu_event_data {
70e9991434SAtish Patra 	union {
71e9991434SAtish Patra 		union {
72e9991434SAtish Patra 			struct hw_gen_event {
73e9991434SAtish Patra 				uint32_t event_code:16;
74e9991434SAtish Patra 				uint32_t event_type:4;
75e9991434SAtish Patra 				uint32_t reserved:12;
76e9991434SAtish Patra 			} hw_gen_event;
77e9991434SAtish Patra 			struct hw_cache_event {
78e9991434SAtish Patra 				uint32_t result_id:1;
79e9991434SAtish Patra 				uint32_t op_id:2;
80e9991434SAtish Patra 				uint32_t cache_id:13;
81e9991434SAtish Patra 				uint32_t event_type:4;
82e9991434SAtish Patra 				uint32_t reserved:12;
83e9991434SAtish Patra 			} hw_cache_event;
84e9991434SAtish Patra 		};
85e9991434SAtish Patra 		uint32_t event_idx;
86e9991434SAtish Patra 	};
87e9991434SAtish Patra };
88e9991434SAtish Patra 
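/*
 * Editor's sketch (hedged): mirroring the bitfields above, event_idx packs
 * the event code in bits [15:0] and the event type in bits [19:16], i.e.
 * event_idx == (type << 16) | code. For example, assuming the SBI PMU values
 * SBI_PMU_EVENT_TYPE_HW == 0 and SBI_PMU_HW_CPU_CYCLES == 1, the cycle event
 * in the map below encodes to event_idx == 0x00001.
 */
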
89e9991434SAtish Patra static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
90e9991434SAtish Patra 	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
91e9991434SAtish Patra 							SBI_PMU_HW_CPU_CYCLES,
92e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
93e9991434SAtish Patra 	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
94e9991434SAtish Patra 							SBI_PMU_HW_INSTRUCTIONS,
95e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
96e9991434SAtish Patra 	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
97e9991434SAtish Patra 							SBI_PMU_HW_CACHE_REFERENCES,
98e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
99e9991434SAtish Patra 	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
100e9991434SAtish Patra 							SBI_PMU_HW_CACHE_MISSES,
101e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
102e9991434SAtish Patra 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
103e9991434SAtish Patra 							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
104e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
105e9991434SAtish Patra 	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
106e9991434SAtish Patra 							SBI_PMU_HW_BRANCH_MISSES,
107e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
108e9991434SAtish Patra 	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
109e9991434SAtish Patra 							SBI_PMU_HW_BUS_CYCLES,
110e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
111e9991434SAtish Patra 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
112e9991434SAtish Patra 							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
113e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
114e9991434SAtish Patra 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
115e9991434SAtish Patra 							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
116e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
117e9991434SAtish Patra 	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
118e9991434SAtish Patra 							SBI_PMU_HW_REF_CPU_CYCLES,
119e9991434SAtish Patra 							SBI_PMU_EVENT_TYPE_HW, 0}},
120e9991434SAtish Patra };
121e9991434SAtish Patra 
122e9991434SAtish Patra #define C(x) PERF_COUNT_HW_CACHE_##x
123e9991434SAtish Patra static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
124e9991434SAtish Patra [PERF_COUNT_HW_CACHE_OP_MAX]
125e9991434SAtish Patra [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
126e9991434SAtish Patra 	[C(L1D)] = {
127e9991434SAtish Patra 		[C(OP_READ)] = {
128e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
129e9991434SAtish Patra 					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
130e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
131e9991434SAtish Patra 					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
132e9991434SAtish Patra 		},
133e9991434SAtish Patra 		[C(OP_WRITE)] = {
134e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
135e9991434SAtish Patra 					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
136e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
137e9991434SAtish Patra 					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
138e9991434SAtish Patra 		},
139e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
140e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
141e9991434SAtish Patra 					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
142e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
143e9991434SAtish Patra 					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
144e9991434SAtish Patra 		},
145e9991434SAtish Patra 	},
146e9991434SAtish Patra 	[C(L1I)] = {
147e9991434SAtish Patra 		[C(OP_READ)] = {
148e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event =	{C(RESULT_ACCESS),
149e9991434SAtish Patra 					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
150e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), C(OP_READ),
151e9991434SAtish Patra 					C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
152e9991434SAtish Patra 		},
153e9991434SAtish Patra 		[C(OP_WRITE)] = {
154e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
155e9991434SAtish Patra 					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
156e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
157e9991434SAtish Patra 					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
158e9991434SAtish Patra 		},
159e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
160e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
161e9991434SAtish Patra 					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
162e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
163e9991434SAtish Patra 					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
164e9991434SAtish Patra 		},
165e9991434SAtish Patra 	},
166e9991434SAtish Patra 	[C(LL)] = {
167e9991434SAtish Patra 		[C(OP_READ)] = {
168e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
169e9991434SAtish Patra 					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
170e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
171e9991434SAtish Patra 					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
172e9991434SAtish Patra 		},
173e9991434SAtish Patra 		[C(OP_WRITE)] = {
174e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
175e9991434SAtish Patra 					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
176e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
177e9991434SAtish Patra 					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
178e9991434SAtish Patra 		},
179e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
180e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
181e9991434SAtish Patra 					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
182e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
183e9991434SAtish Patra 					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
184e9991434SAtish Patra 		},
185e9991434SAtish Patra 	},
186e9991434SAtish Patra 	[C(DTLB)] = {
187e9991434SAtish Patra 		[C(OP_READ)] = {
188e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
189e9991434SAtish Patra 					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
190e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
191e9991434SAtish Patra 					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
192e9991434SAtish Patra 		},
193e9991434SAtish Patra 		[C(OP_WRITE)] = {
194e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
195e9991434SAtish Patra 					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
196e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
197e9991434SAtish Patra 					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
198e9991434SAtish Patra 		},
199e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
200e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
201e9991434SAtish Patra 					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
202e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
203e9991434SAtish Patra 					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
204e9991434SAtish Patra 		},
205e9991434SAtish Patra 	},
206e9991434SAtish Patra 	[C(ITLB)] = {
207e9991434SAtish Patra 		[C(OP_READ)] = {
208e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
209e9991434SAtish Patra 					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
210e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
211e9991434SAtish Patra 					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
212e9991434SAtish Patra 		},
213e9991434SAtish Patra 		[C(OP_WRITE)] = {
214e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
215e9991434SAtish Patra 					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
216e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
217e9991434SAtish Patra 					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
218e9991434SAtish Patra 		},
219e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
220e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
221e9991434SAtish Patra 					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
222e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
223e9991434SAtish Patra 					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
224e9991434SAtish Patra 		},
225e9991434SAtish Patra 	},
226e9991434SAtish Patra 	[C(BPU)] = {
227e9991434SAtish Patra 		[C(OP_READ)] = {
228e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
229e9991434SAtish Patra 					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
230e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
231e9991434SAtish Patra 					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
232e9991434SAtish Patra 		},
233e9991434SAtish Patra 		[C(OP_WRITE)] = {
234e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
235e9991434SAtish Patra 					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
236e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
237e9991434SAtish Patra 					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
238e9991434SAtish Patra 		},
239e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
240e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
241e9991434SAtish Patra 					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
242e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
243e9991434SAtish Patra 					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
244e9991434SAtish Patra 		},
245e9991434SAtish Patra 	},
246e9991434SAtish Patra 	[C(NODE)] = {
247e9991434SAtish Patra 		[C(OP_READ)] = {
248e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
249e9991434SAtish Patra 					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
250e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
251e9991434SAtish Patra 					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
252e9991434SAtish Patra 		},
253e9991434SAtish Patra 		[C(OP_WRITE)] = {
254e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
255e9991434SAtish Patra 					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
256e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
257e9991434SAtish Patra 					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
258e9991434SAtish Patra 		},
259e9991434SAtish Patra 		[C(OP_PREFETCH)] = {
260e9991434SAtish Patra 			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
261e9991434SAtish Patra 					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
262e9991434SAtish Patra 			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
263e9991434SAtish Patra 					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
264e9991434SAtish Patra 		},
265e9991434SAtish Patra 	},
266e9991434SAtish Patra };
267e9991434SAtish Patra 
268e9991434SAtish Patra static int pmu_sbi_ctr_get_width(int idx)
269e9991434SAtish Patra {
270e9991434SAtish Patra 	return pmu_ctr_list[idx].width;
271e9991434SAtish Patra }
272e9991434SAtish Patra 
273e9991434SAtish Patra static bool pmu_sbi_ctr_is_fw(int cidx)
274e9991434SAtish Patra {
275e9991434SAtish Patra 	union sbi_pmu_ctr_info *info;
276e9991434SAtish Patra 
277e9991434SAtish Patra 	info = &pmu_ctr_list[cidx];
278e9991434SAtish Patra 	if (!info)
279e9991434SAtish Patra 		return false;
280e9991434SAtish Patra 
281e9991434SAtish Patra 	return info->type == SBI_PMU_CTR_TYPE_FW;
282e9991434SAtish Patra }
283e9991434SAtish Patra 
284585e351fSAtish Patra /*
285585e351fSAtish Patra  * Returns the counter width of a programmable counter and number of hardware
286585e351fSAtish Patra  * counters. As we don't support heterogeneous CPUs yet, it is okay to just
287585e351fSAtish Patra  * return the counter width of the first programmable counter.
288585e351fSAtish Patra  */
289585e351fSAtish Patra int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
290585e351fSAtish Patra {
291585e351fSAtish Patra 	int i;
292585e351fSAtish Patra 	union sbi_pmu_ctr_info *info;
293585e351fSAtish Patra 	u32 hpm_width = 0, hpm_count = 0;
294585e351fSAtish Patra 
295585e351fSAtish Patra 	if (!cmask)
296585e351fSAtish Patra 		return -EINVAL;
297585e351fSAtish Patra 
298585e351fSAtish Patra 	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
299585e351fSAtish Patra 		info = &pmu_ctr_list[i];
300585e351fSAtish Patra 		if (!info)
301585e351fSAtish Patra 			continue;
302585e351fSAtish Patra 		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
303585e351fSAtish Patra 			hpm_width = info->width;
304585e351fSAtish Patra 		if (info->type == SBI_PMU_CTR_TYPE_HW)
305585e351fSAtish Patra 			hpm_count++;
306585e351fSAtish Patra 	}
307585e351fSAtish Patra 
308585e351fSAtish Patra 	*hw_ctr_width = hpm_width;
309585e351fSAtish Patra 	*num_hw_ctr = hpm_count;
310585e351fSAtish Patra 
311585e351fSAtish Patra 	return 0;
312585e351fSAtish Patra }
313585e351fSAtish Patra EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
314585e351fSAtish Patra 
315cc4c07c8SAlexandre Ghiti static uint8_t pmu_sbi_csr_index(struct perf_event *event)
316cc4c07c8SAlexandre Ghiti {
317cc4c07c8SAlexandre Ghiti 	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
318cc4c07c8SAlexandre Ghiti }
319cc4c07c8SAlexandre Ghiti 
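/*
 * Editor's note (hedged): CSR_CYCLE, CSR_TIME, CSR_INSTRET and
 * CSR_HPMCOUNTER3..31 have consecutive CSR numbers, so the value returned by
 * pmu_sbi_csr_index() (0 for cycle, 2 for instret, 3..31 for the hpmcounters)
 * is also the bit position used for that counter in SCOUNTEREN and the bit
 * tested against the overflow status in pmu_sbi_ovf_handler().
 */
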
3208929283aSAtish Patra static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
3218929283aSAtish Patra {
3228929283aSAtish Patra 	unsigned long cflags = 0;
3238929283aSAtish Patra 	bool guest_events = false;
3248929283aSAtish Patra 
3258929283aSAtish Patra 	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
3268929283aSAtish Patra 		guest_events = true;
3278929283aSAtish Patra 	if (event->attr.exclude_kernel)
3288929283aSAtish Patra 		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
3298929283aSAtish Patra 	if (event->attr.exclude_user)
3308929283aSAtish Patra 		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
3318929283aSAtish Patra 	if (guest_events && event->attr.exclude_hv)
3328929283aSAtish Patra 		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
3338929283aSAtish Patra 	if (event->attr.exclude_host)
3348929283aSAtish Patra 		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
3358929283aSAtish Patra 	if (event->attr.exclude_guest)
3368929283aSAtish Patra 		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;
3378929283aSAtish Patra 
3388929283aSAtish Patra 	return cflags;
3398929283aSAtish Patra }
3408929283aSAtish Patra 
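/*
 * Usage illustration (editor-added, hedged): "perf stat -e cycles:u" sets
 * attr.exclude_kernel, which the helper above maps to
 * SBI_PMU_CFG_FLAG_SET_SINH (inhibit counting in S-mode), while "cycles:k"
 * sets attr.exclude_user and maps to SBI_PMU_CFG_FLAG_SET_UINH instead.
 */
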
341e9991434SAtish Patra static int pmu_sbi_ctr_get_idx(struct perf_event *event)
342e9991434SAtish Patra {
343e9991434SAtish Patra 	struct hw_perf_event *hwc = &event->hw;
344e9991434SAtish Patra 	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
345e9991434SAtish Patra 	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
346e9991434SAtish Patra 	struct sbiret ret;
347e9991434SAtish Patra 	int idx;
348cc4c07c8SAlexandre Ghiti 	uint64_t cbase = 0, cmask = rvpmu->cmask;
349e9991434SAtish Patra 	unsigned long cflags = 0;
350e9991434SAtish Patra 
3518929283aSAtish Patra 	cflags = pmu_sbi_get_filter_flags(event);
352cc4c07c8SAlexandre Ghiti 
353cc4c07c8SAlexandre Ghiti 	/*
354cc4c07c8SAlexandre Ghiti 	 * In legacy mode, we have to force the fixed counters for those events
355cc4c07c8SAlexandre Ghiti 	 * but not in the user access mode as we want to use the other counters
356cc4c07c8SAlexandre Ghiti 	 * that support sampling/filtering.
357cc4c07c8SAlexandre Ghiti 	 */
358cc4c07c8SAlexandre Ghiti 	if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
359cc4c07c8SAlexandre Ghiti 		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
360cc4c07c8SAlexandre Ghiti 			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
361cc4c07c8SAlexandre Ghiti 			cmask = 1;
362cc4c07c8SAlexandre Ghiti 		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
363cc4c07c8SAlexandre Ghiti 			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
364cc4c07c8SAlexandre Ghiti 			cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
365cc4c07c8SAlexandre Ghiti 		}
366cc4c07c8SAlexandre Ghiti 	}
367cc4c07c8SAlexandre Ghiti 
368e9991434SAtish Patra 	/* retrieve the available counter index */
3690209b583SAtish Patra #if defined(CONFIG_32BIT)
3701537bf26SSergey Matyukevich 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
371cc4c07c8SAlexandre Ghiti 			cmask, cflags, hwc->event_base, hwc->config,
3721537bf26SSergey Matyukevich 			hwc->config >> 32);
3730209b583SAtish Patra #else
3741537bf26SSergey Matyukevich 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
375cc4c07c8SAlexandre Ghiti 			cmask, cflags, hwc->event_base, hwc->config, 0);
3760209b583SAtish Patra #endif
377e9991434SAtish Patra 	if (ret.error) {
378e9991434SAtish Patra 		pr_debug("Not able to find a counter for event %lx config %llx\n",
379e9991434SAtish Patra 			hwc->event_base, hwc->config);
380e9991434SAtish Patra 		return sbi_err_map_linux_errno(ret.error);
381e9991434SAtish Patra 	}
382e9991434SAtish Patra 
383e9991434SAtish Patra 	idx = ret.value;
3841537bf26SSergey Matyukevich 	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
385e9991434SAtish Patra 		return -ENOENT;
386e9991434SAtish Patra 
387e9991434SAtish Patra 	/* Additional sanity check for the counter id */
388e9991434SAtish Patra 	if (pmu_sbi_ctr_is_fw(idx)) {
389e9991434SAtish Patra 		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
390e9991434SAtish Patra 			return idx;
391e9991434SAtish Patra 	} else {
392e9991434SAtish Patra 		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
393e9991434SAtish Patra 			return idx;
394e9991434SAtish Patra 	}
395e9991434SAtish Patra 
396e9991434SAtish Patra 	return -ENOENT;
397e9991434SAtish Patra }
398e9991434SAtish Patra 
399e9991434SAtish Patra static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
400e9991434SAtish Patra {
402e9991434SAtish Patra 	struct hw_perf_event *hwc = &event->hw;
403e9991434SAtish Patra 	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
404e9991434SAtish Patra 	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
405e9991434SAtish Patra 	int idx = hwc->idx;
406e9991434SAtish Patra 
407e9991434SAtish Patra 	if (pmu_sbi_ctr_is_fw(idx))
408e9991434SAtish Patra 		clear_bit(idx, cpuc->used_fw_ctrs);
409e9991434SAtish Patra 	else
410e9991434SAtish Patra 		clear_bit(idx, cpuc->used_hw_ctrs);
411e9991434SAtish Patra }
412e9991434SAtish Patra 
413e9991434SAtish Patra static int pmu_event_find_cache(u64 config)
414e9991434SAtish Patra {
415e9991434SAtish Patra 	unsigned int cache_type, cache_op, cache_result, ret;
416e9991434SAtish Patra 
417e9991434SAtish Patra 	cache_type = (config >>  0) & 0xff;
418e9991434SAtish Patra 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
419e9991434SAtish Patra 		return -EINVAL;
420e9991434SAtish Patra 
421e9991434SAtish Patra 	cache_op = (config >>  8) & 0xff;
422e9991434SAtish Patra 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
423e9991434SAtish Patra 		return -EINVAL;
424e9991434SAtish Patra 
425e9991434SAtish Patra 	cache_result = (config >> 16) & 0xff;
426e9991434SAtish Patra 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
427e9991434SAtish Patra 		return -EINVAL;
428e9991434SAtish Patra 
429e9991434SAtish Patra 	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;
430e9991434SAtish Patra 
431e9991434SAtish Patra 	return ret;
432e9991434SAtish Patra }
433e9991434SAtish Patra 
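/*
 * Worked example (editor-added, hedged): the generic perf cache event
 * encoding places cache_type in config[7:0], op in config[15:8] and result in
 * config[23:16]. An L1D read miss is therefore config == 0x10000
 * (L1D = 0, OP_READ = 0, RESULT_MISS = 1), which the lookup above resolves to
 * pmu_cache_event_map[C(L1D)][C(OP_READ)][C(RESULT_MISS)].
 */
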
434e9991434SAtish Patra static bool pmu_sbi_is_fw_event(struct perf_event *event)
435e9991434SAtish Patra {
436e9991434SAtish Patra 	u32 type = event->attr.type;
437e9991434SAtish Patra 	u64 config = event->attr.config;
438e9991434SAtish Patra 
439e9991434SAtish Patra 	return (type == PERF_TYPE_RAW) && ((config >> 63) == 1);
443e9991434SAtish Patra }
444e9991434SAtish Patra 
445e9991434SAtish Patra static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
446e9991434SAtish Patra {
447e9991434SAtish Patra 	u32 type = event->attr.type;
448e9991434SAtish Patra 	u64 config = event->attr.config;
449e9991434SAtish Patra 	int bSoftware;
450e9991434SAtish Patra 	u64 raw_config_val;
451e9991434SAtish Patra 	int ret;
452e9991434SAtish Patra 
453e9991434SAtish Patra 	switch (type) {
454e9991434SAtish Patra 	case PERF_TYPE_HARDWARE:
455e9991434SAtish Patra 		if (config >= PERF_COUNT_HW_MAX)
456e9991434SAtish Patra 			return -EINVAL;
457e9991434SAtish Patra 		ret = pmu_hw_event_map[event->attr.config].event_idx;
458e9991434SAtish Patra 		break;
459e9991434SAtish Patra 	case PERF_TYPE_HW_CACHE:
460e9991434SAtish Patra 		ret = pmu_event_find_cache(config);
461e9991434SAtish Patra 		break;
462e9991434SAtish Patra 	case PERF_TYPE_RAW:
463e9991434SAtish Patra 		/*
464e9991434SAtish Patra 		 * As per SBI specification, the upper 16 bits must be unused for
465e9991434SAtish Patra 		 * a raw event. Use the MSB (bit 63) to distinguish between hardware
466e9991434SAtish Patra 		 * raw events and firmware events.
467e9991434SAtish Patra 		 */
468e9991434SAtish Patra 		bSoftware = config >> 63;
469e9991434SAtish Patra 		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
470e9991434SAtish Patra 		if (bSoftware) {
471e9991434SAtish Patra 			ret = (raw_config_val & 0xFFFF) |
472e9991434SAtish Patra 				(SBI_PMU_EVENT_TYPE_FW << 16);
473e9991434SAtish Patra 		} else {
474e9991434SAtish Patra 			ret = RISCV_PMU_RAW_EVENT_IDX;
475e9991434SAtish Patra 			*econfig = raw_config_val;
476e9991434SAtish Patra 		}
477e9991434SAtish Patra 		break;
478e9991434SAtish Patra 	default:
479e9991434SAtish Patra 		ret = -EINVAL;
480e9991434SAtish Patra 		break;
481e9991434SAtish Patra 	}
482e9991434SAtish Patra 
483e9991434SAtish Patra 	return ret;
484e9991434SAtish Patra }
485e9991434SAtish Patra 
486e9991434SAtish Patra static u64 pmu_sbi_ctr_read(struct perf_event *event)
487e9991434SAtish Patra {
488e9991434SAtish Patra 	struct hw_perf_event *hwc = &event->hw;
489e9991434SAtish Patra 	int idx = hwc->idx;
490e9991434SAtish Patra 	struct sbiret ret;
491e9991434SAtish Patra 	union sbi_pmu_ctr_info info;
492e9991434SAtish Patra 	u64 val = 0;
493e9991434SAtish Patra 
494e9991434SAtish Patra 	if (pmu_sbi_is_fw_event(event)) {
495e9991434SAtish Patra 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
496e9991434SAtish Patra 				hwc->idx, 0, 0, 0, 0, 0);
497e9991434SAtish Patra 		if (!ret.error)
498e9991434SAtish Patra 			val = ret.value;
499e9991434SAtish Patra 	} else {
500e9991434SAtish Patra 		info = pmu_ctr_list[idx];
501e9991434SAtish Patra 		val = riscv_pmu_ctr_read_csr(info.csr);
502e9991434SAtish Patra 		if (IS_ENABLED(CONFIG_32BIT))
503e9991434SAtish Patra 			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
504e9991434SAtish Patra 	}
505e9991434SAtish Patra 
506e9991434SAtish Patra 	return val;
507e9991434SAtish Patra }
508e9991434SAtish Patra 
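/*
 * Editor's sketch (an assumption, not part of the original code): on rv32 the
 * low and high CSR halves cannot be read atomically, so a strictly
 * rollover-safe variant of the read above would re-read the high half until
 * it is stable, e.g.:
 *
 *	u32 hi, lo;
 *
 *	do {
 *		hi = riscv_pmu_ctr_read_csr(info.csr + 0x80);
 *		lo = riscv_pmu_ctr_read_csr(info.csr);
 *	} while (hi != riscv_pmu_ctr_read_csr(info.csr + 0x80));
 *	val = ((u64)hi << 32) | lo;
 *
 * The single-shot read above assumes the window between the two CSR reads is
 * short enough in practice.
 */
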
509cc4c07c8SAlexandre Ghiti static void pmu_sbi_set_scounteren(void *arg)
510cc4c07c8SAlexandre Ghiti {
511cc4c07c8SAlexandre Ghiti 	struct perf_event *event = (struct perf_event *)arg;
512cc4c07c8SAlexandre Ghiti 
513*3fec3233SAlexandre Ghiti 	if (event->hw.idx != -1)
514cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN,
515cc4c07c8SAlexandre Ghiti 			  csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
516cc4c07c8SAlexandre Ghiti }
517cc4c07c8SAlexandre Ghiti 
518cc4c07c8SAlexandre Ghiti static void pmu_sbi_reset_scounteren(void *arg)
519cc4c07c8SAlexandre Ghiti {
520cc4c07c8SAlexandre Ghiti 	struct perf_event *event = (struct perf_event *)arg;
521cc4c07c8SAlexandre Ghiti 
522*3fec3233SAlexandre Ghiti 	if (event->hw.idx != -1)
523cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN,
524cc4c07c8SAlexandre Ghiti 			  csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
525cc4c07c8SAlexandre Ghiti }
526cc4c07c8SAlexandre Ghiti 
527e9991434SAtish Patra static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
528e9991434SAtish Patra {
529e9991434SAtish Patra 	struct sbiret ret;
530e9991434SAtish Patra 	struct hw_perf_event *hwc = &event->hw;
531e9991434SAtish Patra 	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
532e9991434SAtish Patra 
5330209b583SAtish Patra #if defined(CONFIG_32BIT)
534e9991434SAtish Patra 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
535e9991434SAtish Patra 			1, flag, ival, ival >> 32, 0);
5360209b583SAtish Patra #else
5370209b583SAtish Patra 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
5380209b583SAtish Patra 			1, flag, ival, 0, 0);
5390209b583SAtish Patra #endif
540e9991434SAtish Patra 	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
541e9991434SAtish Patra 		pr_err("Starting counter idx %d failed with error %d\n",
542e9991434SAtish Patra 			hwc->idx, sbi_err_map_linux_errno(ret.error));
543cc4c07c8SAlexandre Ghiti 
544cc4c07c8SAlexandre Ghiti 	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
545cc4c07c8SAlexandre Ghiti 	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
546*3fec3233SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(event->owner->mm),
547*3fec3233SAlexandre Ghiti 				 pmu_sbi_set_scounteren, (void *)event, 1);
548e9991434SAtish Patra }
549e9991434SAtish Patra 
550e9991434SAtish Patra static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
551e9991434SAtish Patra {
552e9991434SAtish Patra 	struct sbiret ret;
553e9991434SAtish Patra 	struct hw_perf_event *hwc = &event->hw;
554e9991434SAtish Patra 
555cc4c07c8SAlexandre Ghiti 	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
556cc4c07c8SAlexandre Ghiti 	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
557*3fec3233SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(event->owner->mm),
558*3fec3233SAlexandre Ghiti 				 pmu_sbi_reset_scounteren, (void *)event, 1);
559cc4c07c8SAlexandre Ghiti 
560e9991434SAtish Patra 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
561e9991434SAtish Patra 	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
562e9991434SAtish Patra 		flag != SBI_PMU_STOP_FLAG_RESET)
563e9991434SAtish Patra 		pr_err("Stopping counter idx %d failed with error %d\n",
564e9991434SAtish Patra 			hwc->idx, sbi_err_map_linux_errno(ret.error));
565e9991434SAtish Patra }
566e9991434SAtish Patra 
567e9991434SAtish Patra static int pmu_sbi_find_num_ctrs(void)
568e9991434SAtish Patra {
569e9991434SAtish Patra 	struct sbiret ret;
570e9991434SAtish Patra 
571e9991434SAtish Patra 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
572e9991434SAtish Patra 	if (!ret.error)
573e9991434SAtish Patra 		return ret.value;
574e9991434SAtish Patra 	else
575e9991434SAtish Patra 		return sbi_err_map_linux_errno(ret.error);
576e9991434SAtish Patra }
577e9991434SAtish Patra 
5781537bf26SSergey Matyukevich static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
579e9991434SAtish Patra {
580e9991434SAtish Patra 	struct sbiret ret;
581e9991434SAtish Patra 	int i, num_hw_ctr = 0, num_fw_ctr = 0;
582e9991434SAtish Patra 	union sbi_pmu_ctr_info cinfo;
583e9991434SAtish Patra 
584e9991434SAtish Patra 	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
585e9991434SAtish Patra 	if (!pmu_ctr_list)
586e9991434SAtish Patra 		return -ENOMEM;
587e9991434SAtish Patra 
58820e0fbabSSergey Matyukevich 	for (i = 0; i < nctr; i++) {
589e9991434SAtish Patra 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
590e9991434SAtish Patra 		if (ret.error)
591e9991434SAtish Patra 			/* The logical counter ids are not expected to be contiguous */
592e9991434SAtish Patra 			continue;
5931537bf26SSergey Matyukevich 
5941537bf26SSergey Matyukevich 		*mask |= BIT(i);
5951537bf26SSergey Matyukevich 
596e9991434SAtish Patra 		cinfo.value = ret.value;
597e9991434SAtish Patra 		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
598e9991434SAtish Patra 			num_fw_ctr++;
599e9991434SAtish Patra 		else
600e9991434SAtish Patra 			num_hw_ctr++;
601e9991434SAtish Patra 		pmu_ctr_list[i].value = cinfo.value;
602e9991434SAtish Patra 	}
603e9991434SAtish Patra 
604e9991434SAtish Patra 	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);
605e9991434SAtish Patra 
606e9991434SAtish Patra 	return 0;
607e9991434SAtish Patra }
608e9991434SAtish Patra 
6094905ec2fSAtish Patra static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
6104905ec2fSAtish Patra {
611c7a9dceaSPalmer Dabbelt 	/*
6124905ec2fSAtish Patra 	 * No need to check the error because we are disabling all the counters
6134905ec2fSAtish Patra 	 * which may include counters that are not enabled yet.
6144905ec2fSAtish Patra 	 */
6154905ec2fSAtish Patra 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
6161537bf26SSergey Matyukevich 		  0, pmu->cmask, 0, 0, 0, 0);
6174905ec2fSAtish Patra }
6184905ec2fSAtish Patra 
6194905ec2fSAtish Patra static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
6204905ec2fSAtish Patra {
6214905ec2fSAtish Patra 	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
6224905ec2fSAtish Patra 
6234905ec2fSAtish Patra 	/* No need to check the error here as we can't do anything about the error */
6244905ec2fSAtish Patra 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
6254905ec2fSAtish Patra 		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
6264905ec2fSAtish Patra }
6274905ec2fSAtish Patra 
628c7a9dceaSPalmer Dabbelt /*
6294905ec2fSAtish Patra  * This function starts all the used counters in a two-step approach.
6304905ec2fSAtish Patra  * Any counter that did not overflow can be started in a single step
6314905ec2fSAtish Patra  * while the overflowed counters need to be started with updated initialization
6324905ec2fSAtish Patra  * value.
6334905ec2fSAtish Patra  */
6344905ec2fSAtish Patra static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
6354905ec2fSAtish Patra 					       unsigned long ctr_ovf_mask)
6364905ec2fSAtish Patra {
6374905ec2fSAtish Patra 	int idx = 0;
6384905ec2fSAtish Patra 	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
6394905ec2fSAtish Patra 	struct perf_event *event;
6404905ec2fSAtish Patra 	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
6414905ec2fSAtish Patra 	unsigned long ctr_start_mask = 0;
6424905ec2fSAtish Patra 	uint64_t max_period;
6434905ec2fSAtish Patra 	struct hw_perf_event *hwc;
6444905ec2fSAtish Patra 	u64 init_val = 0;
6454905ec2fSAtish Patra 
6464905ec2fSAtish Patra 	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;
6474905ec2fSAtish Patra 
6484905ec2fSAtish Patra 	/* Start all the counters that did not overflow in a single shot */
6494905ec2fSAtish Patra 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
6504905ec2fSAtish Patra 		  0, 0, 0, 0);
6514905ec2fSAtish Patra 
6524905ec2fSAtish Patra 	/* Reinitialize and start all the counters that overflowed */
6534905ec2fSAtish Patra 	while (ctr_ovf_mask) {
6544905ec2fSAtish Patra 		if (ctr_ovf_mask & 0x01) {
6554905ec2fSAtish Patra 			event = cpu_hw_evt->events[idx];
6564905ec2fSAtish Patra 			hwc = &event->hw;
6574905ec2fSAtish Patra 			max_period = riscv_pmu_ctr_get_width_mask(event);
6584905ec2fSAtish Patra 			init_val = local64_read(&hwc->prev_count) & max_period;
659acc1b919SAtish Patra #if defined(CONFIG_32BIT)
660acc1b919SAtish Patra 			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
661acc1b919SAtish Patra 				  flag, init_val, init_val >> 32, 0);
662acc1b919SAtish Patra #else
6634905ec2fSAtish Patra 			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
6644905ec2fSAtish Patra 				  flag, init_val, 0, 0);
665acc1b919SAtish Patra #endif
666133a6d1fSAtish Patra 			perf_event_update_userpage(event);
6674905ec2fSAtish Patra 		}
6684905ec2fSAtish Patra 		ctr_ovf_mask = ctr_ovf_mask >> 1;
6694905ec2fSAtish Patra 		idx++;
6704905ec2fSAtish Patra 	}
6714905ec2fSAtish Patra }
6724905ec2fSAtish Patra 
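/*
 * Worked example (editor-added): with used_hw_ctrs[0] == 0x19 (counters 0, 3
 * and 4 in use) and ctr_ovf_mask == 0x08 (counter 3 overflowed), the first
 * ecall above restarts counters 0 and 4 in one shot via
 * ctr_start_mask == 0x11, and the loop then restarts counter 3 individually
 * with its reprogrammed initial value.
 */
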
6734905ec2fSAtish Patra static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
6744905ec2fSAtish Patra {
6754905ec2fSAtish Patra 	struct perf_sample_data data;
6764905ec2fSAtish Patra 	struct pt_regs *regs;
6774905ec2fSAtish Patra 	struct hw_perf_event *hw_evt;
6784905ec2fSAtish Patra 	union sbi_pmu_ctr_info *info;
6794905ec2fSAtish Patra 	int lidx, hidx, fidx;
6804905ec2fSAtish Patra 	struct riscv_pmu *pmu;
6814905ec2fSAtish Patra 	struct perf_event *event;
6824905ec2fSAtish Patra 	unsigned long overflow;
6834905ec2fSAtish Patra 	unsigned long overflowed_ctrs = 0;
6844905ec2fSAtish Patra 	struct cpu_hw_events *cpu_hw_evt = dev;
685096b52fdSSergey Matyukevich 	u64 start_clock = sched_clock();
6864905ec2fSAtish Patra 
6874905ec2fSAtish Patra 	if (WARN_ON_ONCE(!cpu_hw_evt))
6884905ec2fSAtish Patra 		return IRQ_NONE;
6894905ec2fSAtish Patra 
6904905ec2fSAtish Patra 	/* Firmware counters don't support overflow yet */
6914905ec2fSAtish Patra 	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
6924905ec2fSAtish Patra 	event = cpu_hw_evt->events[fidx];
6934905ec2fSAtish Patra 	if (!event) {
69465e9fb08SHeiko Stuebner 		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
6954905ec2fSAtish Patra 		return IRQ_NONE;
6964905ec2fSAtish Patra 	}
6974905ec2fSAtish Patra 
6984905ec2fSAtish Patra 	pmu = to_riscv_pmu(event->pmu);
6994905ec2fSAtish Patra 	pmu_sbi_stop_hw_ctrs(pmu);
7004905ec2fSAtish Patra 
7014905ec2fSAtish Patra 	/* Overflow status register should only be read after counters are stopped */
70265e9fb08SHeiko Stuebner 	ALT_SBI_PMU_OVERFLOW(overflow);
7034905ec2fSAtish Patra 
704c7a9dceaSPalmer Dabbelt 	/*
7054905ec2fSAtish Patra 	 * Overflow interrupt pending bit should only be cleared after stopping
7064905ec2fSAtish Patra 	 * all the counters to avoid any race condition.
7074905ec2fSAtish Patra 	 */
70865e9fb08SHeiko Stuebner 	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
7094905ec2fSAtish Patra 
7104905ec2fSAtish Patra 	/* No overflow bit is set */
7114905ec2fSAtish Patra 	if (!overflow)
7124905ec2fSAtish Patra 		return IRQ_NONE;
7134905ec2fSAtish Patra 
7144905ec2fSAtish Patra 	regs = get_irq_regs();
7154905ec2fSAtish Patra 
7164905ec2fSAtish Patra 	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
7174905ec2fSAtish Patra 		struct perf_event *event = cpu_hw_evt->events[lidx];
7184905ec2fSAtish Patra 
7194905ec2fSAtish Patra 		/* Skip if the event is invalid or the user did not request sampling */
7204905ec2fSAtish Patra 		if (!event || !is_sampling_event(event))
7214905ec2fSAtish Patra 			continue;
7224905ec2fSAtish Patra 
7234905ec2fSAtish Patra 		info = &pmu_ctr_list[lidx];
7244905ec2fSAtish Patra 		/* Do a sanity check */
7254905ec2fSAtish Patra 		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
7264905ec2fSAtish Patra 			continue;
7274905ec2fSAtish Patra 
7284905ec2fSAtish Patra 		/* compute hardware counter index */
7294905ec2fSAtish Patra 		hidx = info->csr - CSR_CYCLE;
7304905ec2fSAtish Patra 		/* check if the corresponding bit is set in scountovf */
7314905ec2fSAtish Patra 		if (!(overflow & (1 << hidx)))
7324905ec2fSAtish Patra 			continue;
7334905ec2fSAtish Patra 
7344905ec2fSAtish Patra 		/*
7354905ec2fSAtish Patra 		 * Keep track of overflowed counters so that they can be started
7364905ec2fSAtish Patra 		 * with updated initial value.
7374905ec2fSAtish Patra 		 */
7384905ec2fSAtish Patra 		overflowed_ctrs |= 1 << lidx;
7394905ec2fSAtish Patra 		hw_evt = &event->hw;
7404905ec2fSAtish Patra 		riscv_pmu_event_update(event);
7414905ec2fSAtish Patra 		perf_sample_data_init(&data, 0, hw_evt->last_period);
7424905ec2fSAtish Patra 		if (riscv_pmu_event_set_period(event)) {
7434905ec2fSAtish Patra 			/*
7444905ec2fSAtish Patra 			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
7454905ec2fSAtish Patra 			 * to avoid throttling here. As per the specification, the
7464905ec2fSAtish Patra 			 * interrupt remains disabled until the OF bit is set.
7474905ec2fSAtish Patra 			 * Interrupts are enabled again only during the start.
7484905ec2fSAtish Patra 			 * TODO: We will need to stop the guest counters once
7494905ec2fSAtish Patra 			 * virtualization support is added.
7504905ec2fSAtish Patra 			 */
7514905ec2fSAtish Patra 			perf_event_overflow(event, &data, regs);
7524905ec2fSAtish Patra 		}
7534905ec2fSAtish Patra 	}
754096b52fdSSergey Matyukevich 
7554905ec2fSAtish Patra 	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
756096b52fdSSergey Matyukevich 	perf_sample_event_took(sched_clock() - start_clock);
7574905ec2fSAtish Patra 
7584905ec2fSAtish Patra 	return IRQ_HANDLED;
7594905ec2fSAtish Patra }
7604905ec2fSAtish Patra 
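/*
 * Editor's note (hedged): ALT_SBI_PMU_OVERFLOW used above is an errata
 * alternative that normally reads the standard Sscofpmf scountovf CSR but can
 * be patched at boot to a vendor-specific equivalent, e.g. for the T-Head
 * C9xx cores handled via THEAD_C9XX_RV_IRQ_PMU in pmu_sbi_setup_irqs() below.
 */
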
761e9991434SAtish Patra static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
762e9991434SAtish Patra {
763e9991434SAtish Patra 	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
7644905ec2fSAtish Patra 	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
765e9991434SAtish Patra 
7665a5294fbSPalmer Dabbelt 	/*
767cc4c07c8SAlexandre Ghiti 	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
768cc4c07c8SAlexandre Ghiti 	 * legacy option but that will be removed in the future.
7695a5294fbSPalmer Dabbelt 	 */
770cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_LEGACY)
7715a5294fbSPalmer Dabbelt 		csr_write(CSR_SCOUNTEREN, 0x7);
772cc4c07c8SAlexandre Ghiti 	else
773cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x2);
774e9991434SAtish Patra 
775e9991434SAtish Patra 	/* Stop all the counters so that they can be enabled from perf */
7764905ec2fSAtish Patra 	pmu_sbi_stop_all(pmu);
7774905ec2fSAtish Patra 
77865e9fb08SHeiko Stuebner 	if (riscv_pmu_use_irq) {
7794905ec2fSAtish Patra 		cpu_hw_evt->irq = riscv_pmu_irq;
78065e9fb08SHeiko Stuebner 		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
78165e9fb08SHeiko Stuebner 		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
7824905ec2fSAtish Patra 		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
7834905ec2fSAtish Patra 	}
784e9991434SAtish Patra 
785e9991434SAtish Patra 	return 0;
786e9991434SAtish Patra }
787e9991434SAtish Patra 
788e9991434SAtish Patra static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
789e9991434SAtish Patra {
79065e9fb08SHeiko Stuebner 	if (riscv_pmu_use_irq) {
7914905ec2fSAtish Patra 		disable_percpu_irq(riscv_pmu_irq);
79265e9fb08SHeiko Stuebner 		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
7934905ec2fSAtish Patra 	}
7944905ec2fSAtish Patra 
795e9991434SAtish Patra 	/* Disable all counters access for user mode now */
796e9991434SAtish Patra 	csr_write(CSR_SCOUNTEREN, 0x0);
797e9991434SAtish Patra 
798e9991434SAtish Patra 	return 0;
799e9991434SAtish Patra }
800e9991434SAtish Patra 
8014905ec2fSAtish Patra static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
8024905ec2fSAtish Patra {
8034905ec2fSAtish Patra 	int ret;
8044905ec2fSAtish Patra 	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
8054905ec2fSAtish Patra 	struct irq_domain *domain = NULL;
8064905ec2fSAtish Patra 
80765e9fb08SHeiko Stuebner 	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
80865e9fb08SHeiko Stuebner 		riscv_pmu_irq_num = RV_IRQ_PMU;
80965e9fb08SHeiko Stuebner 		riscv_pmu_use_irq = true;
81065e9fb08SHeiko Stuebner 	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
81165e9fb08SHeiko Stuebner 		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
81265e9fb08SHeiko Stuebner 		   riscv_cached_marchid(0) == 0 &&
81365e9fb08SHeiko Stuebner 		   riscv_cached_mimpid(0) == 0) {
81465e9fb08SHeiko Stuebner 		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
81565e9fb08SHeiko Stuebner 		riscv_pmu_use_irq = true;
81665e9fb08SHeiko Stuebner 	}
81765e9fb08SHeiko Stuebner 
81865e9fb08SHeiko Stuebner 	if (!riscv_pmu_use_irq)
8194905ec2fSAtish Patra 		return -EOPNOTSUPP;
8204905ec2fSAtish Patra 
821ca7473cbSSunil V L 	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
822ca7473cbSSunil V L 					  DOMAIN_BUS_ANY);
8234905ec2fSAtish Patra 	if (!domain) {
8244905ec2fSAtish Patra 		pr_err("Failed to find INTC IRQ root domain\n");
8254905ec2fSAtish Patra 		return -ENODEV;
8264905ec2fSAtish Patra 	}
8274905ec2fSAtish Patra 
82865e9fb08SHeiko Stuebner 	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
8294905ec2fSAtish Patra 	if (!riscv_pmu_irq) {
8304905ec2fSAtish Patra 		pr_err("Failed to map PMU interrupt for node\n");
8314905ec2fSAtish Patra 		return -ENODEV;
8324905ec2fSAtish Patra 	}
8334905ec2fSAtish Patra 
8344905ec2fSAtish Patra 	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
8354905ec2fSAtish Patra 	if (ret) {
8364905ec2fSAtish Patra 		pr_err("registering percpu irq failed [%d]\n", ret);
8374905ec2fSAtish Patra 		return ret;
8384905ec2fSAtish Patra 	}
8394905ec2fSAtish Patra 
8404905ec2fSAtish Patra 	return 0;
8414905ec2fSAtish Patra }
8424905ec2fSAtish Patra 
843e9a023f2SEric Lin #ifdef CONFIG_CPU_PM
844e9a023f2SEric Lin static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
845e9a023f2SEric Lin 				void *v)
846e9a023f2SEric Lin {
847e9a023f2SEric Lin 	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
848e9a023f2SEric Lin 	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
849e9a023f2SEric Lin 	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
850e9a023f2SEric Lin 	struct perf_event *event;
851e9a023f2SEric Lin 	int idx;
852e9a023f2SEric Lin 
853e9a023f2SEric Lin 	if (!enabled)
854e9a023f2SEric Lin 		return NOTIFY_OK;
855e9a023f2SEric Lin 
856e9a023f2SEric Lin 	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
857e9a023f2SEric Lin 		event = cpuc->events[idx];
858e9a023f2SEric Lin 		if (!event)
859e9a023f2SEric Lin 			continue;
860e9a023f2SEric Lin 
861e9a023f2SEric Lin 		switch (cmd) {
862e9a023f2SEric Lin 		case CPU_PM_ENTER:
863e9a023f2SEric Lin 			/*
864e9a023f2SEric Lin 			 * Stop and update the counter
865e9a023f2SEric Lin 			 */
866e9a023f2SEric Lin 			riscv_pmu_stop(event, PERF_EF_UPDATE);
867e9a023f2SEric Lin 			break;
868e9a023f2SEric Lin 		case CPU_PM_EXIT:
869e9a023f2SEric Lin 		case CPU_PM_ENTER_FAILED:
870e9a023f2SEric Lin 			/*
871e9a023f2SEric Lin 			 * Restore and enable the counter.
872e9a023f2SEric Lin 			 */
8731c38b061SPeter Zijlstra 			riscv_pmu_start(event, PERF_EF_RELOAD);
874e9a023f2SEric Lin 			break;
875e9a023f2SEric Lin 		default:
876e9a023f2SEric Lin 			break;
877e9a023f2SEric Lin 		}
878e9a023f2SEric Lin 	}
879e9a023f2SEric Lin 
880e9a023f2SEric Lin 	return NOTIFY_OK;
881e9a023f2SEric Lin }
882e9a023f2SEric Lin 
883e9a023f2SEric Lin static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
884e9a023f2SEric Lin {
885e9a023f2SEric Lin 	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
886e9a023f2SEric Lin 	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
887e9a023f2SEric Lin }
888e9a023f2SEric Lin 
889e9a023f2SEric Lin static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
890e9a023f2SEric Lin {
891e9a023f2SEric Lin 	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
892e9a023f2SEric Lin }
893e9a023f2SEric Lin #else
894e9a023f2SEric Lin static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
895e9a023f2SEric Lin static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
896e9a023f2SEric Lin #endif
897e9a023f2SEric Lin 
898e9a023f2SEric Lin static void riscv_pmu_destroy(struct riscv_pmu *pmu)
899e9a023f2SEric Lin {
900e9a023f2SEric Lin 	riscv_pm_pmu_unregister(pmu);
901e9a023f2SEric Lin 	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
902e9a023f2SEric Lin }
903e9a023f2SEric Lin 
904cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_init(struct perf_event *event)
905cc4c07c8SAlexandre Ghiti {
906cc4c07c8SAlexandre Ghiti 	/*
907cc4c07c8SAlexandre Ghiti 	 * The permissions are set at event_init so that we do not depend
908cc4c07c8SAlexandre Ghiti 	 * on the sysctl value that can change.
909cc4c07c8SAlexandre Ghiti 	 */
910cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
911cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
912cc4c07c8SAlexandre Ghiti 	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
913cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
914cc4c07c8SAlexandre Ghiti 	else
915cc4c07c8SAlexandre Ghiti 		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
916cc4c07c8SAlexandre Ghiti }
917cc4c07c8SAlexandre Ghiti 
918cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
919cc4c07c8SAlexandre Ghiti {
920cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
921cc4c07c8SAlexandre Ghiti 		return;
922cc4c07c8SAlexandre Ghiti 
923cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
924cc4c07c8SAlexandre Ghiti 		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
925cc4c07c8SAlexandre Ghiti 		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
926cc4c07c8SAlexandre Ghiti 			return;
927cc4c07c8SAlexandre Ghiti 		}
928cc4c07c8SAlexandre Ghiti 	}
929cc4c07c8SAlexandre Ghiti 
930cc4c07c8SAlexandre Ghiti 	/*
931cc4c07c8SAlexandre Ghiti 	 * The user mmapped the event to directly access it: this is where
932cc4c07c8SAlexandre Ghiti 	 * we determine based on sysctl_perf_user_access if we grant userspace
933cc4c07c8SAlexandre Ghiti 	 * the direct access to this event. That means that within the same
934cc4c07c8SAlexandre Ghiti 	 * task, some events may be directly accessible and some other may not,
935cc4c07c8SAlexandre Ghiti 	 * if the user changes the value of sysctl_perf_user_access in the
936cc4c07c8SAlexandre Ghiti 	 * meantime.
937cc4c07c8SAlexandre Ghiti 	 */
938cc4c07c8SAlexandre Ghiti 
939cc4c07c8SAlexandre Ghiti 	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
940cc4c07c8SAlexandre Ghiti 
941cc4c07c8SAlexandre Ghiti 	/*
942cc4c07c8SAlexandre Ghiti 	 * We must enable userspace access *before* advertising in the user page
943cc4c07c8SAlexandre Ghiti 	 * that it is possible to do so to avoid any race.
944cc4c07c8SAlexandre Ghiti 	 * And we must notify all cpus here because threads that currently run
945cc4c07c8SAlexandre Ghiti 	 * on other cpus will try to directly access the counter too without
946cc4c07c8SAlexandre Ghiti 	 * calling pmu_sbi_ctr_start.
947cc4c07c8SAlexandre Ghiti 	 */
948cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
949cc4c07c8SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(mm),
950cc4c07c8SAlexandre Ghiti 				 pmu_sbi_set_scounteren, (void *)event, 1);
951cc4c07c8SAlexandre Ghiti }
952cc4c07c8SAlexandre Ghiti 
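/*
 * Editor's sketch of the userspace side (hedged, not part of this driver):
 * once an event is mmapped and user access is granted here, a self-monitoring
 * thread is expected to follow the usual perf protocol on the mmapped
 * struct perf_event_mmap_page, roughly:
 *
 *	do {
 *		seq = READ_ONCE(pc->lock);
 *		barrier();
 *		count = pc->offset;
 *		if (pc->cap_user_rdpmc && pc->index)
 *			count += read_csr_counter(pc->index - 1);
 *		barrier();
 *	} while (READ_ONCE(pc->lock) != seq);
 *
 * where read_csr_counter() is a hypothetical helper issuing rdcycle/rdinstret
 * or a csrr of the matching hpmcounter.
 */
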
953cc4c07c8SAlexandre Ghiti static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
954cc4c07c8SAlexandre Ghiti {
955cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
956cc4c07c8SAlexandre Ghiti 		return;
957cc4c07c8SAlexandre Ghiti 
958cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
959cc4c07c8SAlexandre Ghiti 		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
960cc4c07c8SAlexandre Ghiti 		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
961cc4c07c8SAlexandre Ghiti 			return;
962cc4c07c8SAlexandre Ghiti 		}
963cc4c07c8SAlexandre Ghiti 	}
964cc4c07c8SAlexandre Ghiti 
965cc4c07c8SAlexandre Ghiti 	/*
966cc4c07c8SAlexandre Ghiti 	 * Here we can directly remove user access since the user does not have
967cc4c07c8SAlexandre Ghiti 	 * access to the user page anymore so we avoid the racy window where the
968cc4c07c8SAlexandre Ghiti 	 * user could have read cap_user_rdpmc to true right before we disable
969cc4c07c8SAlexandre Ghiti 	 * it.
970cc4c07c8SAlexandre Ghiti 	 */
971cc4c07c8SAlexandre Ghiti 	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
972cc4c07c8SAlexandre Ghiti 
973cc4c07c8SAlexandre Ghiti 	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
974cc4c07c8SAlexandre Ghiti 		on_each_cpu_mask(mm_cpumask(mm),
975cc4c07c8SAlexandre Ghiti 				 pmu_sbi_reset_scounteren, (void *)event, 1);
976cc4c07c8SAlexandre Ghiti }
977cc4c07c8SAlexandre Ghiti 
978cc4c07c8SAlexandre Ghiti static void riscv_pmu_update_counter_access(void *info)
979cc4c07c8SAlexandre Ghiti {
980cc4c07c8SAlexandre Ghiti 	if (sysctl_perf_user_access == SYSCTL_LEGACY)
981cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x7);
982cc4c07c8SAlexandre Ghiti 	else
983cc4c07c8SAlexandre Ghiti 		csr_write(CSR_SCOUNTEREN, 0x2);
984cc4c07c8SAlexandre Ghiti }
985cc4c07c8SAlexandre Ghiti 
986cc4c07c8SAlexandre Ghiti static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
987cc4c07c8SAlexandre Ghiti 					      int write, void *buffer,
988cc4c07c8SAlexandre Ghiti 					      size_t *lenp, loff_t *ppos)
989cc4c07c8SAlexandre Ghiti {
990cc4c07c8SAlexandre Ghiti 	int prev = sysctl_perf_user_access;
991cc4c07c8SAlexandre Ghiti 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
992cc4c07c8SAlexandre Ghiti 
993cc4c07c8SAlexandre Ghiti 	/*
994cc4c07c8SAlexandre Ghiti 	 * Test against the previous value since we clear SCOUNTEREN when
995cc4c07c8SAlexandre Ghiti 	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
996cc4c07c8SAlexandre Ghiti 	 * not do that if that was already the case.
997cc4c07c8SAlexandre Ghiti 	 */
998cc4c07c8SAlexandre Ghiti 	if (ret || !write || prev == sysctl_perf_user_access)
999cc4c07c8SAlexandre Ghiti 		return ret;
1000cc4c07c8SAlexandre Ghiti 
1001cc4c07c8SAlexandre Ghiti 	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);
1002cc4c07c8SAlexandre Ghiti 
1003cc4c07c8SAlexandre Ghiti 	return 0;
1004cc4c07c8SAlexandre Ghiti }
1005cc4c07c8SAlexandre Ghiti 
1006cc4c07c8SAlexandre Ghiti static struct ctl_table sbi_pmu_sysctl_table[] = {
1007cc4c07c8SAlexandre Ghiti 	{
1008cc4c07c8SAlexandre Ghiti 		.procname       = "perf_user_access",
1009cc4c07c8SAlexandre Ghiti 		.data		= &sysctl_perf_user_access,
1010cc4c07c8SAlexandre Ghiti 		.maxlen		= sizeof(unsigned int),
1011cc4c07c8SAlexandre Ghiti 		.mode           = 0644,
1012cc4c07c8SAlexandre Ghiti 		.proc_handler	= riscv_pmu_proc_user_access_handler,
1013cc4c07c8SAlexandre Ghiti 		.extra1		= SYSCTL_ZERO,
1014cc4c07c8SAlexandre Ghiti 		.extra2		= SYSCTL_TWO,
1015cc4c07c8SAlexandre Ghiti 	},
1016cc4c07c8SAlexandre Ghiti 	{ }
1017cc4c07c8SAlexandre Ghiti };
1018cc4c07c8SAlexandre Ghiti 
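/*
 * Usage note (editor-added, hedged): the table above surfaces the policy as
 * /proc/sys/kernel/perf_user_access, so e.g.
 *
 *	echo 2 > /proc/sys/kernel/perf_user_access
 *
 * switches to the legacy behaviour (cycle/instret directly readable from
 * userspace), while 0 disables and 1 (the default) enables per-event user
 * access.
 */
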
1019e9991434SAtish Patra static int pmu_sbi_device_probe(struct platform_device *pdev)
1020e9991434SAtish Patra {
1021e9991434SAtish Patra 	struct riscv_pmu *pmu = NULL;
10224905ec2fSAtish Patra 	int ret = -ENODEV;
10231537bf26SSergey Matyukevich 	int num_counters;
1024e9991434SAtish Patra 
1025e9991434SAtish Patra 	pr_info("SBI PMU extension is available\n");
1026e9991434SAtish Patra 	pmu = riscv_pmu_alloc();
1027e9991434SAtish Patra 	if (!pmu)
1028e9991434SAtish Patra 		return -ENOMEM;
1029e9991434SAtish Patra 
1030e9991434SAtish Patra 	num_counters = pmu_sbi_find_num_ctrs();
1031e9991434SAtish Patra 	if (num_counters < 0) {
1032e9991434SAtish Patra 		pr_err("SBI PMU extension doesn't provide any counters\n");
10334905ec2fSAtish Patra 		goto out_free;
1034e9991434SAtish Patra 	}
1035e9991434SAtish Patra 
1036ee95b88dSViacheslav Mitrofanov 	/* SBI may report more counters than the maximum we can support */
1037ee95b88dSViacheslav Mitrofanov 	if (num_counters > RISCV_MAX_COUNTERS) {
1038ee95b88dSViacheslav Mitrofanov 		num_counters = RISCV_MAX_COUNTERS;
1039ee95b88dSViacheslav Mitrofanov 		pr_info("SBI returned more than the maximum number of counters. Limiting the number of counters to %d\n", num_counters);
1040ee95b88dSViacheslav Mitrofanov 	}
1041ee95b88dSViacheslav Mitrofanov 
1042e9991434SAtish Patra 	/* cache all the information about counters now */
10431537bf26SSergey Matyukevich 	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
10444905ec2fSAtish Patra 		goto out_free;
1045e9991434SAtish Patra 
10464905ec2fSAtish Patra 	ret = pmu_sbi_setup_irqs(pmu, pdev);
10474905ec2fSAtish Patra 	if (ret < 0) {
10484905ec2fSAtish Patra 		pr_info("Perf sampling/filtering is not supported as the Sscofpmf extension is not available\n");
10494905ec2fSAtish Patra 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
10504905ec2fSAtish Patra 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
10514905ec2fSAtish Patra 	}
10521537bf26SSergey Matyukevich 
105326fabd6dSNikita Shubin 	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
10541537bf26SSergey Matyukevich 	pmu->cmask = cmask;
1055e9991434SAtish Patra 	pmu->ctr_start = pmu_sbi_ctr_start;
1056e9991434SAtish Patra 	pmu->ctr_stop = pmu_sbi_ctr_stop;
1057e9991434SAtish Patra 	pmu->event_map = pmu_sbi_event_map;
1058e9991434SAtish Patra 	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
1059e9991434SAtish Patra 	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
1060e9991434SAtish Patra 	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
1061e9991434SAtish Patra 	pmu->ctr_read = pmu_sbi_ctr_read;
1062cc4c07c8SAlexandre Ghiti 	pmu->event_init = pmu_sbi_event_init;
1063cc4c07c8SAlexandre Ghiti 	pmu->event_mapped = pmu_sbi_event_mapped;
1064cc4c07c8SAlexandre Ghiti 	pmu->event_unmapped = pmu_sbi_event_unmapped;
1065cc4c07c8SAlexandre Ghiti 	pmu->csr_index = pmu_sbi_csr_index;
1066e9991434SAtish Patra 
1067e9991434SAtish Patra 	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
1068e9991434SAtish Patra 	if (ret)
1069e9991434SAtish Patra 		goto out_free;
1070e9991434SAtish Patra 
1071e9a023f2SEric Lin 	ret = riscv_pm_pmu_register(pmu);
1072e9a023f2SEric Lin 	if (ret)
1073e9a023f2SEric Lin 		goto out_unregister;
1074e9a023f2SEric Lin 
1075e9991434SAtish Patra 	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
1076e9a023f2SEric Lin 	if (ret)
1077e9a023f2SEric Lin 		goto out_unregister;
1078e9991434SAtish Patra 
1079cc4c07c8SAlexandre Ghiti 	register_sysctl("kernel", sbi_pmu_sysctl_table);
1080cc4c07c8SAlexandre Ghiti 
1081e9991434SAtish Patra 	return 0;
10824905ec2fSAtish Patra 
1083e9a023f2SEric Lin out_unregister:
1084e9a023f2SEric Lin 	riscv_pmu_destroy(pmu);
1085e9a023f2SEric Lin 
10864905ec2fSAtish Patra out_free:
10874905ec2fSAtish Patra 	kfree(pmu);
10884905ec2fSAtish Patra 	return ret;
1089e9991434SAtish Patra }
1090e9991434SAtish Patra 
1091e9991434SAtish Patra static struct platform_driver pmu_sbi_driver = {
1092e9991434SAtish Patra 	.probe		= pmu_sbi_device_probe,
1093e9991434SAtish Patra 	.driver		= {
1094d5ac062dSAlexandre Ghiti 		.name	= RISCV_PMU_SBI_PDEV_NAME,
1095e9991434SAtish Patra 	},
1096e9991434SAtish Patra };
1097e9991434SAtish Patra 
1098e9991434SAtish Patra static int __init pmu_sbi_devinit(void)
1099e9991434SAtish Patra {
1100e9991434SAtish Patra 	int ret;
1101e9991434SAtish Patra 	struct platform_device *pdev;
1102e9991434SAtish Patra 
1103e9991434SAtish Patra 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
110441cad828SAndrew Jones 	    !sbi_probe_extension(SBI_EXT_PMU)) {
1105e9991434SAtish Patra 		return 0;
1106e9991434SAtish Patra 	}
1107e9991434SAtish Patra 
1108e9991434SAtish Patra 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
1109e9991434SAtish Patra 				      "perf/riscv/pmu:starting",
1110e9991434SAtish Patra 				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
1111e9991434SAtish Patra 	if (ret) {
1112e9991434SAtish Patra 		pr_err("CPU hotplug notifier could not be registered: %d\n",
1113e9991434SAtish Patra 		       ret);
1114e9991434SAtish Patra 		return ret;
1115e9991434SAtish Patra 	}
1116e9991434SAtish Patra 
1117e9991434SAtish Patra 	ret = platform_driver_register(&pmu_sbi_driver);
1118e9991434SAtish Patra 	if (ret)
1119e9991434SAtish Patra 		return ret;
1120e9991434SAtish Patra 
1121d5ac062dSAlexandre Ghiti 	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
1122e9991434SAtish Patra 	if (IS_ERR(pdev)) {
1123e9991434SAtish Patra 		platform_driver_unregister(&pmu_sbi_driver);
1124e9991434SAtish Patra 		return PTR_ERR(pdev);
1125e9991434SAtish Patra 	}
1126e9991434SAtish Patra 
1127e9991434SAtish Patra 	/* Notify the legacy implementation that the SBI PMU is available */
1128e9991434SAtish Patra 	riscv_pmu_legacy_skip_init();
1129e9991434SAtish Patra 
1130e9991434SAtish Patra 	return ret;
1131e9991434SAtish Patra }
1132e9991434SAtish Patra device_initcall(pmu_sbi_devinit)