// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */

#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
#include <linux/soc/andes/irq.h>
#include <linux/workqueue.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
#include <asm/vendor_extensions.h>
#include <asm/vendor_extensions/andes.h>

#define ALT_SBI_PMU_OVERFLOW(__ovl) \
asm volatile(ALTERNATIVE_2( \
	"csrr %0, " __stringify(CSR_SCOUNTOVF), \
	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
	THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
	CONFIG_ERRATA_THEAD_PMU, \
	"csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
	ANDES_VENDOR_ID, \
	RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
	CONFIG_ANDES_CUSTOM_PMU) \
	: "=r" (__ovl) : \
	: "memory")

#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
asm volatile(ALTERNATIVE( \
	"csrc " __stringify(CSR_IP) ", %0\n\t", \
	"csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
	ANDES_VENDOR_ID, \
	RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
	CONFIG_ANDES_CUSTOM_PMU) \
	: : "r"(__irq_mask) \
	: "memory")

#define SYSCTL_NO_USER_ACCESS	0
#define SYSCTL_USER_ACCESS	1
#define SYSCTL_LEGACY		2

#define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)

PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
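
/*
 * These format attributes are exported by the perf core under the PMU's
 * sysfs "format" group (the PMU is registered as "cpu" below). As an
 * illustrative example, not a normative one, userspace could request a
 * raw hardware event with "perf stat -e cpu/event=0x1/", while setting
 * firmware=1 (config bit 63) selects an SBI firmware event instead.
 */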

static bool sbi_v2_available;
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
#define sbi_pmu_snapshot_available() \
	static_branch_unlikely(&sbi_pmu_snapshot_available)

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};

/* Allow user mode access by default */
static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;

/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
 * per_cpu in case of harts with different pmu counters.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;

/* Cache the available counters in a bitmask */
static unsigned long cmask;

struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};
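
/*
 * The bitfield layouts above mirror the SBI PMU event_idx encoding, so
 * event_idx can be passed straight to SBI_EXT_PMU_COUNTER_CFG_MATCH:
 * bits [15:0] carry the event code (or the cache result/op/id triplet)
 * and bits [19:16] carry the event type, with the remaining bits reserved.
 */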

static struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
						SBI_PMU_HW_CPU_CYCLES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
						SBI_PMU_HW_INSTRUCTIONS,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
						SBI_PMU_HW_CACHE_REFERENCES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
						SBI_PMU_HW_CACHE_MISSES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
						SBI_PMU_HW_BRANCH_INSTRUCTIONS,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
						SBI_PMU_HW_BRANCH_MISSES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
						SBI_PMU_HW_BUS_CYCLES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
						SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
						SBI_PMU_HW_STALLED_CYCLES_BACKEND,
						SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
						SBI_PMU_HW_REF_CPU_CYCLES,
						SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
	[PERF_COUNT_HW_CACHE_OP_MAX]
	[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};

static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
			0, cmask, 0, edata->event_idx, 0, 0);
	if (!ret.error) {
		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
			  ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	} else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
		/* This event cannot be monitored by any counter */
		edata->event_idx = -EINVAL;
	}
}

static void pmu_sbi_check_std_events(struct work_struct *work)
{
	for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
		pmu_sbi_check_event(&pmu_hw_event_map[i]);

	for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
		for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
			for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
				pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
}

static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);

static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];
	if (!info)
		return false;

	return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
}

/*
 * Returns the counter width of a programmable counter and number of hardware
 * counters. As we don't support heterogeneous CPUs yet, it is okay to just
 * return the counter width of the first programmable counter.
 */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
{
	int i;
	union sbi_pmu_ctr_info *info;
	u32 hpm_width = 0, hpm_count = 0;

	if (!cmask)
		return -EINVAL;

	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
		info = &pmu_ctr_list[i];
		if (!info)
			continue;
		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
			hpm_width = info->width;
		if (info->type == SBI_PMU_CTR_TYPE_HW)
			hpm_count++;
	}

	*hw_ctr_width = hpm_width;
	*num_hw_ctr = hpm_count;

	return 0;
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);

static uint8_t pmu_sbi_csr_index(struct perf_event *event)
{
	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}

static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
	unsigned long cflags = 0;
	bool guest_events = false;

	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
		guest_events = true;
	if (event->attr.exclude_kernel)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
	if (guest_events && event->attr.exclude_hv)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_host)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_guest)
		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;

	return cflags;
}

static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0, cmask = rvpmu->cmask;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);

	/*
	 * In legacy mode, we have to force the fixed counters for those events
	 * but not in the user access mode as we want to use the other counters
	 * that support sampling/filtering.
	 */
	if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1;
		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = BIT(CSR_INSTRET - CSR_CYCLE);
		}
	}

	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			 hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}

static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}

static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}

static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
		return true;
	else
		return false;
}

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	int bSoftware;
	u64 raw_config_val;
	int ret;

	/*
	 * Ensure we are finished checking standard hardware events for
	 * validity before allowing userspace to configure any events.
	 */
	flush_work(&check_std_events_work);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between a
		 * hardware raw event and a firmware event.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			ret = (raw_config_val & 0xFFFF) |
				(SBI_PMU_EVENT_TYPE_FW << 16);
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
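
/*
 * Worked example (illustrative values): with attr.type == PERF_TYPE_RAW
 * and attr.config == (1ULL << 63) | 0x3, the MSB selects a firmware
 * event, so the mapping above returns (SBI_PMU_EVENT_TYPE_FW << 16) | 0x3.
 * With the MSB clear, it returns RISCV_PMU_RAW_EVENT_IDX and hands the
 * masked raw value back through *econfig for the SBI config-match call.
 */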

static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);

		if (!cpu_hw_evt->snapshot_addr)
			continue;

		free_page((unsigned long)cpu_hw_evt->snapshot_addr);
		cpu_hw_evt->snapshot_addr = NULL;
		cpu_hw_evt->snapshot_addr_phys = 0;
	}
}

static int pmu_sbi_snapshot_alloc(struct riscv_pmu *pmu)
{
	int cpu;
	struct page *snapshot_page;

	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);

		snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
		if (!snapshot_page) {
			pmu_sbi_snapshot_free(pmu);
			return -ENOMEM;
		}
		cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page);
		cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page);
	}

	return 0;
}

static int pmu_sbi_snapshot_disable(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, SBI_SHMEM_DISABLE,
			SBI_SHMEM_DISABLE, 0, 0, 0, 0);
	if (ret.error) {
		pr_warn("failed to disable snapshot shared memory\n");
		return sbi_err_map_linux_errno(ret.error);
	}

	return 0;
}

static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu)
{
	struct cpu_hw_events *cpu_hw_evt;
	struct sbiret ret = {0};

	cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
	if (!cpu_hw_evt->snapshot_addr_phys)
		return -EINVAL;

	if (cpu_hw_evt->snapshot_set_done)
		return 0;

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				cpu_hw_evt->snapshot_addr_phys,
				(u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0);

	/* Free up the snapshot area memory and fall back to SBI PMU calls without snapshot */
	if (ret.error) {
		if (ret.error != SBI_ERR_NOT_SUPPORTED)
			pr_warn("pmu snapshot setup failed with error %ld\n", ret.error);
		return sbi_err_map_linux_errno(ret.error);
	}

	memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
	cpu_hw_evt->snapshot_set_done = true;

	return 0;
}

static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	u64 val = 0;
	struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
	union sbi_pmu_ctr_info info = pmu_ctr_list[idx];

	/* Read the value directly from the shared memory only if the counter is stopped */
	if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) {
		val = sdata->ctr_values[idx];
		return val;
	}

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (ret.error)
			return 0;

		val = ret.value;
		if (IS_ENABLED(CONFIG_32BIT) && sbi_v2_available && info.width >= 32) {
			ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ_HI,
					hwc->idx, 0, 0, 0, 0, 0);
			if (!ret.error)
				val |= ((u64)ret.value << 32);
			else
				WARN_ONCE(1, "Unable to read upper 32 bits of firmware counter error: %ld\n",
					  ret.error);
		}
	} else {
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			val |= ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32;
	}

	return val;
}

static void pmu_sbi_set_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_reset_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

	/* There is no benefit in setting the snapshot flag for a single counter */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_set_scounteren((void *)event);
}

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_reset_scounteren((void *)event);

	if (sbi_pmu_snapshot_available())
		flag |= SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (!ret.error && sbi_pmu_snapshot_available()) {
		/*
		 * The counter snapshot is based on the index base specified by hwc->idx.
		 * The actual counter value is updated in shared memory at index 0 when the
		 * counter mask is 0x01. To ensure accurate counter values, it's necessary to
		 * transfer the counter value to shared memory. However, if hwc->idx is zero,
		 * the counter value is already correctly updated in shared memory, requiring
		 * no further adjustment.
		 */
		if (hwc->idx > 0) {
			sdata->ctr_values[hwc->idx] = sdata->ctr_values[0];
			sdata->ctr_values[0] = 0;
		}
	} else if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
		   flag != SBI_PMU_STOP_FLAG_RESET) {
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
	}
}

static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;

		*mask |= BIT(i);

		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters,
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
	unsigned long flag = 0;
	int i, idx;
	struct sbiret ret;
	u64 temp_ctr_overflow_mask = 0;

	if (sbi_pmu_snapshot_available())
		flag = SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;

	/* Reset the shadow copy to avoid saving/restoring any value from a previous overflow */
	memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		/* No need to check the error here as we can't do anything about the error */
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG,
				cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
		if (!ret.error && sbi_pmu_snapshot_available()) {
			/* Save the counter values to avoid clobbering */
			for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
				cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] =
							sdata->ctr_values[idx];
			/* Save the overflow mask to avoid clobbering */
			temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG);
		}
	}

	/* Restore the counter values to the shared memory for used hw counters */
	if (sbi_pmu_snapshot_available()) {
		for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS)
			sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx];
		if (temp_ctr_overflow_mask)
			sdata->ctr_overflow_mask = temp_ctr_overflow_mask;
	}
}

/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step,
 * while the overflowed counters need to be restarted with an updated
 * initialization value.
 */
static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
					      u64 ctr_ovf_mask)
{
	int idx = 0, i;
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
		/* Start all the counters that did not overflow in a single shot */
		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG, ctr_start_mask,
			  0, 0, 0, 0);
	}

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}

static inline void pmu_sbi_start_ovf_ctrs_snapshot(struct cpu_hw_events *cpu_hw_evt,
						   u64 ctr_ovf_mask)
{
	int i, idx = 0;
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_INIT_SNAPSHOT;
	u64 max_period, init_val = 0;
	struct hw_perf_event *hwc;
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		if (ctr_ovf_mask & BIT(idx)) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
			cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val;
		}
		/*
		 * We do not need to update the non-overflowed counters;
		 * their previous values should already be there.
		 */
	}

	for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
		/* Restore the counter values to relative indices for used hw counters */
		for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
			sdata->ctr_values[idx] =
					cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG];
		/* Start all the counters in a single shot */
		sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx * BITS_PER_LONG,
			  cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
	}
}

static void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					u64 ctr_ovf_mask)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	if (sbi_pmu_snapshot_available())
		pmu_sbi_start_ovf_ctrs_snapshot(cpu_hw_evt, ctr_ovf_mask);
	else
		pmu_sbi_start_ovf_ctrs_sbi(cpu_hw_evt, ctr_ovf_mask);
}

static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	u64 overflow;
	u64 overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;
	u64 start_clock = sched_clock();
	struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	event = cpu_hw_evt->events[fidx];
	if (!event) {
		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after the counters are stopped */
	if (sbi_pmu_snapshot_available())
		overflow = sdata->ctr_overflow_mask;
	else
		ALT_SBI_PMU_OVERFLOW(overflow);

	/*
	 * The overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if the event is invalid or the user did not request sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		if (sbi_pmu_snapshot_available())
			/* SBI implementation already updated the logical indices */
			hidx = lidx;
		else
			/* compute hardware counter index */
			hidx = info->csr - CSR_CYCLE;

		/* check if the corresponding bit is set in scountovf or in the overflow mask in shmem */
		if (!(overflow & BIT(hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= BIT(lidx);
		hw_evt = &event->hw;
		/* Update the event states here so that we know the state while reading */
		hw_evt->state |= PERF_HES_STOPPED;
		riscv_pmu_event_update(event);
		hw_evt->state |= PERF_HES_UPTODATE;
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
			 * to avoid throttling here. As per the specification, the
			 * interrupt remains disabled until the OF bit is set.
			 * Interrupts are enabled again only during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
		/* Reset the state as we are going to start the counter after the loop */
		hw_evt->state = 0;
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;
}

static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
	 * legacy option but that will be removed in the future.
	 */
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_pmu_use_irq) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	if (sbi_pmu_snapshot_available())
		return pmu_sbi_snapshot_setup(pmu, cpu);

	return 0;
}

static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_pmu_use_irq)
		disable_percpu_irq(riscv_pmu_irq);

	/* Disable all counters access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	if (sbi_pmu_snapshot_available())
		return pmu_sbi_snapshot_disable();

	return 0;
}

static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct irq_domain *domain = NULL;

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		riscv_pmu_irq_num = RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
		   riscv_cached_marchid(0) == 0 &&
		   riscv_cached_mimpid(0) == 0) {
		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (riscv_has_vendor_extension_unlikely(ANDES_VENDOR_ID,
						       RISCV_ISA_VENDOR_EXT_XANDESPMU) &&
		   IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
		riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
		riscv_pmu_use_irq = true;
	}

	riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);

	if (!riscv_pmu_use_irq)
		return -EOPNOTSUPP;

	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}

#ifdef CONFIG_CPU_PM
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
{
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
	struct perf_event *event;
	int idx;

	if (!enabled)
		return NOTIFY_OK;

	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
		event = cpuc->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 */
			riscv_pmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}

static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif

static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
	if (sbi_v2_available) {
		if (sbi_pmu_snapshot_available()) {
			pmu_sbi_snapshot_disable();
			pmu_sbi_snapshot_free(pmu);
		}
	}
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}

static void pmu_sbi_event_init(struct perf_event *event)
{
	/*
	 * The permissions are set at event_init so that we do not depend
	 * on the sysctl value that can change.
	 */
	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
	else
		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
}

static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * The user mmapped the event to directly access it: this is where
	 * we determine, based on sysctl_perf_user_access, if we grant userspace
	 * direct access to this event. That means that within the same task,
	 * some events may be directly accessible and some others may not be,
	 * if the user changes the value of sysctl_perf_user_access in the
	 * meantime.
	 */

	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	/*
	 * We must enable userspace access *before* advertising in the user page
	 * that it is possible to do so to avoid any race.
	 * And we must notify all cpus here because threads that currently run
	 * on other cpus will try to directly access the counter too without
	 * calling pmu_sbi_ctr_start.
	 */
	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_set_scounteren, (void *)event, 1);
}

static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * Here we can directly remove user access since the user does not have
	 * access to the user page anymore, so we avoid the racy window where the
	 * user could have read cap_user_rdpmc as true right before we disable
	 * it.
	 */
	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;

	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_reset_scounteren, (void *)event, 1);
}

static void riscv_pmu_update_counter_access(void *info)
{
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);
}

static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
					      int write, void *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int prev = sysctl_perf_user_access;
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/*
	 * Test against the previous value since we clear SCOUNTEREN when
	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
	 * not do that if that was already the case.
	 */
	if (ret || !write || prev == sysctl_perf_user_access)
		return ret;

	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);

	return 0;
}

static struct ctl_table sbi_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= riscv_pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
};
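
/*
 * The table above is registered under "kernel" in pmu_sbi_device_probe(),
 * so the knob appears as /proc/sys/kernel/perf_user_access. Its values
 * follow the SYSCTL_* defines at the top of this file: 0 forbids user-mode
 * counter access, 1 (the default) grants it per mmapped event, and 2 keeps
 * the legacy behaviour of exposing the cycle/instret counters directly.
 */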

static int pmu_sbi_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;
	int ret = -ENODEV;
	int num_counters;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
		goto out_free;
	}

	/* It is possible to get from SBI more than max number of counters */
	if (num_counters > RISCV_MAX_COUNTERS) {
		num_counters = RISCV_MAX_COUNTERS;
		pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
	}

	/* cache all the information about counters now */
	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret < 0) {
		pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}

	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
	pmu->pmu.parent = &pdev->dev;
	pmu->cmask = cmask;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;
	pmu->event_init = pmu_sbi_event_init;
	pmu->event_mapped = pmu_sbi_event_mapped;
	pmu->event_unmapped = pmu_sbi_event_unmapped;
	pmu->csr_index = pmu_sbi_csr_index;

	ret = riscv_pm_pmu_register(pmu);
	if (ret)
		goto out_unregister;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
		goto out_unregister;

	/* SBI PMU snapshot is only available in SBI v2.0 */
	if (sbi_v2_available) {
		int cpu;

		ret = pmu_sbi_snapshot_alloc(pmu);
		if (ret)
			goto out_unregister;

		cpu = get_cpu();

		ret = pmu_sbi_snapshot_setup(pmu, cpu);
		if (ret) {
			/* Snapshot is an optional feature. Continue if not available */
			pmu_sbi_snapshot_free(pmu);
		} else {
			pr_info("SBI PMU snapshot detected\n");
			/*
			 * We enable it once here for the boot cpu. If snapshot shmem setup
			 * fails during the cpu hotplug process, it will fail to start the cpu
			 * as we cannot handle heterogeneous PMUs with different snapshot
			 * capability.
			 */
			static_branch_enable(&sbi_pmu_snapshot_available);
		}
		put_cpu();
	}

	register_sysctl("kernel", sbi_pmu_sysctl_table);

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
	if (ret)
		goto out_unregister;

	/* Asynchronously check which standard events are available */
	schedule_work(&check_std_events_work);

	return 0;

out_unregister:
	riscv_pmu_destroy(pmu);

out_free:
	kfree(pmu);
	return ret;
}

static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name	= RISCV_PMU_SBI_PDEV_NAME,
	},
};

static int __init pmu_sbi_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_PMU)) {
		return 0;
	}

	if (sbi_spec_version >= sbi_mk_version(2, 0))
		sbi_v2_available = true;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n",
		       ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify the legacy implementation that the SBI PMU is available */
	riscv_pmu_legacy_skip_init();

	return ret;
}
device_initcall(pmu_sbi_devinit)