Lines Matching +full:riscv +full:- +full:j +full:- +full:extension

1 // SPDX-License-Identifier: GPL-2.0
3 * RISC-V performance counter support.
11 #define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
62 PMU_FORMAT_ATTR(event, "config:0-47");
63 PMU_FORMAT_ATTR(firmware, "config:62-63");
90 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
306 0, cmask, 0, edata->event_idx, 0, 0); in pmu_sbi_check_event()
312 edata->event_idx = -ENOENT; in pmu_sbi_check_event()
322 for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) in pmu_sbi_check_std_events() local
323 for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) in pmu_sbi_check_std_events()
324 pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]); in pmu_sbi_check_std_events()
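The nested loops above probe every entry of a three-dimensional table indexed as [cache_id][op_id][result_id]. For reference, a minimal user-space sketch of how a PERF_TYPE_HW_CACHE config value is conventionally split into those three indices; the byte layout is the generic perf ABI, while the struct and helper names here are illustrative only.

#include <stdint.h>

/* Hypothetical decoder: config[7:0] = cache id, config[15:8] = op id,
 * config[23:16] = result id (generic perf hw-cache encoding). */
struct hw_cache_idx {
	unsigned int cache_id;
	unsigned int op_id;
	unsigned int result_id;
};

static struct hw_cache_idx decode_hw_cache_config(uint64_t config)
{
	struct hw_cache_idx idx = {
		.cache_id  = config & 0xff,
		.op_id     = (config >> 8) & 0xff,
		.result_id = (config >> 16) & 0xff,
	};
	return idx;
}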
342 return info->type == SBI_PMU_CTR_TYPE_FW; in pmu_sbi_ctr_is_fw()
357 return -EINVAL; in riscv_pmu_get_hpm_info()
363 if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET) in riscv_pmu_get_hpm_info()
364 hpm_width = info->width; in riscv_pmu_get_hpm_info()
365 if (info->type == SBI_PMU_CTR_TYPE_HW) in riscv_pmu_get_hpm_info()
378 return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE; in pmu_sbi_csr_index()
386 if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS) in pmu_sbi_get_filter_flags()
388 if (event->attr.exclude_kernel) in pmu_sbi_get_filter_flags()
390 if (event->attr.exclude_user) in pmu_sbi_get_filter_flags()
392 if (guest_events && event->attr.exclude_hv) in pmu_sbi_get_filter_flags()
394 if (event->attr.exclude_host) in pmu_sbi_get_filter_flags()
396 if (event->attr.exclude_guest) in pmu_sbi_get_filter_flags()
404 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_get_idx()
405 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_get_idx()
406 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_get_idx()
409 uint64_t cbase = 0, cmask = rvpmu->cmask; in pmu_sbi_ctr_get_idx()
419 if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) { in pmu_sbi_ctr_get_idx()
420 if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) { in pmu_sbi_ctr_get_idx()
423 } else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_ctr_get_idx()
425 cmask = BIT(CSR_INSTRET - CSR_CYCLE); in pmu_sbi_ctr_get_idx()
432 cmask, cflags, hwc->event_base, hwc->config, in pmu_sbi_ctr_get_idx()
433 hwc->config >> 32); in pmu_sbi_ctr_get_idx()
436 cmask, cflags, hwc->event_base, hwc->config, 0); in pmu_sbi_ctr_get_idx()
440 hwc->event_base, hwc->config); in pmu_sbi_ctr_get_idx()
445 if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value) in pmu_sbi_ctr_get_idx()
446 return -ENOENT; in pmu_sbi_ctr_get_idx()
450 if (!test_and_set_bit(idx, cpuc->used_fw_ctrs)) in pmu_sbi_ctr_get_idx()
453 if (!test_and_set_bit(idx, cpuc->used_hw_ctrs)) in pmu_sbi_ctr_get_idx()
457 return -ENOENT; in pmu_sbi_ctr_get_idx()
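A note on the fixed-counter arithmetic in pmu_sbi_ctr_get_idx() and pmu_sbi_csr_index(): the logical counter index used with the SBI calls is simply the offset of the counter CSR from CSR_CYCLE, so the legacy cycle and instret events pin themselves to counters 0 and 2. A small sketch using the architectural CSR numbers; the helper names are made up.

#include <stdint.h>

#define CSR_CYCLE	0xc00	/* architectural CSR numbers */
#define CSR_INSTRET	0xc02

/* Counter index as used by the SBI PMU extension: csr - CSR_CYCLE. */
static inline unsigned int ctr_idx_from_csr(unsigned int csr)
{
	return csr - CSR_CYCLE;	/* cycle -> 0, instret -> 2, hpmcounter3 -> 3, ... */
}

/* Legacy mode therefore restricts cmask to a single counter bit. */
static inline uint64_t legacy_cmask(int want_instret)
{
	return 1ULL << (want_instret ? ctr_idx_from_csr(CSR_INSTRET) : 0);
}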
463 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_clear_idx()
464 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_clear_idx()
465 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_clear_idx()
466 int idx = hwc->idx; in pmu_sbi_ctr_clear_idx()
469 clear_bit(idx, cpuc->used_fw_ctrs); in pmu_sbi_ctr_clear_idx()
471 clear_bit(idx, cpuc->used_hw_ctrs); in pmu_sbi_ctr_clear_idx()
480 return -EINVAL; in pmu_event_find_cache()
484 return -EINVAL; in pmu_event_find_cache()
488 return -EINVAL; in pmu_event_find_cache()
497 u32 type = event->attr.type; in pmu_sbi_is_fw_event()
498 u64 config = event->attr.config; in pmu_sbi_is_fw_event()
508 u32 type = event->attr.type; in pmu_sbi_event_map()
509 u64 config = event->attr.config; in pmu_sbi_event_map()
522 return -EINVAL; in pmu_sbi_event_map()
523 ret = pmu_hw_event_map[event->attr.config].event_idx; in pmu_sbi_event_map()
533 * 00 - Hardware raw event in pmu_sbi_event_map()
534 * 10 - SBI firmware events in pmu_sbi_event_map()
535 * 11 - RISC-V platform-specific firmware event in pmu_sbi_event_map()
549 * For RISC-V platform-specific firmware events in pmu_sbi_event_map()
550 * Event code - 0xFFFF in pmu_sbi_event_map()
551 * Event data - raw event encoding in pmu_sbi_event_map()
559 ret = -ENOENT; in pmu_sbi_event_map()
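The comment above, together with the format attributes "config:0-47" and "config:62-63", describes how a PERF_TYPE_RAW config is partitioned. A hedged user-space sketch of that layout; the enum and helper names are illustrative and not the driver's.

#include <stdint.h>

enum raw_event_class {
	RAW_HW_EVENT      = 0x0,	/* 00: hardware raw event, encoding in config[47:0] */
	RAW_SBI_FW_EVENT  = 0x2,	/* 10: standard SBI firmware event */
	RAW_PLAT_FW_EVENT = 0x3,	/* 11: platform-specific firmware event,
					 *     event code 0xFFFF, raw encoding as event data */
};

static inline enum raw_event_class config_class(uint64_t config)
{
	return (enum raw_event_class)(config >> 62);
}

static inline uint64_t config_event_bits(uint64_t config)
{
	return config & ((1ULL << 48) - 1);	/* bits 0-47 */
}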
571 struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_free()
573 if (!cpu_hw_evt->snapshot_addr) in pmu_sbi_snapshot_free()
576 free_page((unsigned long)cpu_hw_evt->snapshot_addr); in pmu_sbi_snapshot_free()
577 cpu_hw_evt->snapshot_addr = NULL; in pmu_sbi_snapshot_free()
578 cpu_hw_evt->snapshot_addr_phys = 0; in pmu_sbi_snapshot_free()
588 struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_alloc()
593 return -ENOMEM; in pmu_sbi_snapshot_alloc()
595 cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page); in pmu_sbi_snapshot_alloc()
596 cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page); in pmu_sbi_snapshot_alloc()
621 cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_setup()
622 if (!cpu_hw_evt->snapshot_addr_phys) in pmu_sbi_snapshot_setup()
623 return -EINVAL; in pmu_sbi_snapshot_setup()
625 if (cpu_hw_evt->snapshot_set_done) in pmu_sbi_snapshot_setup()
630 cpu_hw_evt->snapshot_addr_phys, in pmu_sbi_snapshot_setup()
631 (u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0); in pmu_sbi_snapshot_setup()
634 cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0); in pmu_sbi_snapshot_setup()
643 memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); in pmu_sbi_snapshot_setup()
644 cpu_hw_evt->snapshot_set_done = true; in pmu_sbi_snapshot_setup()
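The two sbi_ecall() variants above differ only in how the snapshot shared-memory physical address is handed over: the call takes a low/high pair, so a 32-bit kernel must pass the upper half explicitly. A minimal illustration with assumed names.

#include <stdint.h>

static void split_shmem_phys(uint64_t phys, unsigned int xlen,
			     uint64_t *lo, uint64_t *hi)
{
	if (xlen == 32) {
		*lo = phys & 0xffffffffULL;	/* shmem_phys_lo */
		*hi = phys >> 32;		/* shmem_phys_hi */
	} else {
		*lo = phys;			/* address fits in one register */
		*hi = 0;
	}
}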
651 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_read()
652 int idx = hwc->idx; in pmu_sbi_ctr_read()
655 struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_read()
656 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_ctr_read()
657 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ctr_read()
661 if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) { in pmu_sbi_ctr_read()
662 val = sdata->ctr_values[idx]; in pmu_sbi_ctr_read()
668 hwc->idx, 0, 0, 0, 0, 0); in pmu_sbi_ctr_read()
675 hwc->idx, 0, 0, 0, 0, 0); in pmu_sbi_ctr_read()
695 if (event->hw.idx != -1) in pmu_sbi_set_scounteren()
704 if (event->hw.idx != -1) in pmu_sbi_reset_scounteren()
712 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_start()
717 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
720 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
725 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_start()
727 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_start()
728 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_start()
735 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_stop()
736 struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_stop()
737 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_ctr_stop()
738 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ctr_stop()
740 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_stop()
741 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_stop()
747 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); in pmu_sbi_ctr_stop()
750 * The counter snapshot is based on the index base specified by hwc->idx. in pmu_sbi_ctr_stop()
753 * the counter value to shared memory. However, if hwc->idx is zero, the counter in pmu_sbi_ctr_stop()
757 if (hwc->idx > 0) { in pmu_sbi_ctr_stop()
758 sdata->ctr_values[hwc->idx] = sdata->ctr_values[0]; in pmu_sbi_ctr_stop()
759 sdata->ctr_values[0] = 0; in pmu_sbi_ctr_stop()
764 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_stop()
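The fix-up above follows from how the snapshot extension reports values: they land in the shared-memory slot (counter index - counter_idx_base), so stopping a single counter with base hwc->idx leaves its value in slot 0, which is then moved back to slot hwc->idx. A tiny sketch of that relocation; the array name is illustrative.

#include <stdint.h>

static void relocate_single_snapshot(uint64_t *ctr_values, unsigned int idx)
{
	if (idx > 0) {
		ctr_values[idx] = ctr_values[0];	/* value was reported relative to base */
		ctr_values[0] = 0;
	}
}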
787 return -ENOMEM; in pmu_sbi_get_ctrinfo()
817 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); in pmu_sbi_stop_all()
822 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_stop_hw_ctrs()
823 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_stop_hw_ctrs()
833 memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); in pmu_sbi_stop_hw_ctrs()
838 cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); in pmu_sbi_stop_hw_ctrs()
841 for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) in pmu_sbi_stop_hw_ctrs()
842 cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] = in pmu_sbi_stop_hw_ctrs()
843 sdata->ctr_values[idx]; in pmu_sbi_stop_hw_ctrs()
845 temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG); in pmu_sbi_stop_hw_ctrs()
851 for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) in pmu_sbi_stop_hw_ctrs()
852 sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx]; in pmu_sbi_stop_hw_ctrs()
854 sdata->ctr_overflow_mask = temp_ctr_overflow_mask; in pmu_sbi_stop_hw_ctrs()
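Because each per-word stop call above reuses the first BITS_PER_LONG slots of the snapshot area, the values are first staged into a flat copy indexed by the absolute counter number (word * BITS_PER_LONG + bit) before being written back, and the per-word overflow bits are shifted into a single mask. A sketch of the staging step only, with illustrative names and BITS_PER_LONG assumed to be 64.

#include <stdint.h>

#define BITS_PER_LONG	64

static void stage_snapshot_word(const uint64_t *shmem_vals,	/* relative slots 0..63 */
				uint64_t used_mask,		/* used_hw_ctrs[word] */
				unsigned int word,
				uint64_t *flat_vals)		/* snapshot_cval_shcopy */
{
	for (unsigned int bit = 0; bit < BITS_PER_LONG; bit++) {
		if (used_mask & (1ULL << bit))
			flat_vals[word * BITS_PER_LONG + bit] = shmem_vals[bit];
	}
}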
876 ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask; in pmu_sbi_start_ovf_ctrs_sbi()
885 event = cpu_hw_evt->events[idx]; in pmu_sbi_start_ovf_ctrs_sbi()
886 hwc = &event->hw; in pmu_sbi_start_ovf_ctrs_sbi()
888 init_val = local64_read(&hwc->prev_count) & max_period; in pmu_sbi_start_ovf_ctrs_sbi()
911 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_start_ovf_ctrs_snapshot()
913 for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { in pmu_sbi_start_ovf_ctrs_snapshot()
915 event = cpu_hw_evt->events[idx]; in pmu_sbi_start_ovf_ctrs_snapshot()
916 hwc = &event->hw; in pmu_sbi_start_ovf_ctrs_snapshot()
918 init_val = local64_read(&hwc->prev_count) & max_period; in pmu_sbi_start_ovf_ctrs_snapshot()
919 cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val; in pmu_sbi_start_ovf_ctrs_snapshot()
922 * We do not need to update the non-overflow counters; the previous in pmu_sbi_start_ovf_ctrs_snapshot()
929 for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) in pmu_sbi_start_ovf_ctrs_snapshot()
930 sdata->ctr_values[idx] = in pmu_sbi_start_ovf_ctrs_snapshot()
931 cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG]; in pmu_sbi_start_ovf_ctrs_snapshot()
934 cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); in pmu_sbi_start_ovf_ctrs_snapshot()
941 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_start_overflow_mask()
962 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ovf_handler()
968 fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); in pmu_sbi_ovf_handler()
974 event = cpu_hw_evt->events[fidx]; in pmu_sbi_ovf_handler()
980 pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ovf_handler()
985 overflow = sdata->ctr_overflow_mask; in pmu_sbi_ovf_handler()
1001 for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { in pmu_sbi_ovf_handler()
1002 struct perf_event *event = cpu_hw_evt->events[lidx]; in pmu_sbi_ovf_handler()
1010 if (!info || info->type != SBI_PMU_CTR_TYPE_HW) in pmu_sbi_ovf_handler()
1018 hidx = info->csr - CSR_CYCLE; in pmu_sbi_ovf_handler()
1029 hw_evt = &event->hw; in pmu_sbi_ovf_handler()
1031 hw_evt->state |= PERF_HES_STOPPED; in pmu_sbi_ovf_handler()
1033 hw_evt->state |= PERF_HES_UPTODATE; in pmu_sbi_ovf_handler()
1034 perf_sample_data_init(&data, 0, hw_evt->last_period); in pmu_sbi_ovf_handler()
1037 * Unlike other ISAs, RISC-V doesn't have to disable interrupts in pmu_sbi_ovf_handler()
1047 hw_evt->state = 0; in pmu_sbi_ovf_handler()
1051 perf_sample_event_took(sched_clock() - start_clock); in pmu_sbi_ovf_handler()
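In the handler above, the overflow bitmap reported by the firmware (or the Sscofpmf path) is indexed by the hardware counter position, i.e. the counter CSR's offset from CSR_CYCLE, which is what the hidx computation expresses. A small sketch of that check with illustrative names.

#include <stdbool.h>
#include <stdint.h>

#define CSR_CYCLE	0xc00

struct ctr_info {
	unsigned int csr;	/* e.g. 0xc03 for hpmcounter3 */
};

static bool ctr_overflowed(const struct ctr_info *info, uint64_t overflow_mask)
{
	unsigned int hidx = info->csr - CSR_CYCLE;	/* hardware overflow bit index */

	return overflow_mask & (1ULL << hidx);
}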
1059 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_starting_cpu()
1074 cpu_hw_evt->irq = riscv_pmu_irq; in pmu_sbi_starting_cpu()
1103 struct cpu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_sbi_setup_irqs()
1125 return -EOPNOTSUPP; in pmu_sbi_setup_irqs()
1131 return -ENODEV; in pmu_sbi_setup_irqs()
1137 return -ENODEV; in pmu_sbi_setup_irqs()
1140 ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); in pmu_sbi_setup_irqs()
1154 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in riscv_pm_pmu_notify()
1155 int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); in riscv_pm_pmu_notify()
1163 event = cpuc->events[idx]; in riscv_pm_pmu_notify()
1191 pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; in riscv_pm_pmu_register()
1192 return cpu_pm_register_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_register()
1197 cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_unregister()
1213 cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in riscv_pmu_destroy()
1223 event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS; in pmu_sbi_event_init()
1225 event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS; in pmu_sbi_event_init()
1227 event->hw.flags |= PERF_EVENT_FLAG_LEGACY; in pmu_sbi_event_init()
1232 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_mapped()
1235 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_mapped()
1236 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_mapped()
1237 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_mapped()
1251 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_mapped()
1260 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_mapped()
1267 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_unmapped()
1270 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_unmapped()
1271 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_unmapped()
1272 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_unmapped()
1283 event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_unmapped()
1285 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_unmapped()
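Once pmu_sbi_event_mapped() has granted user access (the matching scounteren bit is set on the CPUs running the task), user space can read the counter CSR directly instead of going through a syscall. A minimal RV64 sketch for the cycle counter; it assumes a RISC-V toolchain and that the counter is actually delegated, and RV32 would additionally need the cycleh CSR for the upper half.

#include <stdint.h>

static inline uint64_t read_cycle_counter(void)
{
	uint64_t val;

	__asm__ volatile ("csrr %0, cycle" : "=r" (val));
	return val;
}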
1333 int ret = -ENODEV; in pmu_sbi_device_probe()
1336 pr_info("SBI PMU extension is available\n"); in pmu_sbi_device_probe()
1339 return -ENOMEM; in pmu_sbi_device_probe()
1343 pr_err("SBI PMU extension doesn't provide any counters\n"); in pmu_sbi_device_probe()
1359 pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n"); in pmu_sbi_device_probe()
1360 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; in pmu_sbi_device_probe()
1361 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; in pmu_sbi_device_probe()
1364 pmu->pmu.attr_groups = riscv_pmu_attr_groups; in pmu_sbi_device_probe()
1365 pmu->pmu.parent = &pdev->dev; in pmu_sbi_device_probe()
1366 pmu->cmask = cmask; in pmu_sbi_device_probe()
1367 pmu->ctr_start = pmu_sbi_ctr_start; in pmu_sbi_device_probe()
1368 pmu->ctr_stop = pmu_sbi_ctr_stop; in pmu_sbi_device_probe()
1369 pmu->event_map = pmu_sbi_event_map; in pmu_sbi_device_probe()
1370 pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; in pmu_sbi_device_probe()
1371 pmu->ctr_get_width = pmu_sbi_ctr_get_width; in pmu_sbi_device_probe()
1372 pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; in pmu_sbi_device_probe()
1373 pmu->ctr_read = pmu_sbi_ctr_read; in pmu_sbi_device_probe()
1374 pmu->event_init = pmu_sbi_event_init; in pmu_sbi_device_probe()
1375 pmu->event_mapped = pmu_sbi_event_mapped; in pmu_sbi_device_probe()
1376 pmu->event_unmapped = pmu_sbi_event_unmapped; in pmu_sbi_device_probe()
1377 pmu->csr_index = pmu_sbi_csr_index; in pmu_sbi_device_probe()
1383 ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); in pmu_sbi_device_probe()
1416 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in pmu_sbi_device_probe()
1454 "perf/riscv/pmu:starting", in pmu_sbi_devinit()
1466 pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0); in pmu_sbi_devinit()