/linux/arch/riscv/kvm/vcpu_sbi_pmu.c (revision f4b0c4b508364fde023e4f7b9f23f7e38c663dfe)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

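/*
 * Handle an SBI PMU extension call from the guest: the function ID arrives in
 * a6 and is dispatched to the matching in-kernel vPMU operation below.
 */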
static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	unsigned long funcid = cp->a6;
	u64 temp;

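	/* All PMU functions require an initialized vPMU; otherwise report SBI_ERR_NOT_SUPPORTED. */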
	if (!kvpmu->init_done) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	switch (funcid) {
	case SBI_EXT_PMU_NUM_COUNTERS:
		ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_GET_INFO:
		ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_CFG_MATCH:
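		/*
		 * The 64-bit event data fits in a4 on RV64, but on RV32 it is
		 * split across a4 (low word) and a5 (high word).
		 */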
#if defined(CONFIG_32BIT)
		temp = ((uint64_t)cp->a5 << 32) | cp->a4;
#else
		temp = cp->a4;
#endif
		/*
		 * This can fail if the perf core framework fails to create an
		 * event. There is no need to forward the error to userspace
		 * and exit the guest: the operation can continue without
		 * profiling. Forward the appropriate SBI error to the guest
		 * instead.
		 */
		ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1,
						       cp->a2, cp->a3, temp, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_START:
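		/*
		 * Likewise, the 64-bit initial counter value arrives in a3 on
		 * RV64 and split across a3 (low) / a4 (high) on RV32.
		 */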
#if defined(CONFIG_32BIT)
		temp = ((uint64_t)cp->a4 << 32) | cp->a3;
#else
		temp = cp->a3;
#endif
		ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2,
						   temp, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_STOP:
		ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_FW_READ:
		ret = kvm_riscv_vcpu_pmu_fw_ctr_read(vcpu, cp->a0, retdata);
		break;
	case SBI_EXT_PMU_COUNTER_FW_READ_HI:
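		/*
		 * Reading the high 32 bits separately is only meaningful on
		 * RV32; on RV64 the full value comes from COUNTER_FW_READ and
		 * this call returns zero.
		 */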
		if (IS_ENABLED(CONFIG_32BIT))
			ret = kvm_riscv_vcpu_pmu_fw_ctr_read_hi(vcpu, cp->a0, retdata);
		else
			retdata->out_val = 0;
		break;
	case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM:
		ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata);
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return ret;
}

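/*
 * The probe result tells the guest whether the PMU extension is usable; it is
 * reported as present only once the vPMU has been initialized for this vCPU.
 */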
static unsigned long kvm_sbi_ext_pmu_probe(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

	return kvpmu->init_done;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = SBI_EXT_PMU,
	.extid_end = SBI_EXT_PMU,
	.handler = kvm_sbi_ext_pmu_handler,
	.probe = kvm_sbi_ext_pmu_probe,
};
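
/*
 * For illustration only (not part of this file): a guest reaches this handler
 * through the standard SBI calling convention, with the extension ID in a7,
 * the function ID in a6, and arguments in a0-a5. A minimal guest-side sketch
 * using Linux's sbi_ecall() helper to query the number of counters might look
 * like this (pmu_num_counters() is a hypothetical wrapper, not an existing
 * kernel function):
 *
 *	static unsigned long pmu_num_counters(void)
 *	{
 *		struct sbiret ret;
 *
 *		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS,
 *				0, 0, 0, 0, 0, 0);
 *		return ret.error ? 0 : ret.value;
 *	}
 */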