// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

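/*
 * SBI HSM HART_START: power on the stopped vCPU identified by a0, arrange
 * for it to begin execution at the address in a1 with a2 as its opaque
 * argument, and request a reset of its state before it runs.
 */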
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *reset_cntx;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_vcpu *target_vcpu;
	unsigned long target_vcpuid = cp->a0;
	int ret = 0;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;

	spin_lock(&target_vcpu->arch.mp_state_lock);

	if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
		ret = SBI_ERR_ALREADY_AVAILABLE;
		goto out;
	}

	spin_lock(&target_vcpu->arch.reset_cntx_lock);
	reset_cntx = &target_vcpu->arch.guest_reset_context;
	/* start address */
	reset_cntx->sepc = cp->a1;
	/* target vcpu id to start */
	reset_cntx->a0 = target_vcpuid;
	/* private data passed from kernel */
	reset_cntx->a1 = cp->a2;
	spin_unlock(&target_vcpu->arch.reset_cntx_lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);

	__kvm_riscv_vcpu_power_on(target_vcpu);

out:
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}

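/*
 * SBI HSM HART_STOP: power off the calling vCPU. Per the SBI spec a hart
 * stops itself, so no target lookup is needed; stopping an already-stopped
 * hart fails with SBI_ERR_FAILURE.
 */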
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	if (kvm_riscv_vcpu_stopped(vcpu)) {
		ret = SBI_ERR_FAILURE;
		goto out;
	}

	__kvm_riscv_vcpu_power_off(vcpu);

out:
	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

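/*
 * SBI HSM HART_GET_STATUS: report the state of the target vCPU named by a0.
 * A vCPU blocked in the hypervisor (e.g. in WFI) is reported as suspended.
 */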
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long target_vcpuid = cp->a0;
	struct kvm_vcpu *target_vcpu;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;
	if (kvm_riscv_vcpu_stopped(target_vcpu))
		return SBI_HSM_STATE_STOPPED;
	else if (target_vcpu->stat.generic.blocking)
		return SBI_HSM_STATE_SUSPENDED;
	else
		return SBI_HSM_STATE_STARTED;
}

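/* Dispatch SBI HSM calls based on the function ID the guest passed in a6. */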
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_HSM_HART_START:
		ret = kvm_sbi_hsm_vcpu_start(vcpu);
		break;
	case SBI_EXT_HSM_HART_STOP:
		ret = kvm_sbi_hsm_vcpu_stop(vcpu);
		break;
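	/*
	 * HART_STATUS returns its result in the SBI value register (out_val)
	 * rather than the error register, hence the early return that
	 * bypasses the err_val assignment below.
	 */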
	case SBI_EXT_HSM_HART_STATUS:
		ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
		if (ret >= 0) {
			retdata->out_val = ret;
			retdata->err_val = 0;
		}
		return 0;
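	/*
	 * Only the default retentive suspend is supported; it is implemented
	 * as hypervisor-side WFI, so all hart state is retained. Non-retentive
	 * suspend is reported as not supported.
	 */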
	case SBI_EXT_HSM_HART_SUSPEND:
		switch (lower_32_bits(cp->a0)) {
		case SBI_HSM_SUSPEND_RET_DEFAULT:
			kvm_riscv_vcpu_wfi(vcpu);
			break;
		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
			ret = SBI_ERR_NOT_SUPPORTED;
			break;
		default:
			ret = SBI_ERR_INVALID_PARAM;
		}
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
	}

	retdata->err_val = ret;

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
	.extid_start = SBI_EXT_HSM,
	.extid_end = SBI_EXT_HSM,
	.handler = kvm_sbi_ext_hsm_handler,
};
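
/*
 * For reference, a guest would reach kvm_sbi_ext_hsm_handler() with an ecall
 * like the sketch below (guest-side code, not part of this file; it assumes
 * the standard SBI v0.2 calling convention where a7 carries the extension ID,
 * a6 the function ID, and a0/a1 return error/value; target_hartid,
 * start_addr, and opaque are placeholder variables):
 *
 *	register unsigned long a0 asm("a0") = target_hartid;
 *	register unsigned long a1 asm("a1") = start_addr;
 *	register unsigned long a2 asm("a2") = opaque;
 *	register unsigned long a6 asm("a6") = SBI_EXT_HSM_HART_START;
 *	register unsigned long a7 asm("a7") = SBI_EXT_HSM;
 *
 *	asm volatile("ecall"
 *		     : "+r" (a0), "+r" (a1)
 *		     : "r" (a2), "r" (a6), "r" (a7)
 *		     : "memory");
 *
 * On return, a0 holds the SBI error code (retdata->err_val above) and a1 the
 * SBI value (retdata->out_val).
 */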