// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

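/*
 * SBI HSM HART_START: bring a stopped target vCPU online. The target
 * vcpu id is passed in a0, the resume address in a1, and the opaque
 * argument in a2.
 */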
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_vcpu *target_vcpu;
	unsigned long target_vcpuid = cp->a0;
	int ret = 0;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;

	spin_lock(&target_vcpu->arch.mp_state_lock);

	if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
		ret = SBI_ERR_ALREADY_AVAILABLE;
		goto out;
	}

	kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);

	__kvm_riscv_vcpu_power_on(target_vcpu);

out:
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}

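/*
 * SBI HSM HART_STOP: power off the calling vCPU. Fails if the vCPU is
 * already stopped.
 */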
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	if (kvm_riscv_vcpu_stopped(vcpu)) {
		ret = SBI_ERR_FAILURE;
		goto out;
	}

	__kvm_riscv_vcpu_power_off(vcpu);

out:
	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

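/*
 * SBI HSM HART_STATUS: report whether the target vCPU identified by a0
 * is stopped, suspended (blocked in the host), or started.
 */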
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long target_vcpuid = cp->a0;
	struct kvm_vcpu *target_vcpu;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;
	if (kvm_riscv_vcpu_stopped(target_vcpu))
		return SBI_HSM_STATE_STOPPED;
	else if (target_vcpu->stat.generic.blocking)
		return SBI_HSM_STATE_SUSPENDED;
	else
		return SBI_HSM_STATE_STARTED;
}

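/*
 * Top-level dispatcher for the SBI HSM extension. The function id
 * arrives in a6; the SBI error code is returned via retdata->err_val,
 * and HART_STATUS additionally returns the state via retdata->out_val.
 */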
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_HSM_HART_START:
		ret = kvm_sbi_hsm_vcpu_start(vcpu);
		break;
	case SBI_EXT_HSM_HART_STOP:
		ret = kvm_sbi_hsm_vcpu_stop(vcpu);
		break;
	case SBI_EXT_HSM_HART_STATUS:
		ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
		if (ret >= 0) {
			retdata->out_val = ret;
			retdata->err_val = 0;
		}
		return 0;
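	/*
	 * Retentive suspend is emulated with WFI; non-retentive suspend
	 * is not supported.
	 */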
	case SBI_EXT_HSM_HART_SUSPEND:
		switch (lower_32_bits(cp->a0)) {
		case SBI_HSM_SUSPEND_RET_DEFAULT:
			kvm_riscv_vcpu_wfi(vcpu);
			break;
		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
			ret = SBI_ERR_NOT_SUPPORTED;
			break;
		default:
			ret = SBI_ERR_INVALID_PARAM;
		}
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
	}

	retdata->err_val = ret;

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
	.extid_start = SBI_EXT_HSM,
	.extid_end = SBI_EXT_HSM,
	.handler = kvm_sbi_ext_hsm_handler,
};