xref: /linux/arch/riscv/kvm/vcpu_sbi.c (revision 3a755ebcc2557e22b895b8976257f682c653db1d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

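/*
 * Map a Linux error code returned by an SBI extension handler to the
 * corresponding SBI error code defined by the SBI specification, so it
 * can be reported back to the guest in a0.
 */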
static int kvm_linux_err_map_sbi(int err)
{
	switch (err) {
	case 0:
		return SBI_SUCCESS;
	case -EPERM:
		return SBI_ERR_DENIED;
	case -EINVAL:
		return SBI_ERR_INVALID_PARAM;
	case -EFAULT:
		return SBI_ERR_INVALID_ADDRESS;
	case -EOPNOTSUPP:
		return SBI_ERR_NOT_SUPPORTED;
	case -EALREADY:
		return SBI_ERR_ALREADY_AVAILABLE;
	default:
		return SBI_ERR_FAILURE;
	}
}

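/*
 * When CONFIG_RISCV_SBI_V01 is disabled, provide a placeholder v0.1
 * extension whose [extid_start, extid_end] range can never match, so
 * lookups in sbi_ext[] simply fall through to the other extensions.
 */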
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#else
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;

static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_v01,
	&vcpu_sbi_ext_base,
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
	&vcpu_sbi_ext_srst,
	&vcpu_sbi_ext_hsm,
	&vcpu_sbi_ext_experimental,
	&vcpu_sbi_ext_vendor,
};

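/**
 * kvm_riscv_vcpu_sbi_forward - forward an SBI call to userspace
 * @vcpu: the VCPU that executed the SBI call
 * @run: the VCPU's run structure shared with userspace
 *
 * Copy the SBI extension ID, function ID, and arguments out of the guest
 * context into @run and set the exit reason to KVM_EXIT_RISCV_SBI so the
 * ioctl loop returns to userspace. Userspace is expected to fill in
 * run->riscv_sbi.ret[] before resuming the VCPU; those values are written
 * back to the guest by kvm_riscv_vcpu_sbi_return().
 */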
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

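/**
 * kvm_riscv_vcpu_sbi_system_reset - handle a guest-requested system event
 * @vcpu: the VCPU that requested the reset or shutdown
 * @run: the VCPU's run structure shared with userspace
 * @type: the KVM_SYSTEM_EVENT_* type to report
 * @reason: event-specific reason code passed through to userspace
 *
 * Mark every VCPU of the VM as powered off, request that they all sleep,
 * and exit to userspace with KVM_EXIT_SYSTEM_EVENT so the VMM can carry
 * out the actual shutdown or reset.
 */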
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

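/**
 * kvm_riscv_vcpu_sbi_return - complete an SBI call forwarded to userspace
 * @vcpu: the VCPU that executed the SBI call
 * @run: the VCPU's run structure, with riscv_sbi.ret[] filled by userspace
 *
 * Copy the return values provided by userspace back into the guest's
 * a0/a1 registers and step sepc past the ecall instruction. The
 * return_handled flag ensures this runs only once per forwarded call,
 * even if KVM_RUN is invoked again without a new SBI exit.
 */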
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

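/*
 * Look up the extension that claims @extid by scanning the (small) table
 * of registered extensions; each entry covers the inclusive ID range
 * [extid_start, extid_end]. Returns NULL if no extension matches.
 */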
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i]->extid_start <= extid &&
		    sbi_ext[i]->extid_end >= extid)
			return sbi_ext[i];
	}

	return NULL;
}

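/**
 * kvm_riscv_vcpu_sbi_ecall - top-level dispatcher for guest SBI calls
 * @vcpu: the VCPU that trapped on an ecall
 * @run: the VCPU's run structure shared with userspace
 *
 * Look up the extension selected by a7 and invoke its handler. The
 * handler may raise a trap that is redirected back to the guest, request
 * an exit to userspace, or return a Linux error code that is mapped to an
 * SBI error code and placed in the guest's a0. Returns 1 to continue the
 * in-kernel run loop and 0 to exit to userspace.
 */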
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	bool userspace_exit = false;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = { 0 };
	unsigned long out_val = 0;
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (utrap.scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		utrap.sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (userspace_exit) {
		next_sepc = false;
		ret = 0;
	} else {
		/*
		 * The SBI extension handler always returns a Linux error code.
		 * Convert it to the SBI-specific error code that can be
		 * propagated to the SBI caller.
		 */
		ret = kvm_linux_err_map_sbi(ret);
		cp->a0 = ret;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	if (!ext_is_v01)
		cp->a1 = out_val;

	return ret;
}
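
/*
 * Illustrative sketch (an assumption, not part of the kernel tree): how a
 * VMM's run loop might service the KVM_EXIT_RISCV_SBI exit produced by
 * kvm_riscv_vcpu_sbi_forward() above. handle_console_putchar() and the
 * 0x01 legacy CONSOLE_PUTCHAR extension ID are hypothetical example names;
 * the riscv_sbi fields follow struct kvm_run in <linux/kvm.h>.
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_RISCV_SBI:
 *			if (run->riscv_sbi.extension_id == 0x01)
 *				handle_console_putchar(run->riscv_sbi.args[0]);
 *			// ret[0]/ret[1] are copied back to guest a0/a1 by
 *			// kvm_riscv_vcpu_sbi_return() on the next KVM_RUN.
 *			run->riscv_sbi.ret[0] = 0;
 *			break;
 *		}
 *	}
 */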