// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

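/*
 * Dummy extension entries used when an SBI extension is compiled out.
 * The invalid extid range (-1UL) and NULL handler ensure these entries
 * never match a guest SBI call in kvm_vcpu_sbi_find_ext().
 */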
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

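/* Pairs a user-visible SBI extension ID with its in-kernel implementation. */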
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

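/*
 * Forward an SBI call to userspace: stash the extension ID, function ID,
 * and arguments in the kvm_run structure and exit with KVM_EXIT_RISCV_SBI.
 * The return values are seeded from a0/a1 and later written back by
 * kvm_riscv_vcpu_sbi_return().
 */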
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

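/*
 * Power off all vCPUs of the VM and report a system event of the given
 * type and reason to userspace via KVM_EXIT_SYSTEM_EVENT.
 */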
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

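/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values provided in kvm_run back into a0/a1 and skip the ecall instruction.
 */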
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}

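/* Enable or disable a single SBI extension identified by its ONE_REG index. */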
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_AVAILABLE :
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}

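/* Report whether a single SBI extension is currently enabled. */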
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_AVAILABLE;
	return 0;
}

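/*
 * Enable or disable every extension whose bit is set in one multi-extension
 * register; bits beyond KVM_RISCV_SBI_EXT_MAX are ignored.
 */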
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

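/* Build the bitmask of enabled extensions covered by one multi-extension register. */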
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

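/*
 * KVM_SET_ONE_REG handler for the SBI extension register space. The SBI
 * extension state can only be changed before the vCPU has run for the first
 * time. As an illustration (register ID layout per the KVM API documentation),
 * userspace could disable SBI v0.1 emulation by writing 0 to the register
 * KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT |
 * KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01.
 */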
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

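/*
 * KVM_GET_ONE_REG handler for the SBI extension register space. For the
 * MULTI_DIS view the enabled-extension bitmask is returned inverted, so
 * set bits indicate disabled extensions.
 */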
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

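/*
 * Look up the SBI extension covering @extid. Returns the extension only if
 * it is currently available for this vCPU or cannot be disabled (such as
 * the base extension); returns NULL otherwise.
 */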
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;

			return NULL;
		}
	}

	return NULL;
}

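/*
 * Top-level handler for an SBI ecall from the guest: dispatch to the
 * matching extension, redirect guest traps, honour userspace exits, and
 * write the SBI error/return values back into a0/a1. Returns > 0 to stay
 * in the vCPU run loop, 0 to exit to userspace, < 0 on error.
 */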
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the ioctl
	 * loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e., trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}

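/*
 * Initialize the per-vCPU SBI extension status: probe each extension and
 * mark it available or unavailable (honouring default_unavail), so that
 * later guest calls and ONE_REG accesses see a consistent initial state.
 */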
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[entry->ext_idx] =
				KVM_RISCV_SBI_EXT_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[entry->ext_idx] = ext->default_unavail ?
					KVM_RISCV_SBI_EXT_UNAVAILABLE :
					KVM_RISCV_SBI_EXT_AVAILABLE;
	}
}
425