xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  * Copyright (C) 2023 Ventana Micro Systems Inc.
5  *
6  * Authors:
7  *	Anup Patel <apatel@ventanamicro.com>
8  */
9 
10 #include <linux/bitops.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/uaccess.h>
14 #include <linux/kvm_host.h>
15 #include <asm/cacheflush.h>
16 #include <asm/cpufeature.h>
17 #include <asm/kvm_vcpu_vector.h>
18 #include <asm/vector.h>
19 
20 #define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
21 
22 #define KVM_ISA_EXT_ARR(ext)		\
23 [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
24 
25 /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
26 static const unsigned long kvm_isa_ext_arr[] = {
27 	/* Single letter extensions (alphabetically sorted) */
28 	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
29 	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
30 	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
31 	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
32 	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
33 	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
34 	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
35 	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
36 	/* Multi letter extensions (alphabetically sorted) */
37 	KVM_ISA_EXT_ARR(SMSTATEEN),
38 	KVM_ISA_EXT_ARR(SSAIA),
39 	KVM_ISA_EXT_ARR(SSCOFPMF),
40 	KVM_ISA_EXT_ARR(SSTC),
41 	KVM_ISA_EXT_ARR(SVINVAL),
42 	KVM_ISA_EXT_ARR(SVNAPOT),
43 	KVM_ISA_EXT_ARR(SVPBMT),
44 	KVM_ISA_EXT_ARR(ZACAS),
45 	KVM_ISA_EXT_ARR(ZAWRS),
46 	KVM_ISA_EXT_ARR(ZBA),
47 	KVM_ISA_EXT_ARR(ZBB),
48 	KVM_ISA_EXT_ARR(ZBC),
49 	KVM_ISA_EXT_ARR(ZBKB),
50 	KVM_ISA_EXT_ARR(ZBKC),
51 	KVM_ISA_EXT_ARR(ZBKX),
52 	KVM_ISA_EXT_ARR(ZBS),
53 	KVM_ISA_EXT_ARR(ZCA),
54 	KVM_ISA_EXT_ARR(ZCB),
55 	KVM_ISA_EXT_ARR(ZCD),
56 	KVM_ISA_EXT_ARR(ZCF),
57 	KVM_ISA_EXT_ARR(ZCMOP),
58 	KVM_ISA_EXT_ARR(ZFA),
59 	KVM_ISA_EXT_ARR(ZFH),
60 	KVM_ISA_EXT_ARR(ZFHMIN),
61 	KVM_ISA_EXT_ARR(ZICBOM),
62 	KVM_ISA_EXT_ARR(ZICBOZ),
63 	KVM_ISA_EXT_ARR(ZICNTR),
64 	KVM_ISA_EXT_ARR(ZICOND),
65 	KVM_ISA_EXT_ARR(ZICSR),
66 	KVM_ISA_EXT_ARR(ZIFENCEI),
67 	KVM_ISA_EXT_ARR(ZIHINTNTL),
68 	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
69 	KVM_ISA_EXT_ARR(ZIHPM),
70 	KVM_ISA_EXT_ARR(ZIMOP),
71 	KVM_ISA_EXT_ARR(ZKND),
72 	KVM_ISA_EXT_ARR(ZKNE),
73 	KVM_ISA_EXT_ARR(ZKNH),
74 	KVM_ISA_EXT_ARR(ZKR),
75 	KVM_ISA_EXT_ARR(ZKSED),
76 	KVM_ISA_EXT_ARR(ZKSH),
77 	KVM_ISA_EXT_ARR(ZKT),
78 	KVM_ISA_EXT_ARR(ZTSO),
79 	KVM_ISA_EXT_ARR(ZVBB),
80 	KVM_ISA_EXT_ARR(ZVBC),
81 	KVM_ISA_EXT_ARR(ZVFH),
82 	KVM_ISA_EXT_ARR(ZVFHMIN),
83 	KVM_ISA_EXT_ARR(ZVKB),
84 	KVM_ISA_EXT_ARR(ZVKG),
85 	KVM_ISA_EXT_ARR(ZVKNED),
86 	KVM_ISA_EXT_ARR(ZVKNHA),
87 	KVM_ISA_EXT_ARR(ZVKNHB),
88 	KVM_ISA_EXT_ARR(ZVKSED),
89 	KVM_ISA_EXT_ARR(ZVKSH),
90 	KVM_ISA_EXT_ARR(ZVKT),
91 };
92 
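/*
 * Translate a host base ISA extension ID into the matching KVM ISA
 * extension ID, or return KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */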
93 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
94 {
95 	unsigned long i;
96 
97 	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
98 		if (kvm_isa_ext_arr[i] == base_ext)
99 			return i;
100 	}
101 
102 	return KVM_RISCV_ISA_EXT_MAX;
103 }
104 
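/* Check whether KVM allows the given ISA extension to be enabled for a VCPU */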
105 static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
106 {
107 	switch (ext) {
108 	case KVM_RISCV_ISA_EXT_H:
109 		return false;
110 	case KVM_RISCV_ISA_EXT_SSCOFPMF:
111 		/* Sscofpmf depends on interrupt filtering defined in ssaia */
112 		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
113 	case KVM_RISCV_ISA_EXT_V:
114 		return riscv_v_vstate_ctrl_user_allowed();
115 	default:
116 		break;
117 	}
118 
119 	return true;
120 }
121 
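/* Check whether KVM allows the given ISA extension to be disabled for a VCPU */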
122 static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
123 {
124 	switch (ext) {
125 	/* Extensions which don't have any mechanism to disable */
126 	case KVM_RISCV_ISA_EXT_A:
127 	case KVM_RISCV_ISA_EXT_C:
128 	case KVM_RISCV_ISA_EXT_I:
129 	case KVM_RISCV_ISA_EXT_M:
130 	/* There is no architectural config bit to disable sscofpmf completely */
131 	case KVM_RISCV_ISA_EXT_SSCOFPMF:
132 	case KVM_RISCV_ISA_EXT_SSTC:
133 	case KVM_RISCV_ISA_EXT_SVINVAL:
134 	case KVM_RISCV_ISA_EXT_SVNAPOT:
135 	case KVM_RISCV_ISA_EXT_ZACAS:
136 	case KVM_RISCV_ISA_EXT_ZAWRS:
137 	case KVM_RISCV_ISA_EXT_ZBA:
138 	case KVM_RISCV_ISA_EXT_ZBB:
139 	case KVM_RISCV_ISA_EXT_ZBC:
140 	case KVM_RISCV_ISA_EXT_ZBKB:
141 	case KVM_RISCV_ISA_EXT_ZBKC:
142 	case KVM_RISCV_ISA_EXT_ZBKX:
143 	case KVM_RISCV_ISA_EXT_ZBS:
144 	case KVM_RISCV_ISA_EXT_ZCA:
145 	case KVM_RISCV_ISA_EXT_ZCB:
146 	case KVM_RISCV_ISA_EXT_ZCD:
147 	case KVM_RISCV_ISA_EXT_ZCF:
148 	case KVM_RISCV_ISA_EXT_ZCMOP:
149 	case KVM_RISCV_ISA_EXT_ZFA:
150 	case KVM_RISCV_ISA_EXT_ZFH:
151 	case KVM_RISCV_ISA_EXT_ZFHMIN:
152 	case KVM_RISCV_ISA_EXT_ZICNTR:
153 	case KVM_RISCV_ISA_EXT_ZICOND:
154 	case KVM_RISCV_ISA_EXT_ZICSR:
155 	case KVM_RISCV_ISA_EXT_ZIFENCEI:
156 	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
157 	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
158 	case KVM_RISCV_ISA_EXT_ZIHPM:
159 	case KVM_RISCV_ISA_EXT_ZIMOP:
160 	case KVM_RISCV_ISA_EXT_ZKND:
161 	case KVM_RISCV_ISA_EXT_ZKNE:
162 	case KVM_RISCV_ISA_EXT_ZKNH:
163 	case KVM_RISCV_ISA_EXT_ZKR:
164 	case KVM_RISCV_ISA_EXT_ZKSED:
165 	case KVM_RISCV_ISA_EXT_ZKSH:
166 	case KVM_RISCV_ISA_EXT_ZKT:
167 	case KVM_RISCV_ISA_EXT_ZTSO:
168 	case KVM_RISCV_ISA_EXT_ZVBB:
169 	case KVM_RISCV_ISA_EXT_ZVBC:
170 	case KVM_RISCV_ISA_EXT_ZVFH:
171 	case KVM_RISCV_ISA_EXT_ZVFHMIN:
172 	case KVM_RISCV_ISA_EXT_ZVKB:
173 	case KVM_RISCV_ISA_EXT_ZVKG:
174 	case KVM_RISCV_ISA_EXT_ZVKNED:
175 	case KVM_RISCV_ISA_EXT_ZVKNHA:
176 	case KVM_RISCV_ISA_EXT_ZVKNHB:
177 	case KVM_RISCV_ISA_EXT_ZVKSED:
178 	case KVM_RISCV_ISA_EXT_ZVKSH:
179 	case KVM_RISCV_ISA_EXT_ZVKT:
180 		return false;
181 	/* Extensions which can be disabled using Smstateen */
182 	case KVM_RISCV_ISA_EXT_SSAIA:
183 		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
184 	default:
185 		break;
186 	}
187 
188 	return true;
189 }
190 
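/*
 * Populate the default VCPU ISA bitmap with every extension that is both
 * available on the host and allowed to be enabled by KVM.
 */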
191 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
192 {
193 	unsigned long host_isa, i;
194 
195 	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
196 		host_isa = kvm_isa_ext_arr[i];
197 		if (__riscv_isa_extension_available(NULL, host_isa) &&
198 		    kvm_riscv_vcpu_isa_enable_allowed(i))
199 			set_bit(host_isa, vcpu->arch.isa);
200 	}
201 }
202 
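/*
 * Read a CONFIG register: isa, zicbom/zicboz block size, mvendorid,
 * marchid, mimpid or satp_mode.
 */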
203 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
204 					 const struct kvm_one_reg *reg)
205 {
206 	unsigned long __user *uaddr =
207 			(unsigned long __user *)(unsigned long)reg->addr;
208 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
209 					    KVM_REG_SIZE_MASK |
210 					    KVM_REG_RISCV_CONFIG);
211 	unsigned long reg_val;
212 
213 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
214 		return -EINVAL;
215 
216 	switch (reg_num) {
217 	case KVM_REG_RISCV_CONFIG_REG(isa):
218 		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
219 		break;
220 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
221 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
222 			return -ENOENT;
223 		reg_val = riscv_cbom_block_size;
224 		break;
225 	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
226 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
227 			return -ENOENT;
228 		reg_val = riscv_cboz_block_size;
229 		break;
230 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
231 		reg_val = vcpu->arch.mvendorid;
232 		break;
233 	case KVM_REG_RISCV_CONFIG_REG(marchid):
234 		reg_val = vcpu->arch.marchid;
235 		break;
236 	case KVM_REG_RISCV_CONFIG_REG(mimpid):
237 		reg_val = vcpu->arch.mimpid;
238 		break;
239 	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
240 		reg_val = satp_mode >> SATP_MODE_SHIFT;
241 		break;
242 	default:
243 		return -ENOENT;
244 	}
245 
246 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
247 		return -EFAULT;
248 
249 	return 0;
250 }
251 
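/*
 * Write a CONFIG register. The isa, mvendorid, marchid and mimpid values
 * can only be changed before the VCPU has run at least once.
 */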
252 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
253 					 const struct kvm_one_reg *reg)
254 {
255 	unsigned long __user *uaddr =
256 			(unsigned long __user *)(unsigned long)reg->addr;
257 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
258 					    KVM_REG_SIZE_MASK |
259 					    KVM_REG_RISCV_CONFIG);
260 	unsigned long i, isa_ext, reg_val;
261 
262 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
263 		return -EINVAL;
264 
265 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
266 		return -EFAULT;
267 
268 	switch (reg_num) {
269 	case KVM_REG_RISCV_CONFIG_REG(isa):
270 		/*
271 		 * This ONE REG interface is only defined for
272 		 * single letter extensions.
273 		 */
274 		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
275 			return -EINVAL;
276 
277 		/*
278 		 * Return early (i.e. do nothing) if reg_val is the same
279 		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
280 		 */
281 		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
282 			break;
283 
284 		if (!vcpu->arch.ran_atleast_once) {
285 			/* Ignore the enable/disable request for certain extensions */
286 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
287 				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
288 				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
289 					reg_val &= ~BIT(i);
290 					continue;
291 				}
292 				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
293 					if (reg_val & BIT(i))
294 						reg_val &= ~BIT(i);
295 				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
296 					if (!(reg_val & BIT(i)))
297 						reg_val |= BIT(i);
298 			}
299 			reg_val &= riscv_isa_extension_base(NULL);
300 			/* Do not modify anything beyond single letter extensions */
301 			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
302 				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
303 			vcpu->arch.isa[0] = reg_val;
304 			kvm_riscv_vcpu_fp_reset(vcpu);
305 		} else {
306 			return -EBUSY;
307 		}
308 		break;
309 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
310 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
311 			return -ENOENT;
312 		if (reg_val != riscv_cbom_block_size)
313 			return -EINVAL;
314 		break;
315 	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
316 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
317 			return -ENOENT;
318 		if (reg_val != riscv_cboz_block_size)
319 			return -EINVAL;
320 		break;
321 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
322 		if (reg_val == vcpu->arch.mvendorid)
323 			break;
324 		if (!vcpu->arch.ran_atleast_once)
325 			vcpu->arch.mvendorid = reg_val;
326 		else
327 			return -EBUSY;
328 		break;
329 	case KVM_REG_RISCV_CONFIG_REG(marchid):
330 		if (reg_val == vcpu->arch.marchid)
331 			break;
332 		if (!vcpu->arch.ran_atleast_once)
333 			vcpu->arch.marchid = reg_val;
334 		else
335 			return -EBUSY;
336 		break;
337 	case KVM_REG_RISCV_CONFIG_REG(mimpid):
338 		if (reg_val == vcpu->arch.mimpid)
339 			break;
340 		if (!vcpu->arch.ran_atleast_once)
341 			vcpu->arch.mimpid = reg_val;
342 		else
343 			return -EBUSY;
344 		break;
345 	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
346 		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
347 			return -EINVAL;
348 		break;
349 	default:
350 		return -ENOENT;
351 	}
352 
353 	return 0;
354 }
355 
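/*
 * Read a CORE register: sepc (regs.pc), a general purpose register,
 * or the privilege mode.
 */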
356 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
357 				       const struct kvm_one_reg *reg)
358 {
359 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
360 	unsigned long __user *uaddr =
361 			(unsigned long __user *)(unsigned long)reg->addr;
362 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
363 					    KVM_REG_SIZE_MASK |
364 					    KVM_REG_RISCV_CORE);
365 	unsigned long reg_val;
366 
367 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
368 		return -EINVAL;
369 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
370 		return -ENOENT;
371 
372 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
373 		reg_val = cntx->sepc;
374 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
375 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
376 		reg_val = ((unsigned long *)cntx)[reg_num];
377 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
378 		reg_val = (cntx->sstatus & SR_SPP) ?
379 				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
380 	else
381 		return -ENOENT;
382 
383 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
384 		return -EFAULT;
385 
386 	return 0;
387 }
388 
389 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
390 				       const struct kvm_one_reg *reg)
391 {
392 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
393 	unsigned long __user *uaddr =
394 			(unsigned long __user *)(unsigned long)reg->addr;
395 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
396 					    KVM_REG_SIZE_MASK |
397 					    KVM_REG_RISCV_CORE);
398 	unsigned long reg_val;
399 
400 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
401 		return -EINVAL;
402 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
403 		return -ENOENT;
404 
405 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
406 		return -EFAULT;
407 
408 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
409 		cntx->sepc = reg_val;
410 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
411 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
412 		((unsigned long *)cntx)[reg_num] = reg_val;
413 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
414 		if (reg_val == KVM_RISCV_MODE_S)
415 			cntx->sstatus |= SR_SPP;
416 		else
417 			cntx->sstatus &= ~SR_SPP;
418 	} else
419 		return -ENOENT;
420 
421 	return 0;
422 }
423 
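/*
 * Read a general CSR. For sip, pending interrupts are flushed first and
 * the value is derived from the saved hvip value.
 */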
424 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
425 					  unsigned long reg_num,
426 					  unsigned long *out_val)
427 {
428 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
429 
430 	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
431 		return -ENOENT;
432 
433 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
434 		kvm_riscv_vcpu_flush_interrupts(vcpu);
435 		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
436 		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
437 	} else
438 		*out_val = ((unsigned long *)csr)[reg_num];
439 
440 	return 0;
441 }
442 
443 static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
444 					  unsigned long reg_num,
445 					  unsigned long reg_val)
446 {
447 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
448 
449 	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
450 		return -ENOENT;
451 
452 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
453 		reg_val &= VSIP_VALID_MASK;
454 		reg_val <<= VSIP_TO_HVIP_SHIFT;
455 	}
456 
457 	((unsigned long *)csr)[reg_num] = reg_val;
458 
459 	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
460 		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
461 
462 	return 0;
463 }
464 
465 static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
466 						   unsigned long reg_num,
467 						   unsigned long reg_val)
468 {
469 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
470 
471 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
472 		sizeof(unsigned long))
473 		return -EINVAL;
474 
475 	((unsigned long *)csr)[reg_num] = reg_val;
476 	return 0;
477 }
478 
479 static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
480 					    unsigned long reg_num,
481 					    unsigned long *out_val)
482 {
483 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
484 
485 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
486 		sizeof(unsigned long))
487 		return -EINVAL;
488 
489 	*out_val = ((unsigned long *)csr)[reg_num];
490 	return 0;
491 }
492 
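/* Read a CSR register, dispatching on the general, AIA or Smstateen subtype */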
493 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
494 				      const struct kvm_one_reg *reg)
495 {
496 	int rc;
497 	unsigned long __user *uaddr =
498 			(unsigned long __user *)(unsigned long)reg->addr;
499 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
500 					    KVM_REG_SIZE_MASK |
501 					    KVM_REG_RISCV_CSR);
502 	unsigned long reg_val, reg_subtype;
503 
504 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
505 		return -EINVAL;
506 
507 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
508 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
509 	switch (reg_subtype) {
510 	case KVM_REG_RISCV_CSR_GENERAL:
511 		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
512 		break;
513 	case KVM_REG_RISCV_CSR_AIA:
514 		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
515 		break;
516 	case KVM_REG_RISCV_CSR_SMSTATEEN:
517 		rc = -EINVAL;
518 		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
519 			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
520 							      &reg_val);
521 		break;
522 	default:
523 		rc = -ENOENT;
524 		break;
525 	}
526 	if (rc)
527 		return rc;
528 
529 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
530 		return -EFAULT;
531 
532 	return 0;
533 }
534 
535 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
536 				      const struct kvm_one_reg *reg)
537 {
538 	int rc;
539 	unsigned long __user *uaddr =
540 			(unsigned long __user *)(unsigned long)reg->addr;
541 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
542 					    KVM_REG_SIZE_MASK |
543 					    KVM_REG_RISCV_CSR);
544 	unsigned long reg_val, reg_subtype;
545 
546 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
547 		return -EINVAL;
548 
549 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
550 		return -EFAULT;
551 
552 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
553 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
554 	switch (reg_subtype) {
555 	case KVM_REG_RISCV_CSR_GENERAL:
556 		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
557 		break;
558 	case KVM_REG_RISCV_CSR_AIA:
559 		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
560 		break;
561 	case KVM_REG_RISCV_CSR_SMSTATEEN:
562 		rc = -EINVAL;
563 		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
564 			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
565 							      reg_val);
566 		break;
567 	default:
568 		rc = -ENOENT;
569 		break;
570 	}
571 	if (rc)
572 		return rc;
573 
574 	return 0;
575 }
576 
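/*
 * Report whether a single ISA extension is enabled (1) or disabled (0)
 * for the VCPU.
 */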
577 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
578 					 unsigned long reg_num,
579 					 unsigned long *reg_val)
580 {
581 	unsigned long host_isa_ext;
582 
583 	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
584 	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
585 		return -ENOENT;
586 
587 	host_isa_ext = kvm_isa_ext_arr[reg_num];
588 	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
589 		return -ENOENT;
590 
591 	*reg_val = 0;
592 	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
593 		*reg_val = 1; /* Mark the given extension as available */
594 
595 	return 0;
596 }
597 
598 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
599 					 unsigned long reg_num,
600 					 unsigned long reg_val)
601 {
602 	unsigned long host_isa_ext;
603 
604 	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
605 	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
606 		return -ENOENT;
607 
608 	host_isa_ext = kvm_isa_ext_arr[reg_num];
609 	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
610 		return -ENOENT;
611 
612 	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
613 		return 0;
614 
615 	if (!vcpu->arch.ran_atleast_once) {
616 		/*
617 		 * All multi-letter extensions and a few single-letter
618 		 * extensions can be disabled
619 		 */
620 		if (reg_val == 1 &&
621 		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
622 			set_bit(host_isa_ext, vcpu->arch.isa);
623 		else if (!reg_val &&
624 			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
625 			clear_bit(host_isa_ext, vcpu->arch.isa);
626 		else
627 			return -EINVAL;
628 		kvm_riscv_vcpu_fp_reset(vcpu);
629 	} else {
630 		return -EBUSY;
631 	}
632 
633 	return 0;
634 }
635 
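/*
 * Read one word of the multi-extension bitmap; each bit reports whether
 * the corresponding ISA extension is enabled for the VCPU.
 */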
636 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
637 					unsigned long reg_num,
638 					unsigned long *reg_val)
639 {
640 	unsigned long i, ext_id, ext_val;
641 
642 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
643 		return -ENOENT;
644 
645 	for (i = 0; i < BITS_PER_LONG; i++) {
646 		ext_id = i + reg_num * BITS_PER_LONG;
647 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
648 			break;
649 
650 		ext_val = 0;
651 		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
652 		if (ext_val)
653 			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
654 	}
655 
656 	return 0;
657 }
658 
659 static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
660 					unsigned long reg_num,
661 					unsigned long reg_val, bool enable)
662 {
663 	unsigned long i, ext_id;
664 
665 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
666 		return -ENOENT;
667 
668 	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
669 		ext_id = i + reg_num * BITS_PER_LONG;
670 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
671 			break;
672 
673 		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
674 	}
675 
676 	return 0;
677 }
678 
679 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
680 					  const struct kvm_one_reg *reg)
681 {
682 	int rc;
683 	unsigned long __user *uaddr =
684 			(unsigned long __user *)(unsigned long)reg->addr;
685 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
686 					    KVM_REG_SIZE_MASK |
687 					    KVM_REG_RISCV_ISA_EXT);
688 	unsigned long reg_val, reg_subtype;
689 
690 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
691 		return -EINVAL;
692 
693 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
694 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
695 
696 	reg_val = 0;
697 	switch (reg_subtype) {
698 	case KVM_REG_RISCV_ISA_SINGLE:
699 		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
700 		break;
701 	case KVM_REG_RISCV_ISA_MULTI_EN:
702 	case KVM_REG_RISCV_ISA_MULTI_DIS:
703 		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
704 		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
705 			reg_val = ~reg_val;
706 		break;
707 	default:
708 		rc = -ENOENT;
709 	}
710 	if (rc)
711 		return rc;
712 
713 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
714 		return -EFAULT;
715 
716 	return 0;
717 }
718 
719 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
720 					  const struct kvm_one_reg *reg)
721 {
722 	unsigned long __user *uaddr =
723 			(unsigned long __user *)(unsigned long)reg->addr;
724 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
725 					    KVM_REG_SIZE_MASK |
726 					    KVM_REG_RISCV_ISA_EXT);
727 	unsigned long reg_val, reg_subtype;
728 
729 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
730 		return -EINVAL;
731 
732 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
733 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
734 
735 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
736 		return -EFAULT;
737 
738 	switch (reg_subtype) {
739 	case KVM_REG_RISCV_ISA_SINGLE:
740 		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
741 	case KVM_REG_RISCV_ISA_MULTI_EN:
742 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
743 	case KVM_REG_RISCV_ISA_MULTI_DIS:
744 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
745 	default:
746 		return -ENOENT;
747 	}
748 
749 	return 0;
750 }
751 
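/*
 * Copy the CONFIG register indices to the user-space buffer, or just
 * count them when uindices is NULL.
 */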
752 static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
753 				u64 __user *uindices)
754 {
755 	int n = 0;
756 
757 	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
758 		 i++) {
759 		u64 size;
760 		u64 reg;
761 
762 		/*
763 		 * Avoid reporting config reg if the corresponding extension
764 		 * is not available.
765 		 */
766 		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
767 			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
768 			continue;
769 		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
770 			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
771 			continue;
772 
773 		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
774 		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
775 
776 		if (uindices) {
777 			if (put_user(reg, uindices))
778 				return -EFAULT;
779 			uindices++;
780 		}
781 
782 		n++;
783 	}
784 
785 	return n;
786 }
787 
788 static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
789 {
790 	return copy_config_reg_indices(vcpu, NULL);
791 }
792 
793 static inline unsigned long num_core_regs(void)
794 {
795 	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
796 }
797 
798 static int copy_core_reg_indices(u64 __user *uindices)
799 {
800 	int n = num_core_regs();
801 
802 	for (int i = 0; i < n; i++) {
803 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
804 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
805 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
806 
807 		if (uindices) {
808 			if (put_user(reg, uindices))
809 				return -EFAULT;
810 			uindices++;
811 		}
812 	}
813 
814 	return n;
815 }
816 
817 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
818 {
819 	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
820 
821 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
822 		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
823 	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
824 		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
825 
826 	return n;
827 }
828 
829 static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
830 				u64 __user *uindices)
831 {
832 	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
833 	int n2 = 0, n3 = 0;
834 
835 	/* copy general csr regs */
836 	for (int i = 0; i < n1; i++) {
837 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
838 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
839 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
840 				  KVM_REG_RISCV_CSR_GENERAL | i;
841 
842 		if (uindices) {
843 			if (put_user(reg, uindices))
844 				return -EFAULT;
845 			uindices++;
846 		}
847 	}
848 
849 	/* copy AIA csr regs */
850 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
851 		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
852 
853 		for (int i = 0; i < n2; i++) {
854 			u64 size = IS_ENABLED(CONFIG_32BIT) ?
855 				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
856 			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
857 					  KVM_REG_RISCV_CSR_AIA | i;
858 
859 			if (uindices) {
860 				if (put_user(reg, uindices))
861 					return -EFAULT;
862 				uindices++;
863 			}
864 		}
865 	}
866 
867 	/* copy Smstateen csr regs */
868 	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
869 		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
870 
871 		for (int i = 0; i < n3; i++) {
872 			u64 size = IS_ENABLED(CONFIG_32BIT) ?
873 				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
874 			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
875 					  KVM_REG_RISCV_CSR_SMSTATEEN | i;
876 
877 			if (uindices) {
878 				if (put_user(reg, uindices))
879 					return -EFAULT;
880 				uindices++;
881 			}
882 		}
883 	}
884 
885 	return n1 + n2 + n3;
886 }
887 
888 static inline unsigned long num_timer_regs(void)
889 {
890 	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
891 }
892 
893 static int copy_timer_reg_indices(u64 __user *uindices)
894 {
895 	int n = num_timer_regs();
896 
897 	for (int i = 0; i < n; i++) {
898 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
899 			  KVM_REG_RISCV_TIMER | i;
900 
901 		if (uindices) {
902 			if (put_user(reg, uindices))
903 				return -EFAULT;
904 			uindices++;
905 		}
906 	}
907 
908 	return n;
909 }
910 
911 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
912 {
913 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
914 
915 	if (riscv_isa_extension_available(vcpu->arch.isa, f))
916 		return sizeof(cntx->fp.f) / sizeof(u32);
917 	else
918 		return 0;
919 }
920 
921 static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
922 				u64 __user *uindices)
923 {
924 	int n = num_fp_f_regs(vcpu);
925 
926 	for (int i = 0; i < n; i++) {
927 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
928 			  KVM_REG_RISCV_FP_F | i;
929 
930 		if (uindices) {
931 			if (put_user(reg, uindices))
932 				return -EFAULT;
933 			uindices++;
934 		}
935 	}
936 
937 	return n;
938 }
939 
940 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
941 {
942 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
943 
944 	if (riscv_isa_extension_available(vcpu->arch.isa, d))
945 		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
946 	else
947 		return 0;
948 }
949 
950 static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
951 				u64 __user *uindices)
952 {
953 	int i;
954 	int n = num_fp_d_regs(vcpu);
955 	u64 reg;
956 
957 	/* copy fp.d.f indices */
958 	for (i = 0; i < n-1; i++) {
959 		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
960 		      KVM_REG_RISCV_FP_D | i;
961 
962 		if (uindices) {
963 			if (put_user(reg, uindices))
964 				return -EFAULT;
965 			uindices++;
966 		}
967 	}
968 
969 	/* copy fp.d.fcsr indices */
970 	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
971 	if (uindices) {
972 		if (put_user(reg, uindices))
973 			return -EFAULT;
974 		uindices++;
975 	}
976 
977 	return n;
978 }
979 
980 static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
981 				u64 __user *uindices)
982 {
983 	unsigned int n = 0;
984 	unsigned long isa_ext;
985 
986 	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
987 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
988 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
989 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
990 
991 		isa_ext = kvm_isa_ext_arr[i];
992 		if (!__riscv_isa_extension_available(NULL, isa_ext))
993 			continue;
994 
995 		if (uindices) {
996 			if (put_user(reg, uindices))
997 				return -EFAULT;
998 			uindices++;
999 		}
1000 
1001 		n++;
1002 	}
1003 
1004 	return n;
1005 }
1006 
1007 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
1008 {
1009 	return copy_isa_ext_reg_indices(vcpu, NULL);
1010 }
1011 
1012 static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1013 {
1014 	unsigned int n = 0;
1015 
1016 	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
1017 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
1018 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1019 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
1020 			  KVM_REG_RISCV_SBI_SINGLE | i;
1021 
1022 		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
1023 			continue;
1024 
1025 		if (uindices) {
1026 			if (put_user(reg, uindices))
1027 				return -EFAULT;
1028 			uindices++;
1029 		}
1030 
1031 		n++;
1032 	}
1033 
1034 	return n;
1035 }
1036 
1037 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
1038 {
1039 	return copy_sbi_ext_reg_indices(vcpu, NULL);
1040 }
1041 
1042 static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1043 {
1044 	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
1045 	int total = 0;
1046 
1047 	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
1048 		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1049 		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
1050 
1051 		for (int i = 0; i < n; i++) {
1052 			u64 reg = KVM_REG_RISCV | size |
1053 				  KVM_REG_RISCV_SBI_STATE |
1054 				  KVM_REG_RISCV_SBI_STA | i;
1055 
1056 			if (uindices) {
1057 				if (put_user(reg, uindices))
1058 					return -EFAULT;
1059 				uindices++;
1060 			}
1061 		}
1062 
1063 		total += n;
1064 	}
1065 
1066 	return total;
1067 }
1068 
1069 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
1070 {
1071 	return copy_sbi_reg_indices(vcpu, NULL);
1072 }
1073 
1074 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
1075 {
1076 	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
1077 		return 0;
1078 
1079 	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
1080 	return 37;
1081 }
1082 
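/*
 * Copy the VECTOR register indices. The CSR-like registers (vstart, vl,
 * vtype, vcsr, vlenb) use the host word size while the 32 vector
 * registers encode their register size from vlenb.
 */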
1083 static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
1084 				u64 __user *uindices)
1085 {
1086 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
1087 	int n = num_vector_regs(vcpu);
1088 	u64 reg, size;
1089 	int i;
1090 
1091 	if (n == 0)
1092 		return 0;
1093 
1094 	/* copy vstart, vl, vtype, vcsr and vlenb */
1095 	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1096 	for (i = 0; i < 5; i++) {
1097 		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;
1098 
1099 		if (uindices) {
1100 			if (put_user(reg, uindices))
1101 				return -EFAULT;
1102 			uindices++;
1103 		}
1104 	}
1105 
1106 	/* vector_regs have a variable 'vlenb' size */
1107 	size = __builtin_ctzl(cntx->vector.vlenb);
1108 	size <<= KVM_REG_SIZE_SHIFT;
1109 	for (i = 0; i < 32; i++) {
1110 		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
1111 			KVM_REG_RISCV_VECTOR_REG(i);
1112 
1113 		if (uindices) {
1114 			if (put_user(reg, uindices))
1115 				return -EFAULT;
1116 			uindices++;
1117 		}
1118 	}
1119 
1120 	return n;
1121 }
1122 
1123 /*
1124  * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
1125  *
1126  * This is for all registers.
1127  */
1128 unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
1129 {
1130 	unsigned long res = 0;
1131 
1132 	res += num_config_regs(vcpu);
1133 	res += num_core_regs();
1134 	res += num_csr_regs(vcpu);
1135 	res += num_timer_regs();
1136 	res += num_fp_f_regs(vcpu);
1137 	res += num_fp_d_regs(vcpu);
1138 	res += num_vector_regs(vcpu);
1139 	res += num_isa_ext_regs(vcpu);
1140 	res += num_sbi_ext_regs(vcpu);
1141 	res += num_sbi_regs(vcpu);
1142 
1143 	return res;
1144 }
1145 
1146 /*
1147  * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
1148  */
1149 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
1150 				    u64 __user *uindices)
1151 {
1152 	int ret;
1153 
1154 	ret = copy_config_reg_indices(vcpu, uindices);
1155 	if (ret < 0)
1156 		return ret;
1157 	uindices += ret;
1158 
1159 	ret = copy_core_reg_indices(uindices);
1160 	if (ret < 0)
1161 		return ret;
1162 	uindices += ret;
1163 
1164 	ret = copy_csr_reg_indices(vcpu, uindices);
1165 	if (ret < 0)
1166 		return ret;
1167 	uindices += ret;
1168 
1169 	ret = copy_timer_reg_indices(uindices);
1170 	if (ret < 0)
1171 		return ret;
1172 	uindices += ret;
1173 
1174 	ret = copy_fp_f_reg_indices(vcpu, uindices);
1175 	if (ret < 0)
1176 		return ret;
1177 	uindices += ret;
1178 
1179 	ret = copy_fp_d_reg_indices(vcpu, uindices);
1180 	if (ret < 0)
1181 		return ret;
1182 	uindices += ret;
1183 
1184 	ret = copy_vector_reg_indices(vcpu, uindices);
1185 	if (ret < 0)
1186 		return ret;
1187 	uindices += ret;
1188 
1189 	ret = copy_isa_ext_reg_indices(vcpu, uindices);
1190 	if (ret < 0)
1191 		return ret;
1192 	uindices += ret;
1193 
1194 	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
1195 	if (ret < 0)
1196 		return ret;
1197 	uindices += ret;
1198 
1199 	ret = copy_sbi_reg_indices(vcpu, uindices);
1200 	if (ret < 0)
1201 		return ret;
1202 	uindices += ret;
1203 
1204 	return 0;
1205 }
1206 
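/* Top-level KVM_SET_ONE_REG handler: dispatch on the RISC-V register type */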
1207 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
1208 			   const struct kvm_one_reg *reg)
1209 {
1210 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1211 	case KVM_REG_RISCV_CONFIG:
1212 		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1213 	case KVM_REG_RISCV_CORE:
1214 		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1215 	case KVM_REG_RISCV_CSR:
1216 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1217 	case KVM_REG_RISCV_TIMER:
1218 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1219 	case KVM_REG_RISCV_FP_F:
1220 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1221 						 KVM_REG_RISCV_FP_F);
1222 	case KVM_REG_RISCV_FP_D:
1223 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1224 						 KVM_REG_RISCV_FP_D);
1225 	case KVM_REG_RISCV_VECTOR:
1226 		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1227 	case KVM_REG_RISCV_ISA_EXT:
1228 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1229 	case KVM_REG_RISCV_SBI_EXT:
1230 		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1231 	case KVM_REG_RISCV_SBI_STATE:
1232 		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
1233 	default:
1234 		break;
1235 	}
1236 
1237 	return -ENOENT;
1238 }
1239 
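/* Top-level KVM_GET_ONE_REG handler: dispatch on the RISC-V register type */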
1240 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1241 			   const struct kvm_one_reg *reg)
1242 {
1243 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1244 	case KVM_REG_RISCV_CONFIG:
1245 		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1246 	case KVM_REG_RISCV_CORE:
1247 		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1248 	case KVM_REG_RISCV_CSR:
1249 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1250 	case KVM_REG_RISCV_TIMER:
1251 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1252 	case KVM_REG_RISCV_FP_F:
1253 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1254 						 KVM_REG_RISCV_FP_F);
1255 	case KVM_REG_RISCV_FP_D:
1256 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1257 						 KVM_REG_RISCV_FP_D);
1258 	case KVM_REG_RISCV_VECTOR:
1259 		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1260 	case KVM_REG_RISCV_ISA_EXT:
1261 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1262 	case KVM_REG_RISCV_SBI_EXT:
1263 		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1264 	case KVM_REG_RISCV_SBI_STATE:
1265 		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
1266 	default:
1267 		break;
1268 	}
1269 
1270 	return -ENOENT;
1271 }
1272