// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

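/*
 * Convert a host base ISA extension number (bit position in the base
 * ISA word) into the matching KVM_RISCV_ISA_EXT_* ID, or return
 * KVM_RISCV_ISA_EXT_MAX when there is no mapping.
 */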
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

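/*
 * An extension may be enabled for a Guest/VM only if KVM can virtualize
 * it: H is never exposed to Guests, and V is allowed only when
 * userspace vector state control permits it.
 */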
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

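/*
 * Extensions listed below cannot be hidden from the Guest once
 * exposed, so disabling them is refused; Ssaia is the exception and
 * can be disabled, but only when the host implements Smstateen.
 */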
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

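/*
 * Populate the default vCPU ISA bitmap: every extension known to the
 * mapping table that is both available on the host and allowed to be
 * enabled is turned on.
 */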
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

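/*
 * Read one CONFIG register (base ISA word, cache-block sizes, machine
 * ID registers, or satp mode) and copy the value out to userspace.
 */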
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

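/*
 * Write one CONFIG register. The base ISA word, mvendorid, marchid and
 * mimpid may only change before the vCPU first runs; the cache-block
 * sizes and satp mode are read-only and must match the host values.
 */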
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

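/*
 * CORE registers map onto struct kvm_riscv_core: sepc is reported as
 * regs.pc, the GPRs are indexed directly into the guest context, and
 * the virtual privilege mode is derived from sstatus.SPP.
 */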
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

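/*
 * General CSR accessors: sip is synthesized from (and folded back
 * into) hvip, while all other CSRs are indexed directly in
 * struct kvm_riscv_csr.
 */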
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

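/*
 * CSR registers are further split by subtype: general CSRs, AIA CSRs,
 * and Smstateen CSRs, the latter only when the host has Smstateen.
 */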
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

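/*
 * Report whether a single ISA extension is enabled for the vCPU;
 * extensions unknown to KVM or absent on the host return -ENOENT.
 */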
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

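/*
 * Enable or disable a single ISA extension, subject to the
 * enable/disable policies above; changes are accepted only before the
 * vCPU first runs.
 */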
static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

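/*
 * The MULTI registers pack extension IDs into bitmaps, BITS_PER_LONG
 * IDs per register; reg_num selects which BITS_PER_LONG-sized chunk of
 * the ID space a register covers.
 */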
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

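/*
 * The index-copy helpers below walk each register subset; when the
 * "uindices" pointer is NULL they only count entries, which lets the
 * num_*_regs() wrappers reuse the same walk.
 */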
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;
		/*
		 * Avoid reporting a config reg if the corresponding
		 * extension is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

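/*
 * The D-extension index list covers the fp.d.f registers as 64-bit
 * entries plus one trailing 32-bit entry for fcsr, which is why
 * num_fp_d_regs() above counts one extra register.
 */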
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

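/*
 * Vector registers do not have a fixed size: each of the 32 vector
 * registers is vlenb bytes wide, so the index below encodes
 * log2(vlenb) (i.e. __builtin_ctzl() of the power-of-two vlenb) in the
 * KVM_REG_SIZE field.
 */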
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via
 *			     KVM_GET/SET_ONE_REG
 *
 * This is the total across all supported register subsets.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

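/*
 * Userspace discovers the indices produced above via the
 * KVM_GET_REG_LIST vCPU ioctl and can then access each register with
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG. A minimal sketch of that flow
 * (userspace side, error handling elided, "vcpu_fd" being a
 * hypothetical vCPU file descriptor; a real caller must size the value
 * buffer per KVM_REG_SIZE(id)):
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);  // E2BIG, sets probe.n
 *	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	for (__u64 i = 0; i < list->n; i++) {
 *		__u64 val;
 *		struct kvm_one_reg reg = {
 *			.id = list->reg[i],
 *			.addr = (__u64)&val,
 *		};
 *		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *	}
 */
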
/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

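/*
 * Top-level ONE_REG dispatch: the KVM_REG_RISCV_TYPE field of the
 * register ID selects the subset-specific handler for set/get.
 */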
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}