// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
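
/*
 * For example, KVM_ISA_EXT_ARR(SSTC) expands to:
 *	[KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC
 * so each KVM extension ID indexes the matching host extension ID below.
 */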

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

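/*
 * Illustrative userspace sketch (not part of this file): reading the
 * base ISA word handled above via KVM_GET_ONE_REG. The vcpu_fd is an
 * assumption; KVM_REG_SIZE_U64 matches sizeof(unsigned long) on RV64.
 *
 *	unsigned long isa = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_CONFIG |
 *			KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */
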
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

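/*
 * Illustrative userspace sketch (not part of this file): setting the
 * guest program counter through the core register path above, before
 * the first KVM_RUN. The vcpu_fd and entry address are assumptions.
 *
 *	unsigned long pc = 0x80000000UL;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_CORE |
 *			KVM_REG_RISCV_CORE_REG(regs.pc),
 *		.addr = (unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
 *		perror("KVM_SET_ONE_REG");
 */
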
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

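/*
 * Illustrative userspace sketch (not part of this file): disabling one
 * extension before the first KVM_RUN, mirroring the checks in
 * riscv_vcpu_set_isa_ext_single(). Svpbmt is just an example choice;
 * the vcpu_fd is an assumption.
 *
 *	unsigned long val = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
 *			KVM_RISCV_ISA_EXT_SVPBMT,
 *		.addr = (unsigned long)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
 *		perror("KVM_SET_ONE_REG");
 */
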
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

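/*
 * Passing NULL as uindices to the copy_*_reg_indices() helpers turns
 * them into pure counters: the put_user() calls are skipped and only
 * the number of register indices is returned.
 */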
static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

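/*
 * Example of the variable size encoding above: with vlenb = 16 bytes
 * (VLEN = 128), __builtin_ctzl(16) = 4 and the size field becomes
 * 4 << KVM_REG_SIZE_SHIFT, i.e. KVM_REG_SIZE_U128 for each vector reg.
 */
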
/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

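/*
 * Illustrative userspace sketch (not part of this file): the two entry
 * points above back the KVM_GET_REG_LIST ioctl. A first call with n = 0
 * fails with E2BIG but writes back the register count, after which a
 * properly sized second call fills the index array. The vcpu_fd is an
 * assumption.
 *
 *	struct kvm_reg_list hdr = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);
 *	list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0)
 *		perror("KVM_GET_REG_LIST");
 */
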
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}
1258