// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
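	/*
	 * Editor's note: the asymmetric mapping below appears intentional;
	 * the guest's Smnpm is provided through the host's Ssnpm, since the
	 * hypervisor controls guest pointer masking via henvcfg.
	 */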
	[KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM,
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

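/*
 * Reverse lookup into kvm_isa_ext_arr: map a host ISA extension ID to the
 * corresponding KVM ISA extension ID, or KVM_RISCV_ISA_EXT_MAX if there is
 * no mapping.
 */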
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

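/* Check whether userspace is allowed to enable the given extension for a vCPU */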
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in Ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

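/* Check whether userspace is allowed to disable the given extension for a vCPU */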
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SMNPM:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	default:
		break;
	}

	return true;
}

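/*
 * Populate the default vCPU ISA bitmap: every extension the host implements
 * and that KVM allows to be enabled starts out enabled for the guest.
 */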
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

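/* KVM_GET_ONE_REG handler for the KVM_REG_RISCV_CONFIG register space */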
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

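/* KVM_SET_ONE_REG handler for the KVM_REG_RISCV_CONFIG register space */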
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

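/*
 * Read one guest core register: pc maps to sepc, the GPRs index straight
 * into the guest context, and "mode" is derived from sstatus.SPP.
 */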
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

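/* Write one guest core register; "mode" toggles sstatus.SPP between S and U */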
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

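/*
 * Read a general CSR. sip is special: its value is derived from the shadow
 * hvip after flushing any software-pending interrupt updates.
 */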
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

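/*
 * Write a general CSR. A write to sip is translated into hvip bits and
 * clears any not-yet-applied pending-interrupt updates.
 */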
static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

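/* KVM_GET_ONE_REG handler for the CSR space: general, AIA and Smstateen subtypes */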
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

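/* Report whether a single ISA extension is enabled for this vCPU (0 or 1) */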
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

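/*
 * ISA_MULTI registers pack one enable bit per KVM ISA extension ID,
 * BITS_PER_LONG IDs per register.
 */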
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

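/*
 * Enumerate CONFIG register indices. When uindices is NULL this only counts
 * the registers, which is how num_config_regs() reuses it.
 */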
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

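/*
 * Userspace sketch (an illustration, not part of this file): the list built
 * above is consumed via the generic KVM_GET_REG_LIST vCPU ioctl, and each
 * returned index is then usable with KVM_GET_ONE_REG/KVM_SET_ONE_REG, e.g.:
 *
 *	struct kvm_reg_list *list;	// allocated with n primed by a
 *					// first KVM_GET_REG_LIST call
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	unsigned long val;
 *	struct kvm_one_reg one = {
 *		.id = list->reg[0],
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one);
 */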
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}
1276