xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  * Copyright (C) 2023 Ventana Micro Systems Inc.
5  *
6  * Authors:
7  *	Anup Patel <apatel@ventanamicro.com>
8  */
9 
10 #include <linux/bitops.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/uaccess.h>
14 #include <linux/kvm_host.h>
15 #include <asm/cacheflush.h>
16 #include <asm/cpufeature.h>
17 #include <asm/kvm_vcpu_vector.h>
18 #include <asm/pgtable.h>
19 #include <asm/vector.h>
20 
/* Mask of the 26 single-letter base ISA extension bits (a..z) in isa[0] */
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

/* Build one table slot: KVM extension ID -> matching host extension ID */
#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/*
 * Mapping between KVM ISA Extension ID & guest ISA extension ID.
 * Indexed by KVM_RISCV_ISA_EXT_*; each entry holds the corresponding
 * RISCV_ISA_EXT_* value used by the host ISA bitmap helpers.
 */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMNPM),
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZAAMO),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZALASR),
	KVM_ISA_EXT_ARR(ZALRSC),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCLSD),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFBFMIN),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOP),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZILSD),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFBFMIN),
	KVM_ISA_EXT_ARR(ZVFBFWMA),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};
109 
110 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
111 {
112 	unsigned long i;
113 
114 	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
115 		if (kvm_isa_ext_arr[i] == base_ext)
116 			return i;
117 	}
118 
119 	return KVM_RISCV_ISA_EXT_MAX;
120 }
121 
122 static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
123 {
124 	unsigned long host_ext;
125 
126 	if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
127 	    kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
128 		return -ENOENT;
129 
130 	*guest_ext = kvm_isa_ext_arr[kvm_ext];
131 	switch (*guest_ext) {
132 	case RISCV_ISA_EXT_SMNPM:
133 		/*
134 		 * Pointer masking effective in (H)S-mode is provided by the
135 		 * Smnpm extension, so that extension is reported to the guest,
136 		 * even though the CSR bits for configuring VS-mode pointer
137 		 * masking on the host side are part of the Ssnpm extension.
138 		 */
139 		host_ext = RISCV_ISA_EXT_SSNPM;
140 		break;
141 	default:
142 		host_ext = *guest_ext;
143 		break;
144 	}
145 
146 	if (!__riscv_isa_extension_available(NULL, host_ext))
147 		return -ENOENT;
148 
149 	return 0;
150 }
151 
152 static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
153 {
154 	switch (ext) {
155 	case KVM_RISCV_ISA_EXT_H:
156 		return false;
157 	case KVM_RISCV_ISA_EXT_SSCOFPMF:
158 		/* Sscofpmf depends on interrupt filtering defined in ssaia */
159 		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
160 	case KVM_RISCV_ISA_EXT_SVADU:
161 		/*
162 		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
163 		 * Guest OS can use Svadu only when host OS enable Svadu.
164 		 */
165 		return arch_has_hw_pte_young();
166 	case KVM_RISCV_ISA_EXT_V:
167 		return riscv_v_vstate_ctrl_user_allowed();
168 	default:
169 		break;
170 	}
171 
172 	return true;
173 }
174 
/*
 * Return true if userspace may disable the given KVM ISA extension for
 * a vCPU.  The large first group of cases has no mechanism to hide the
 * extension from a guest, so disabling is refused; a few extensions can
 * be disabled only when a host-side control (Smstateen, Svadu) exists.
 */
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	/* There is not architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZAAMO:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZALASR:
	case KVM_RISCV_ISA_EXT_ZALRSC:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFBFMIN:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICBOP:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFBFMIN:
	case KVM_RISCV_ISA_EXT_ZVFBFWMA:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless we support Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}
260 
261 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
262 {
263 	unsigned long guest_ext, i;
264 
265 	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
266 		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
267 			continue;
268 		if (kvm_riscv_vcpu_isa_enable_allowed(i))
269 			set_bit(guest_ext, vcpu->arch.isa);
270 	}
271 }
272 
/*
 * KVM_GET_ONE_REG handler for the CONFIG register space: base ISA bits,
 * CBO block sizes, machine ID CSRs and the host SATP mode.  Returns
 * -EINVAL for a wrong register size, -ENOENT for unknown registers or
 * registers whose backing extension is absent, -EFAULT on copy failure.
 */
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/* Only the single-letter base bits are exposed here */
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		reg_val = riscv_cbop_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
326 
/*
 * KVM_SET_ONE_REG handler for the CONFIG register space.
 *
 * The ISA bits and machine ID CSRs may only be changed before the vCPU
 * has run (-EBUSY afterwards); the CBO block sizes and satp_mode are
 * host properties and only accept writes of their current value.
 */
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					/* Bit has no KVM mapping: force it off */
					reg_val &= ~BIT(i);
					continue;
				}
				/* Silently drop disallowed enables ... */
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				/* ... and disallowed disables */
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			/* Never enable bits the host itself lacks */
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		if (reg_val != riscv_cbop_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
436 
/*
 * KVM_GET_ONE_REG handler for the CORE register space (pc, x1-x31 and
 * the virtual privilege mode).  The GPRs are read by indexing directly
 * into struct kvm_cpu_context, whose layout mirrors struct
 * kvm_riscv_core for these registers; "mode" is synthesized from
 * sstatus.SPP.
 */
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;	/* guest pc lives in sepc */
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
469 
/*
 * KVM_SET_ONE_REG handler for the CORE register space; mirror image of
 * kvm_riscv_vcpu_get_reg_core().  Writing "mode" sets or clears
 * sstatus.SPP so the guest resumes in S- or U-mode.
 */
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;	/* guest pc lives in sepc */
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}
504 
/*
 * Read one general guest CSR by index into struct kvm_vcpu_csr.
 * "sip" is special: pending interrupts are flushed first, then the
 * value is derived from hvip (shifted VS bits plus the bits above the
 * local-interrupt range).
 */
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		/* Make hvip reflect currently-pending interrupts first */
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
523 
/*
 * Write one general guest CSR by index into struct kvm_vcpu_csr.
 * A "sip" write is translated into hvip form (masked and shifted) and
 * also clears the pending-interrupt request mask so the stored value
 * is not immediately overridden.
 */
static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		/* sip is stored in hvip encoding */
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}
545 
546 static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
547 						   unsigned long reg_num,
548 						   unsigned long reg_val)
549 {
550 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
551 
552 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
553 		sizeof(unsigned long))
554 		return -EINVAL;
555 
556 	((unsigned long *)csr)[reg_num] = reg_val;
557 	return 0;
558 }
559 
560 static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
561 					    unsigned long reg_num,
562 					    unsigned long *out_val)
563 {
564 	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
565 
566 	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
567 		sizeof(unsigned long))
568 		return -EINVAL;
569 
570 	*out_val = ((unsigned long *)csr)[reg_num];
571 	return 0;
572 }
573 
/*
 * KVM_GET_ONE_REG handler for the CSR register space.  Dispatches on
 * the register subtype (general / AIA / Smstateen); the Smstateen
 * subtype is only valid when the host implements Smstateen.
 */
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
615 
616 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
617 				      const struct kvm_one_reg *reg)
618 {
619 	int rc;
620 	unsigned long __user *uaddr =
621 			(unsigned long __user *)(unsigned long)reg->addr;
622 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
623 					    KVM_REG_SIZE_MASK |
624 					    KVM_REG_RISCV_CSR);
625 	unsigned long reg_val, reg_subtype;
626 
627 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
628 		return -EINVAL;
629 
630 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
631 		return -EFAULT;
632 
633 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
634 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
635 	switch (reg_subtype) {
636 	case KVM_REG_RISCV_CSR_GENERAL:
637 		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
638 		break;
639 	case KVM_REG_RISCV_CSR_AIA:
640 		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
641 		break;
642 	case KVM_REG_RISCV_CSR_SMSTATEEN:
643 		rc = -EINVAL;
644 		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
645 			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
646 							      reg_val);
647 		break;
648 	default:
649 		rc = -ENOENT;
650 		break;
651 	}
652 	if (rc)
653 		return rc;
654 
655 	return 0;
656 }
657 
658 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
659 					 unsigned long reg_num,
660 					 unsigned long *reg_val)
661 {
662 	unsigned long guest_ext;
663 	int ret;
664 
665 	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
666 	if (ret)
667 		return ret;
668 
669 	*reg_val = 0;
670 	if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
671 		*reg_val = 1; /* Mark the given extension as available */
672 
673 	return 0;
674 }
675 
676 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
677 					 unsigned long reg_num,
678 					 unsigned long reg_val)
679 {
680 	unsigned long guest_ext;
681 	int ret;
682 
683 	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
684 	if (ret)
685 		return ret;
686 
687 	if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
688 		return 0;
689 
690 	if (!vcpu->arch.ran_atleast_once) {
691 		/*
692 		 * All multi-letter extension and a few single letter
693 		 * extension can be disabled
694 		 */
695 		if (reg_val == 1 &&
696 		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
697 			set_bit(guest_ext, vcpu->arch.isa);
698 		else if (!reg_val &&
699 			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
700 			clear_bit(guest_ext, vcpu->arch.isa);
701 		else
702 			return -EINVAL;
703 		kvm_riscv_vcpu_fp_reset(vcpu);
704 	} else {
705 		return -EBUSY;
706 	}
707 
708 	return 0;
709 }
710 
711 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
712 					unsigned long reg_num,
713 					unsigned long *reg_val)
714 {
715 	unsigned long i, ext_id, ext_val;
716 
717 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
718 		return -ENOENT;
719 
720 	for (i = 0; i < BITS_PER_LONG; i++) {
721 		ext_id = i + reg_num * BITS_PER_LONG;
722 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
723 			break;
724 
725 		ext_val = 0;
726 		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
727 		if (ext_val)
728 			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
729 	}
730 
731 	return 0;
732 }
733 
734 static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
735 					unsigned long reg_num,
736 					unsigned long reg_val, bool enable)
737 {
738 	unsigned long i, ext_id;
739 
740 	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
741 		return -ENOENT;
742 
743 	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
744 		ext_id = i + reg_num * BITS_PER_LONG;
745 		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
746 			break;
747 
748 		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
749 	}
750 
751 	return 0;
752 }
753 
/*
 * KVM_GET_ONE_REG handler for the ISA_EXT register space.  SINGLE
 * returns one extension's state; MULTI_EN returns an enable bitmap and
 * MULTI_DIS its complement.
 */
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		/* MULTI_DIS is simply the inverted enable bitmap */
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
793 
794 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
795 					  const struct kvm_one_reg *reg)
796 {
797 	unsigned long __user *uaddr =
798 			(unsigned long __user *)(unsigned long)reg->addr;
799 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
800 					    KVM_REG_SIZE_MASK |
801 					    KVM_REG_RISCV_ISA_EXT);
802 	unsigned long reg_val, reg_subtype;
803 
804 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
805 		return -EINVAL;
806 
807 	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
808 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
809 
810 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
811 		return -EFAULT;
812 
813 	switch (reg_subtype) {
814 	case KVM_REG_RISCV_ISA_SINGLE:
815 		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
816 	case KVM_REG_RISCV_ISA_MULTI_EN:
817 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
818 	case KVM_REG_RISCV_ISA_MULTI_DIS:
819 		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
820 	default:
821 		return -ENOENT;
822 	}
823 
824 	return 0;
825 }
826 
/*
 * Emit (or, with a NULL uindices, just count) the ONE_REG indices of
 * all CONFIG registers available on this vCPU.  Returns the number of
 * registers, or -EFAULT on a failed user copy.
 */
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOZ))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOP))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
865 
/* Count CONFIG registers: a NULL destination makes the helper only count */
static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}
870 
/* Number of CORE registers (pc, GPRs and mode) exposed via ONE_REG */
static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}
875 
876 static int copy_core_reg_indices(u64 __user *uindices)
877 {
878 	int n = num_core_regs();
879 
880 	for (int i = 0; i < n; i++) {
881 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
882 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
883 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
884 
885 		if (uindices) {
886 			if (put_user(reg, uindices))
887 				return -EFAULT;
888 			uindices++;
889 		}
890 	}
891 
892 	return n;
893 }
894 
895 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
896 {
897 	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
898 
899 	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
900 		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
901 	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
902 		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
903 
904 	return n;
905 }
906 
/*
 * Emit (or, with a NULL uindices, just count) the ONE_REG indices of
 * all CSR registers: general CSRs always, AIA and Smstateen CSRs only
 * when the vCPU's ISA has the corresponding extension.
 */
static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}
965 
/* Number of TIMER registers exposed via ONE_REG (all are 64-bit) */
static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}
970 
971 static int copy_timer_reg_indices(u64 __user *uindices)
972 {
973 	int n = num_timer_regs();
974 
975 	for (int i = 0; i < n; i++) {
976 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
977 			  KVM_REG_RISCV_TIMER | i;
978 
979 		if (uindices) {
980 			if (put_user(reg, uindices))
981 				return -EFAULT;
982 			uindices++;
983 		}
984 	}
985 
986 	return n;
987 }
988 
989 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
990 {
991 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
992 
993 	if (riscv_isa_extension_available(vcpu->arch.isa, f))
994 		return sizeof(cntx->fp.f) / sizeof(u32);
995 	else
996 		return 0;
997 }
998 
999 static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
1000 				u64 __user *uindices)
1001 {
1002 	int n = num_fp_f_regs(vcpu);
1003 
1004 	for (int i = 0; i < n; i++) {
1005 		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
1006 			  KVM_REG_RISCV_FP_F | i;
1007 
1008 		if (uindices) {
1009 			if (put_user(reg, uindices))
1010 				return -EFAULT;
1011 			uindices++;
1012 		}
1013 	}
1014 
1015 	return n;
1016 }
1017 
1018 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
1019 {
1020 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
1021 
1022 	if (riscv_isa_extension_available(vcpu->arch.isa, d))
1023 		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
1024 	else
1025 		return 0;
1026 }
1027 
/*
 * Emit (or, with a NULL uindices, just count) the ONE_REG indices of
 * the double-precision FP registers.  The first n-1 indices are the
 * 64-bit fp.d.f registers; the last index is the 32-bit fcsr, hence its
 * different size encoding.
 */
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices (i is now the last register number) */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}
1057 
1058 static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
1059 				u64 __user *uindices)
1060 {
1061 	unsigned long guest_ext;
1062 	unsigned int n = 0;
1063 
1064 	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
1065 		u64 size = IS_ENABLED(CONFIG_32BIT) ?
1066 			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1067 		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
1068 
1069 		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
1070 			continue;
1071 
1072 		if (uindices) {
1073 			if (put_user(reg, uindices))
1074 				return -EFAULT;
1075 			uindices++;
1076 		}
1077 
1078 		n++;
1079 	}
1080 
1081 	return n;
1082 }
1083 
1084 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
1085 {
1086 	return copy_isa_ext_reg_indices(vcpu, NULL);
1087 }
1088 
1089 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
1090 {
1091 	return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
1092 }
1093 
1094 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
1095 {
1096 	return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL);
1097 }
1098 
1099 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
1100 {
1101 	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
1102 		return 0;
1103 
1104 	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
1105 	return 37;
1106 }
1107 
1108 static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
1109 				u64 __user *uindices)
1110 {
1111 	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
1112 	int n = num_vector_regs(vcpu);
1113 	u64 reg, size;
1114 	int i;
1115 
1116 	if (n == 0)
1117 		return 0;
1118 
1119 	/* copy vstart, vl, vtype, vcsr and vlenb */
1120 	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
1121 	for (i = 0; i < 5; i++) {
1122 		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;
1123 
1124 		if (uindices) {
1125 			if (put_user(reg, uindices))
1126 				return -EFAULT;
1127 			uindices++;
1128 		}
1129 	}
1130 
1131 	/* vector_regs have a variable 'vlenb' size */
1132 	size = __builtin_ctzl(cntx->vector.vlenb);
1133 	size <<= KVM_REG_SIZE_SHIFT;
1134 	for (i = 0; i < 32; i++) {
1135 		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
1136 			KVM_REG_RISCV_VECTOR_REG(i);
1137 
1138 		if (uindices) {
1139 			if (put_user(reg, uindices))
1140 				return -EFAULT;
1141 			uindices++;
1142 		}
1143 	}
1144 
1145 	return n;
1146 }
1147 
1148 /*
1149  * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
1150  *
1151  * This is for all registers.
1152  */
1153 unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
1154 {
1155 	unsigned long res = 0;
1156 
1157 	res += num_config_regs(vcpu);
1158 	res += num_core_regs();
1159 	res += num_csr_regs(vcpu);
1160 	res += num_timer_regs();
1161 	res += num_fp_f_regs(vcpu);
1162 	res += num_fp_d_regs(vcpu);
1163 	res += num_vector_regs(vcpu);
1164 	res += num_isa_ext_regs(vcpu);
1165 	res += num_sbi_ext_regs(vcpu);
1166 	res += num_sbi_regs(vcpu);
1167 
1168 	return res;
1169 }
1170 
1171 /*
1172  * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
1173  */
1174 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
1175 				    u64 __user *uindices)
1176 {
1177 	int ret;
1178 
1179 	ret = copy_config_reg_indices(vcpu, uindices);
1180 	if (ret < 0)
1181 		return ret;
1182 	uindices += ret;
1183 
1184 	ret = copy_core_reg_indices(uindices);
1185 	if (ret < 0)
1186 		return ret;
1187 	uindices += ret;
1188 
1189 	ret = copy_csr_reg_indices(vcpu, uindices);
1190 	if (ret < 0)
1191 		return ret;
1192 	uindices += ret;
1193 
1194 	ret = copy_timer_reg_indices(uindices);
1195 	if (ret < 0)
1196 		return ret;
1197 	uindices += ret;
1198 
1199 	ret = copy_fp_f_reg_indices(vcpu, uindices);
1200 	if (ret < 0)
1201 		return ret;
1202 	uindices += ret;
1203 
1204 	ret = copy_fp_d_reg_indices(vcpu, uindices);
1205 	if (ret < 0)
1206 		return ret;
1207 	uindices += ret;
1208 
1209 	ret = copy_vector_reg_indices(vcpu, uindices);
1210 	if (ret < 0)
1211 		return ret;
1212 	uindices += ret;
1213 
1214 	ret = copy_isa_ext_reg_indices(vcpu, uindices);
1215 	if (ret < 0)
1216 		return ret;
1217 	uindices += ret;
1218 
1219 	ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
1220 	if (ret < 0)
1221 		return ret;
1222 	uindices += ret;
1223 
1224 	ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices);
1225 	if (ret < 0)
1226 		return ret;
1227 	uindices += ret;
1228 
1229 	return 0;
1230 }
1231 
1232 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
1233 			   const struct kvm_one_reg *reg)
1234 {
1235 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1236 	case KVM_REG_RISCV_CONFIG:
1237 		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1238 	case KVM_REG_RISCV_CORE:
1239 		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1240 	case KVM_REG_RISCV_CSR:
1241 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1242 	case KVM_REG_RISCV_TIMER:
1243 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1244 	case KVM_REG_RISCV_FP_F:
1245 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1246 						 KVM_REG_RISCV_FP_F);
1247 	case KVM_REG_RISCV_FP_D:
1248 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1249 						 KVM_REG_RISCV_FP_D);
1250 	case KVM_REG_RISCV_VECTOR:
1251 		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1252 	case KVM_REG_RISCV_ISA_EXT:
1253 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1254 	case KVM_REG_RISCV_SBI_EXT:
1255 		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1256 	case KVM_REG_RISCV_SBI_STATE:
1257 		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
1258 	default:
1259 		break;
1260 	}
1261 
1262 	return -ENOENT;
1263 }
1264 
1265 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1266 			   const struct kvm_one_reg *reg)
1267 {
1268 	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1269 	case KVM_REG_RISCV_CONFIG:
1270 		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1271 	case KVM_REG_RISCV_CORE:
1272 		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1273 	case KVM_REG_RISCV_CSR:
1274 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1275 	case KVM_REG_RISCV_TIMER:
1276 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1277 	case KVM_REG_RISCV_FP_F:
1278 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1279 						 KVM_REG_RISCV_FP_F);
1280 	case KVM_REG_RISCV_FP_D:
1281 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1282 						 KVM_REG_RISCV_FP_D);
1283 	case KVM_REG_RISCV_VECTOR:
1284 		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1285 	case KVM_REG_RISCV_ISA_EXT:
1286 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1287 	case KVM_REG_RISCV_SBI_EXT:
1288 		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1289 	case KVM_REG_RISCV_SBI_STATE:
1290 		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
1291 	default:
1292 		break;
1293 	}
1294 
1295 	return -ENOENT;
1296 }
1297