xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision c4bb3a2d641c02ac2c7aa45534b4cefdf9bf416b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
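
/*
 * For example, KVM_ISA_EXT_ARR(SSTC) expands to:
 *
 *	[KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC
 *
 * i.e. each KVM-visible extension ID indexes the matching host ID.
 */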

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
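	/*
	 * Note the deliberate cross-mapping on the next line: the guest's
	 * Smnpm is backed by the host's Ssnpm, since KVM provides the
	 * guest-visible pointer-masking control via henvcfg (presumably
	 * why this entry doesn't use KVM_ISA_EXT_ARR()).
	 */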
	[KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM,
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

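/* Map a base-ISA bit position (single-letter extension) to a KVM extension ID */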
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

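/* Policy check: may userspace enable this extension for a vCPU? */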
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * The guest OS can use Svadu only when the host OS enables Svadu.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SMNPM:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless the platform supports Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

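/* Initialize the vCPU ISA bitmap with every host extension KVM can expose */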
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
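
/*
 * Illustrative userspace usage of the handler above (a sketch; the vCPU
 * fd and the RV64 register size are assumptions, not part of this file):
 *
 *	unsigned long isa;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */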

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}
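
/*
 * Illustrative userspace usage of the handler above (a sketch; the vCPU
 * fd and the entry address are assumptions): set the guest entry point
 * before the first KVM_RUN.
 *
 *	unsigned long entry = 0x80000000;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE |
 *		      KVM_REG_RISCV_CORE_REG(regs.pc),
 *		.addr = (unsigned long)&entry,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */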

static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
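	/*
	 * Note: sip is synthesized rather than read from storage. The
	 * VS-level bits live in hvip, shifted up by VSIP_TO_HVIP_SHIFT,
	 * so reading sip shifts them back down and masks to the valid set.
	 */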
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}
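
/*
 * Illustrative userspace usage of the setter above (a sketch; the vCPU fd
 * is an assumption): disable a single extension, e.g. Svpbmt, before the
 * vCPU has run.
 *
 *	unsigned long val = 0;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
 *		      KVM_RISCV_ISA_EXT_SVPBMT,
 *		.addr = (unsigned long)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * The "multi" helpers below pack BITS_PER_LONG extension IDs per register:
 * register reg_num covers IDs from reg_num * BITS_PER_LONG up to (but not
 * including) (reg_num + 1) * BITS_PER_LONG.
 */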

static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

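/*
 * Counting reuses the copy routines: every copy_*_reg_indices() helper
 * treats a NULL uindices pointer as "count only, copy nothing".
 */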
static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
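
/*
 * A worked example of the size encoding above: with vlenb = 16 bytes
 * (VLEN = 128), __builtin_ctzl(16) = 4, so the KVM_REG_SIZE field of the
 * register ID is 4, i.e. KVM_REG_SIZE_U128 (2^4 bytes per vector register).
 */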

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}
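
/*
 * Illustrative userspace usage (a sketch; the vCPU fd and error handling
 * are assumptions/omitted): the two routines above back KVM_GET_REG_LIST,
 * which reports the required count via -E2BIG when the buffer is too small.
 *
 *	struct kvm_reg_list *list;
 *	u64 n;
 *
 *	list = calloc(1, sizeof(*list));
 *	list->n = 0;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	// -E2BIG, list->n updated
 *	n = list->n;
 *	list = realloc(list, sizeof(*list) + n * sizeof(__u64));
 *	list->n = n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	// fills list->reg[]
 */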

int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}