xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
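/*
 * Bits 0..25 of the mask above correspond to the 26 single-letter
 * extensions 'a'..'z' exposed through the "isa" config register.
 */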

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & guest ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMNPM),
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZAAMO),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZALASR),
	KVM_ISA_EXT_ARR(ZALRSC),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCLSD),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFBFMIN),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOP),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZILSD),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFBFMIN),
	KVM_ISA_EXT_ARR(ZVFBFWMA),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
{
	unsigned long host_ext;

	if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
	    kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr));
	*guest_ext = kvm_isa_ext_arr[kvm_ext];
	switch (*guest_ext) {
	case RISCV_ISA_EXT_SMNPM:
		/*
		 * Pointer masking effective in (H)S-mode is provided by the
		 * Smnpm extension, so that extension is reported to the guest,
		 * even though the CSR bits for configuring VS-mode pointer
		 * masking on the host side are part of the Ssnpm extension.
		 */
		host_ext = RISCV_ISA_EXT_SSNPM;
		break;
	default:
		host_ext = *guest_ext;
		break;
	}

	if (!__riscv_isa_extension_available(NULL, host_ext))
		return -ENOENT;

	return 0;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on the interrupt filtering defined in Ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * The guest OS can use Svadu only when the host OS enables Svadu.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZAAMO:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZALASR:
	case KVM_RISCV_ISA_EXT_ZALRSC:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFBFMIN:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICBOP:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFBFMIN:
	case KVM_RISCV_ISA_EXT_ZVFBFWMA:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless we support Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long guest_ext, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;
		if (kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(guest_ext, vcpu->arch.isa);
	}
}
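/*
 * kvm_riscv_vcpu_setup_isa() above seeds the default guest ISA bitmap from
 * whatever the host supports; userspace can then trim or extend it through
 * the ONE_REG interfaces below, but only until the vCPU runs for the first
 * time (see the ran_atleast_once checks).
 */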

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		reg_val = riscv_cbop_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
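/*
 * Illustrative only (not part of this file): on rv64, userspace could read
 * the "isa" config register via KVM_GET_ONE_REG roughly as follows, assuming
 * vcpu_fd was obtained from KVM_CREATE_VCPU:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */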

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
		if (!riscv_isa_extension_available(NULL, ZICBOP))
			return -ENOENT;
		if (reg_val != riscv_cbop_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
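/*
 * Note the write-once semantics above: "isa", "mvendorid", "marchid" and
 * "mimpid" accept changes only before the vCPU has run at least once
 * (-EBUSY afterwards), while the *_block_size and satp_mode registers only
 * ever accept the host-provided value (-EINVAL otherwise).
 */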

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}
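/*
 * The ((unsigned long *)cntx)[reg_num] indexing in the two functions above
 * relies on the ra..t6 slots of struct kvm_cpu_context sitting at the same
 * word offsets as regs.ra..regs.t6 in the uapi struct kvm_riscv_core; only
 * regs.pc (index 0) differs and is explicitly remapped to cntx->sepc.
 */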

static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long);

	if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		return -ENOENT;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
	unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long);

	if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		return -ENOENT;
	if (reg_num >= regs_max)
		return -ENOENT;

	reg_num = array_index_nospec(reg_num, regs_max);

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(guest_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(guest_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}
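/*
 * Illustrative only (not part of this file): before the first KVM_RUN,
 * userspace could disable a single extension (here Zicond, on rv64) via
 * KVM_SET_ONE_REG, assuming vcpu_fd was obtained from KVM_CREATE_VCPU:
 *
 *	unsigned long val = 0;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
 *		      KVM_RISCV_ISA_EXT_ZICOND,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */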

static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOZ))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
			!riscv_isa_extension_available(NULL, ZICBOP))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned long guest_ext;
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
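/*
 * The variable-size encoding above works because the uapi size field holds
 * log2 of the register width in bytes: KVM_REG_SIZE(id) expands to
 * 1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT), so storing
 * __builtin_ctzl(vlenb) in that field makes KVM_REG_SIZE() return vlenb.
 */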

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}
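/*
 * Illustrative only (not part of this file): the two functions above back
 * the KVM_GET_REG_LIST ioctl. Userspace would typically call it twice,
 * assuming vcpu_fd was obtained from KVM_CREATE_VCPU:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG, updates probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);		// fills list->reg[]
 */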

int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}