xref: /linux/arch/riscv/kvm/vcpu_onereg.c (revision 4a1d8ababde685a77fd4fd61e58f973cbdf29f8c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
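	/*
	 * Note: guest Smnpm deliberately maps to the host's Ssnpm below;
	 * the guest's pointer-masking behaviour is provided through the
	 * hypervisor (henvcfg) controls, so host Ssnpm is the capability
	 * that matters rather than host Smnpm.
	 */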
	[KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM,
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZAAMO),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZALRSC),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

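/*
 * Map a bit position in the base (single letter) ISA word to the
 * corresponding KVM ISA extension ID; returns KVM_RISCV_ISA_EXT_MAX
 * when the bit has no KVM mapping.
 */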
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

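/*
 * Check whether an ISA extension may be enabled for a vCPU: some
 * extensions cannot be virtualized at all (H), and others need
 * host-side support before they can be exposed to a guest.
 */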
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in Ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * A guest OS can use Svadu only when the host OS enables Svadu.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

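/*
 * Check whether an ISA extension may be disabled for a vCPU. Most
 * extensions cannot be disabled because the hardware provides no
 * architectural mechanism to hide them from the guest.
 */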
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SMNPM:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZAAMO:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZALRSC:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE bit is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless we support Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

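/*
 * Populate a new vCPU's ISA bitmap with every host extension that KVM
 * is allowed to expose to a guest.
 */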
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

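/* Read one CONFIG register for KVM_GET_ONE_REG (KVM_REG_RISCV_CONFIG space). */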
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

280 
kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)281 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
282 					 const struct kvm_one_reg *reg)
283 {
284 	unsigned long __user *uaddr =
285 			(unsigned long __user *)(unsigned long)reg->addr;
286 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
287 					    KVM_REG_SIZE_MASK |
288 					    KVM_REG_RISCV_CONFIG);
289 	unsigned long i, isa_ext, reg_val;
290 
291 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
292 		return -EINVAL;
293 
294 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
295 		return -EFAULT;
296 
297 	switch (reg_num) {
298 	case KVM_REG_RISCV_CONFIG_REG(isa):
299 		/*
300 		 * This ONE REG interface is only defined for
301 		 * single letter extensions.
302 		 */
303 		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
304 			return -EINVAL;
305 
306 		/*
307 		 * Return early (i.e. do nothing) if reg_val is the same
308 		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
309 		 */
310 		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
311 			break;
312 
313 		if (!vcpu->arch.ran_atleast_once) {
314 			/* Ignore the enable/disable request for certain extensions */
315 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
316 				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
317 				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
318 					reg_val &= ~BIT(i);
319 					continue;
320 				}
321 				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
322 					if (reg_val & BIT(i))
323 						reg_val &= ~BIT(i);
324 				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
325 					if (!(reg_val & BIT(i)))
326 						reg_val |= BIT(i);
327 			}
328 			reg_val &= riscv_isa_extension_base(NULL);
329 			/* Do not modify anything beyond single letter extensions */
330 			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
331 				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
332 			vcpu->arch.isa[0] = reg_val;
333 			kvm_riscv_vcpu_fp_reset(vcpu);
334 		} else {
335 			return -EBUSY;
336 		}
337 		break;
338 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
339 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
340 			return -ENOENT;
341 		if (reg_val != riscv_cbom_block_size)
342 			return -EINVAL;
343 		break;
344 	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
345 		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
346 			return -ENOENT;
347 		if (reg_val != riscv_cboz_block_size)
348 			return -EINVAL;
349 		break;
350 	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
351 		if (reg_val == vcpu->arch.mvendorid)
352 			break;
353 		if (!vcpu->arch.ran_atleast_once)
354 			vcpu->arch.mvendorid = reg_val;
355 		else
356 			return -EBUSY;
357 		break;
358 	case KVM_REG_RISCV_CONFIG_REG(marchid):
359 		if (reg_val == vcpu->arch.marchid)
360 			break;
361 		if (!vcpu->arch.ran_atleast_once)
362 			vcpu->arch.marchid = reg_val;
363 		else
364 			return -EBUSY;
365 		break;
366 	case KVM_REG_RISCV_CONFIG_REG(mimpid):
367 		if (reg_val == vcpu->arch.mimpid)
368 			break;
369 		if (!vcpu->arch.ran_atleast_once)
370 			vcpu->arch.mimpid = reg_val;
371 		else
372 			return -EBUSY;
373 		break;
374 	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
375 		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
376 			return -EINVAL;
377 		break;
378 	default:
379 		return -ENOENT;
380 	}
381 
382 	return 0;
383 }
384 
kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)385 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
386 				       const struct kvm_one_reg *reg)
387 {
388 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
389 	unsigned long __user *uaddr =
390 			(unsigned long __user *)(unsigned long)reg->addr;
391 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
392 					    KVM_REG_SIZE_MASK |
393 					    KVM_REG_RISCV_CORE);
394 	unsigned long reg_val;
395 
396 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
397 		return -EINVAL;
398 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
399 		return -ENOENT;
400 
401 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
402 		reg_val = cntx->sepc;
403 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
404 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
405 		reg_val = ((unsigned long *)cntx)[reg_num];
406 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
407 		reg_val = (cntx->sstatus & SR_SPP) ?
408 				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
409 	else
410 		return -ENOENT;
411 
412 	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
413 		return -EFAULT;
414 
415 	return 0;
416 }
417 
kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)418 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
419 				       const struct kvm_one_reg *reg)
420 {
421 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
422 	unsigned long __user *uaddr =
423 			(unsigned long __user *)(unsigned long)reg->addr;
424 	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
425 					    KVM_REG_SIZE_MASK |
426 					    KVM_REG_RISCV_CORE);
427 	unsigned long reg_val;
428 
429 	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
430 		return -EINVAL;
431 	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
432 		return -ENOENT;
433 
434 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
435 		return -EFAULT;
436 
437 	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
438 		cntx->sepc = reg_val;
439 	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
440 		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
441 		((unsigned long *)cntx)[reg_num] = reg_val;
442 	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
443 		if (reg_val == KVM_RISCV_MODE_S)
444 			cntx->sstatus |= SR_SPP;
445 		else
446 			cntx->sstatus &= ~SR_SPP;
447 	} else
448 		return -ENOENT;
449 
450 	return 0;
451 }
452 
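/*
 * Read one general CSR. The sip value is synthesized from the shadow
 * hvip copy after pending interrupts have been flushed into it.
 */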
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

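/*
 * Read the enable state (0 or 1) of one ISA extension. Unknown
 * extensions and extensions absent on the host report -ENOENT.
 */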
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

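/*
 * Apply one MULTI_EN/MULTI_DIS bitmap: every set bit selects an
 * extension to enable or disable. Per-extension failures are ignored.
 */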
static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

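/*
 * Read one ISA_EXT register; the MULTI_DIS view is simply the
 * complement of the MULTI_EN bitmap.
 */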
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

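/*
 * The copy_*_reg_indices() helpers below do double duty: with a NULL
 * uindices pointer they only count the registers that would be copied,
 * which lets the matching num_*_regs() helpers share the filtering
 * logic.
 */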
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
					  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(NULL, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
			KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers we present via KVM_GET/SET_ONE_REG
 *
 * This is the total across all register types.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

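/* Top-level KVM_SET_ONE_REG dispatcher; each register type has its own handler above. */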
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

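/*
 * Top-level KVM_GET_ONE_REG dispatcher. As a rough illustration (a
 * minimal userspace sketch, not part of this file), reading a vCPU's
 * base ISA config register would look like:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG |
 *		      KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		err(1, "KVM_GET_ONE_REG");	// vcpu_fd: assumed vCPU file descriptor
 *
 * KVM_REG_SIZE_U64 assumes a 64-bit host; rv32 would use
 * KVM_REG_SIZE_U32.
 */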
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}
1301