// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */
9
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_isa.h>
#include <asm/kvm_vcpu_vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
22
kvm_riscv_vcpu_setup_isa(struct kvm_vcpu * vcpu)23 void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
24 {
25 unsigned long guest_ext, i;
26
27 for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
28 if (__kvm_riscv_isa_check_host(i, &guest_ext))
29 continue;
30 if (kvm_riscv_isa_enable_allowed(i))
31 set_bit(guest_ext, vcpu->arch.isa);
32 }
33 }
34
kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)35 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
36 const struct kvm_one_reg *reg)
37 {
38 unsigned long __user *uaddr =
39 (unsigned long __user *)(unsigned long)reg->addr;
40 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
41 KVM_REG_SIZE_MASK |
42 KVM_REG_RISCV_CONFIG);
43 unsigned long reg_val;
44
45 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
46 return -EINVAL;
47
48 switch (reg_num) {
49 case KVM_REG_RISCV_CONFIG_REG(isa):
50 reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
51 break;
52 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
53 if (kvm_riscv_isa_check_host(ZICBOM))
54 return -ENOENT;
55 reg_val = riscv_cbom_block_size;
56 break;
57 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
58 if (kvm_riscv_isa_check_host(ZICBOZ))
59 return -ENOENT;
60 reg_val = riscv_cboz_block_size;
61 break;
62 case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
63 if (kvm_riscv_isa_check_host(ZICBOP))
64 return -ENOENT;
65 reg_val = riscv_cbop_block_size;
66 break;
67 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
68 reg_val = vcpu->arch.mvendorid;
69 break;
70 case KVM_REG_RISCV_CONFIG_REG(marchid):
71 reg_val = vcpu->arch.marchid;
72 break;
73 case KVM_REG_RISCV_CONFIG_REG(mimpid):
74 reg_val = vcpu->arch.mimpid;
75 break;
76 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
77 reg_val = satp_mode >> SATP_MODE_SHIFT;
78 break;
79 default:
80 return -ENOENT;
81 }
82
83 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
84 return -EFAULT;
85
86 return 0;
87 }
88
kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)89 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
90 const struct kvm_one_reg *reg)
91 {
92 unsigned long __user *uaddr =
93 (unsigned long __user *)(unsigned long)reg->addr;
94 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
95 KVM_REG_SIZE_MASK |
96 KVM_REG_RISCV_CONFIG);
97 unsigned long i, isa_ext, reg_val;
98
99 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
100 return -EINVAL;
101
102 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
103 return -EFAULT;
104
105 switch (reg_num) {
106 case KVM_REG_RISCV_CONFIG_REG(isa):
107 /*
108 * This ONE REG interface is only defined for
109 * single letter extensions.
110 */
111 if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
112 return -EINVAL;
113
114 /*
115 * Return early (i.e. do nothing) if reg_val is the same
116 * value retrievable via kvm_riscv_vcpu_get_reg_config().
117 */
118 if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
119 break;
120
121 if (!vcpu->arch.ran_atleast_once) {
122 /* Ignore the enable/disable request for certain extensions */
123 for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
124 isa_ext = kvm_riscv_base2isa_ext(i);
125 if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
126 reg_val &= ~BIT(i);
127 continue;
128 }
129 if (!kvm_riscv_isa_enable_allowed(isa_ext))
130 if (reg_val & BIT(i))
131 reg_val &= ~BIT(i);
132 if (!kvm_riscv_isa_disable_allowed(isa_ext))
133 if (!(reg_val & BIT(i)))
134 reg_val |= BIT(i);
135 }
136 reg_val &= riscv_isa_extension_base(NULL);
137 /* Do not modify anything beyond single letter extensions */
138 reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
139 (reg_val & KVM_RISCV_BASE_ISA_MASK);
140 vcpu->arch.isa[0] = reg_val;
141 kvm_riscv_vcpu_fp_reset(vcpu);
142 } else {
143 return -EBUSY;
144 }
145 break;
146 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
147 if (kvm_riscv_isa_check_host(ZICBOM))
148 return -ENOENT;
149 if (reg_val != riscv_cbom_block_size)
150 return -EINVAL;
151 break;
152 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
153 if (kvm_riscv_isa_check_host(ZICBOZ))
154 return -ENOENT;
155 if (reg_val != riscv_cboz_block_size)
156 return -EINVAL;
157 break;
158 case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
159 if (kvm_riscv_isa_check_host(ZICBOP))
160 return -ENOENT;
161 if (reg_val != riscv_cbop_block_size)
162 return -EINVAL;
163 break;
164 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
165 if (reg_val == vcpu->arch.mvendorid)
166 break;
167 if (!vcpu->arch.ran_atleast_once)
168 vcpu->arch.mvendorid = reg_val;
169 else
170 return -EBUSY;
171 break;
172 case KVM_REG_RISCV_CONFIG_REG(marchid):
173 if (reg_val == vcpu->arch.marchid)
174 break;
175 if (!vcpu->arch.ran_atleast_once)
176 vcpu->arch.marchid = reg_val;
177 else
178 return -EBUSY;
179 break;
180 case KVM_REG_RISCV_CONFIG_REG(mimpid):
181 if (reg_val == vcpu->arch.mimpid)
182 break;
183 if (!vcpu->arch.ran_atleast_once)
184 vcpu->arch.mimpid = reg_val;
185 else
186 return -EBUSY;
187 break;
188 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
189 if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
190 return -EINVAL;
191 break;
192 default:
193 return -ENOENT;
194 }
195
196 return 0;
197 }
198
kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)199 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
200 const struct kvm_one_reg *reg)
201 {
202 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
203 unsigned long __user *uaddr =
204 (unsigned long __user *)(unsigned long)reg->addr;
205 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
206 KVM_REG_SIZE_MASK |
207 KVM_REG_RISCV_CORE);
208 unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
209 unsigned long reg_val;
210
211 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
212 return -EINVAL;
213 if (reg_num >= regs_max)
214 return -ENOENT;
215
216 reg_num = array_index_nospec(reg_num, regs_max);
217
218 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
219 reg_val = cntx->sepc;
220 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
221 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
222 reg_val = ((unsigned long *)cntx)[reg_num];
223 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
224 reg_val = (cntx->sstatus & SR_SPP) ?
225 KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
226 else
227 return -ENOENT;
228
229 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
230 return -EFAULT;
231
232 return 0;
233 }
234
kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)235 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
236 const struct kvm_one_reg *reg)
237 {
238 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
239 unsigned long __user *uaddr =
240 (unsigned long __user *)(unsigned long)reg->addr;
241 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
242 KVM_REG_SIZE_MASK |
243 KVM_REG_RISCV_CORE);
244 unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
245 unsigned long reg_val;
246
247 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
248 return -EINVAL;
249 if (reg_num >= regs_max)
250 return -ENOENT;
251
252 reg_num = array_index_nospec(reg_num, regs_max);
253
254 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
255 return -EFAULT;
256
257 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
258 cntx->sepc = reg_val;
259 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
260 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
261 ((unsigned long *)cntx)[reg_num] = reg_val;
262 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
263 if (reg_val == KVM_RISCV_MODE_S)
264 cntx->sstatus |= SR_SPP;
265 else
266 cntx->sstatus &= ~SR_SPP;
267 } else
268 return -ENOENT;
269
270 return 0;
271 }
272
kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * out_val)273 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
274 unsigned long reg_num,
275 unsigned long *out_val)
276 {
277 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
278 unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
279
280 if (reg_num >= regs_max)
281 return -ENOENT;
282
283 reg_num = array_index_nospec(reg_num, regs_max);
284
285 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
286 kvm_riscv_vcpu_flush_interrupts(vcpu);
287 *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
288 *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
289 } else
290 *out_val = ((unsigned long *)csr)[reg_num];
291
292 return 0;
293 }
294
kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long reg_val)295 static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
296 unsigned long reg_num,
297 unsigned long reg_val)
298 {
299 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
300 unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
301
302 if (reg_num >= regs_max)
303 return -ENOENT;
304
305 reg_num = array_index_nospec(reg_num, regs_max);
306
307 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
308 reg_val &= VSIP_VALID_MASK;
309 reg_val <<= VSIP_TO_HVIP_SHIFT;
310 }
311
312 ((unsigned long *)csr)[reg_num] = reg_val;
313
314 if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
315 WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
316
317 return 0;
318 }
319
kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long reg_val)320 static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
321 unsigned long reg_num,
322 unsigned long reg_val)
323 {
324 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
325 unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
326 sizeof(unsigned long);
327
328 if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
329 return -ENOENT;
330 if (reg_num >= regs_max)
331 return -ENOENT;
332
333 reg_num = array_index_nospec(reg_num, regs_max);
334
335 ((unsigned long *)csr)[reg_num] = reg_val;
336 return 0;
337 }
338
kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * out_val)339 static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
340 unsigned long reg_num,
341 unsigned long *out_val)
342 {
343 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
344 unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
345 sizeof(unsigned long);
346
347 if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
348 return -ENOENT;
349 if (reg_num >= regs_max)
350 return -ENOENT;
351
352 reg_num = array_index_nospec(reg_num, regs_max);
353
354 *out_val = ((unsigned long *)csr)[reg_num];
355 return 0;
356 }
357
kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)358 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
359 const struct kvm_one_reg *reg)
360 {
361 int rc;
362 unsigned long __user *uaddr =
363 (unsigned long __user *)(unsigned long)reg->addr;
364 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
365 KVM_REG_SIZE_MASK |
366 KVM_REG_RISCV_CSR);
367 unsigned long reg_val, reg_subtype;
368
369 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
370 return -EINVAL;
371
372 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
373 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
374 switch (reg_subtype) {
375 case KVM_REG_RISCV_CSR_GENERAL:
376 rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, ®_val);
377 break;
378 case KVM_REG_RISCV_CSR_AIA:
379 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val);
380 break;
381 case KVM_REG_RISCV_CSR_SMSTATEEN:
382 rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, ®_val);
383 break;
384 default:
385 rc = -ENOENT;
386 break;
387 }
388 if (rc)
389 return rc;
390
391 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
392 return -EFAULT;
393
394 return 0;
395 }
396
kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)397 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
398 const struct kvm_one_reg *reg)
399 {
400 int rc;
401 unsigned long __user *uaddr =
402 (unsigned long __user *)(unsigned long)reg->addr;
403 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
404 KVM_REG_SIZE_MASK |
405 KVM_REG_RISCV_CSR);
406 unsigned long reg_val, reg_subtype;
407
408 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
409 return -EINVAL;
410
411 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
412 return -EFAULT;
413
414 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
415 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
416 switch (reg_subtype) {
417 case KVM_REG_RISCV_CSR_GENERAL:
418 rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
419 break;
420 case KVM_REG_RISCV_CSR_AIA:
421 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
422 break;
423 case KVM_REG_RISCV_CSR_SMSTATEEN:
424 rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
425 break;
426 default:
427 rc = -ENOENT;
428 break;
429 }
430 if (rc)
431 return rc;
432
433 vcpu->arch.csr_dirty = true;
434
435 return 0;
436 }
437
riscv_vcpu_get_isa_ext_single(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * reg_val)438 static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
439 unsigned long reg_num,
440 unsigned long *reg_val)
441 {
442 unsigned long guest_ext;
443 int ret;
444
445 ret = __kvm_riscv_isa_check_host(reg_num, &guest_ext);
446 if (ret)
447 return ret;
448
449 *reg_val = 0;
450 if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
451 *reg_val = 1; /* Mark the given extension as available */
452
453 return 0;
454 }
455
riscv_vcpu_set_isa_ext_single(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long reg_val)456 static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
457 unsigned long reg_num,
458 unsigned long reg_val)
459 {
460 unsigned long guest_ext;
461 int ret;
462
463 ret = __kvm_riscv_isa_check_host(reg_num, &guest_ext);
464 if (ret)
465 return ret;
466
467 if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
468 return 0;
469
470 if (!vcpu->arch.ran_atleast_once) {
471 /*
472 * All multi-letter extension and a few single letter
473 * extension can be disabled
474 */
475 if (reg_val == 1 &&
476 kvm_riscv_isa_enable_allowed(reg_num))
477 set_bit(guest_ext, vcpu->arch.isa);
478 else if (!reg_val &&
479 kvm_riscv_isa_disable_allowed(reg_num))
480 clear_bit(guest_ext, vcpu->arch.isa);
481 else
482 return -EINVAL;
483 kvm_riscv_vcpu_fp_reset(vcpu);
484 } else {
485 return -EBUSY;
486 }
487
488 return 0;
489 }
490
riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu * vcpu,unsigned long reg_num,unsigned long * reg_val)491 static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
492 unsigned long reg_num,
493 unsigned long *reg_val)
494 {
495 unsigned long i, ext_id, ext_val;
496
497 if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
498 return -ENOENT;
499
500 for (i = 0; i < BITS_PER_LONG; i++) {
501 ext_id = i + reg_num * BITS_PER_LONG;
502 if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
503 break;
504
505 ext_val = 0;
506 riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
507 if (ext_val)
508 *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
509 }
510
511 return 0;
512 }
513
/*
 * Enable or disable (per @enable) every extension whose bit is set in
 * reg_val, for the word of extension IDs selected by reg_num.  Per-
 * extension failures are intentionally ignored (best effort).
 */
static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}
533
kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)534 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
535 const struct kvm_one_reg *reg)
536 {
537 int rc;
538 unsigned long __user *uaddr =
539 (unsigned long __user *)(unsigned long)reg->addr;
540 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
541 KVM_REG_SIZE_MASK |
542 KVM_REG_RISCV_ISA_EXT);
543 unsigned long reg_val, reg_subtype;
544
545 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
546 return -EINVAL;
547
548 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
549 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
550
551 reg_val = 0;
552 switch (reg_subtype) {
553 case KVM_REG_RISCV_ISA_SINGLE:
554 rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, ®_val);
555 break;
556 case KVM_REG_RISCV_ISA_MULTI_EN:
557 case KVM_REG_RISCV_ISA_MULTI_DIS:
558 rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, ®_val);
559 if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
560 reg_val = ~reg_val;
561 break;
562 default:
563 rc = -ENOENT;
564 }
565 if (rc)
566 return rc;
567
568 if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
569 return -EFAULT;
570
571 return 0;
572 }
573
kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)574 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
575 const struct kvm_one_reg *reg)
576 {
577 unsigned long __user *uaddr =
578 (unsigned long __user *)(unsigned long)reg->addr;
579 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
580 KVM_REG_SIZE_MASK |
581 KVM_REG_RISCV_ISA_EXT);
582 unsigned long reg_val, reg_subtype;
583
584 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
585 return -EINVAL;
586
587 reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
588 reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
589
590 if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
591 return -EFAULT;
592
593 switch (reg_subtype) {
594 case KVM_REG_RISCV_ISA_SINGLE:
595 return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
596 case KVM_REG_RISCV_ISA_MULTI_EN:
597 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
598 case KVM_REG_RISCV_ISA_MULTI_DIS:
599 return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
600 default:
601 return -ENOENT;
602 }
603
604 return 0;
605 }
606
/*
 * Emit the register IDs of all visible CONFIG regs to @uindices (or just
 * count them when @uindices is NULL).  Returns the number of IDs emitted,
 * or -EFAULT on a failed put_user().
 */
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
			kvm_riscv_isa_check_host(ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			kvm_riscv_isa_check_host(ZICBOZ))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
			kvm_riscv_isa_check_host(ZICBOP))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
645
num_config_regs(const struct kvm_vcpu * vcpu)646 static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
647 {
648 return copy_config_reg_indices(vcpu, NULL);
649 }
650
num_core_regs(void)651 static inline unsigned long num_core_regs(void)
652 {
653 return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
654 }
655
/*
 * Emit all CORE register IDs to @uindices (or just count when NULL).
 * Returns the count, or -EFAULT on a failed put_user().
 */
static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
674
num_csr_regs(const struct kvm_vcpu * vcpu)675 static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
676 {
677 unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
678
679 if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
680 n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
681 if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
682 n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);
683
684 return n;
685 }
686
/*
 * Emit all CSR register IDs (general, then AIA, then Smstateen, each
 * gated on the VCPU's ISA) to @uindices, or just count when NULL.
 * Returns the total count, or -EFAULT on a failed put_user().
 */
static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}
745
num_timer_regs(void)746 static inline unsigned long num_timer_regs(void)
747 {
748 return sizeof(struct kvm_riscv_timer) / sizeof(u64);
749 }
750
/*
 * Emit all TIMER register IDs (always 64-bit) to @uindices, or just
 * count when NULL.  Returns the count, or -EFAULT on put_user() failure.
 */
static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
768
num_fp_f_regs(const struct kvm_vcpu * vcpu)769 static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
770 {
771 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
772
773 if (riscv_isa_extension_available(vcpu->arch.isa, f))
774 return sizeof(cntx->fp.f) / sizeof(u32);
775 else
776 return 0;
777 }
778
/*
 * Emit all FP_F (32-bit) register IDs to @uindices, or just count when
 * NULL.  Returns the count, or -EFAULT on put_user() failure.
 */
static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
797
num_fp_d_regs(const struct kvm_vcpu * vcpu)798 static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
799 {
800 const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
801
802 if (riscv_isa_extension_available(vcpu->arch.isa, d))
803 return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
804 else
805 return 0;
806 }
807
/*
 * Emit all FP_D register IDs to @uindices, or just count when NULL:
 * f0-f31 are 64-bit, the trailing fcsr is 32-bit.  Returns the count,
 * or -EFAULT on put_user() failure.
 */
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}
837
/*
 * Emit the ISA_EXT register IDs for every extension available on the
 * host, or just count when @uindices is NULL.  Returns the count, or
 * -EFAULT on put_user() failure.
 */
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned long guest_ext;
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		/* Skip extensions the host does not provide */
		if (__kvm_riscv_isa_check_host(i, &guest_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}
863
num_isa_ext_regs(const struct kvm_vcpu * vcpu)864 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
865 {
866 return copy_isa_ext_reg_indices(vcpu, NULL);
867 }
868
num_sbi_ext_regs(struct kvm_vcpu * vcpu)869 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
870 {
871 return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
872 }
873
num_sbi_regs(struct kvm_vcpu * vcpu)874 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
875 {
876 return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL);
877 }
878
num_vector_regs(const struct kvm_vcpu * vcpu)879 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
880 {
881 if (!riscv_isa_extension_available(vcpu->arch.isa, v))
882 return 0;
883
884 /* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
885 return 37;
886 }
887
/*
 * Emit all VECTOR register IDs to @uindices, or just count when NULL.
 * The five status regs are word-sized; v0-v31 carry a size field derived
 * from the guest's vlenb.  Returns the count, or -EFAULT on failure.
 */
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
		      KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}
927
928 /*
929 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
930 *
931 * This is for all registers.
932 */
kvm_riscv_vcpu_num_regs(struct kvm_vcpu * vcpu)933 unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
934 {
935 unsigned long res = 0;
936
937 res += num_config_regs(vcpu);
938 res += num_core_regs();
939 res += num_csr_regs(vcpu);
940 res += num_timer_regs();
941 res += num_fp_f_regs(vcpu);
942 res += num_fp_d_regs(vcpu);
943 res += num_vector_regs(vcpu);
944 res += num_isa_ext_regs(vcpu);
945 res += num_sbi_ext_regs(vcpu);
946 res += num_sbi_regs(vcpu);
947
948 return res;
949 }
950
951 /*
952 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
953 */
kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu * vcpu,u64 __user * uindices)954 int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
955 u64 __user *uindices)
956 {
957 int ret;
958
959 ret = copy_config_reg_indices(vcpu, uindices);
960 if (ret < 0)
961 return ret;
962 uindices += ret;
963
964 ret = copy_core_reg_indices(uindices);
965 if (ret < 0)
966 return ret;
967 uindices += ret;
968
969 ret = copy_csr_reg_indices(vcpu, uindices);
970 if (ret < 0)
971 return ret;
972 uindices += ret;
973
974 ret = copy_timer_reg_indices(uindices);
975 if (ret < 0)
976 return ret;
977 uindices += ret;
978
979 ret = copy_fp_f_reg_indices(vcpu, uindices);
980 if (ret < 0)
981 return ret;
982 uindices += ret;
983
984 ret = copy_fp_d_reg_indices(vcpu, uindices);
985 if (ret < 0)
986 return ret;
987 uindices += ret;
988
989 ret = copy_vector_reg_indices(vcpu, uindices);
990 if (ret < 0)
991 return ret;
992 uindices += ret;
993
994 ret = copy_isa_ext_reg_indices(vcpu, uindices);
995 if (ret < 0)
996 return ret;
997 uindices += ret;
998
999 ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
1000 if (ret < 0)
1001 return ret;
1002 uindices += ret;
1003
1004 ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices);
1005 if (ret < 0)
1006 return ret;
1007 uindices += ret;
1008
1009 return 0;
1010 }
1011
kvm_riscv_vcpu_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)1012 int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
1013 const struct kvm_one_reg *reg)
1014 {
1015 switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1016 case KVM_REG_RISCV_CONFIG:
1017 return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
1018 case KVM_REG_RISCV_CORE:
1019 return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
1020 case KVM_REG_RISCV_CSR:
1021 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
1022 case KVM_REG_RISCV_TIMER:
1023 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
1024 case KVM_REG_RISCV_FP_F:
1025 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1026 KVM_REG_RISCV_FP_F);
1027 case KVM_REG_RISCV_FP_D:
1028 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
1029 KVM_REG_RISCV_FP_D);
1030 case KVM_REG_RISCV_VECTOR:
1031 return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
1032 case KVM_REG_RISCV_ISA_EXT:
1033 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
1034 case KVM_REG_RISCV_SBI_EXT:
1035 return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
1036 case KVM_REG_RISCV_SBI_STATE:
1037 return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
1038 default:
1039 break;
1040 }
1041
1042 return -ENOENT;
1043 }
1044
kvm_riscv_vcpu_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)1045 int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
1046 const struct kvm_one_reg *reg)
1047 {
1048 switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
1049 case KVM_REG_RISCV_CONFIG:
1050 return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
1051 case KVM_REG_RISCV_CORE:
1052 return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
1053 case KVM_REG_RISCV_CSR:
1054 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
1055 case KVM_REG_RISCV_TIMER:
1056 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
1057 case KVM_REG_RISCV_FP_F:
1058 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1059 KVM_REG_RISCV_FP_F);
1060 case KVM_REG_RISCV_FP_D:
1061 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
1062 KVM_REG_RISCV_FP_D);
1063 case KVM_REG_RISCV_VECTOR:
1064 return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
1065 case KVM_REG_RISCV_ISA_EXT:
1066 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
1067 case KVM_REG_RISCV_SBI_EXT:
1068 return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
1069 case KVM_REG_RISCV_SBI_STATE:
1070 return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
1071 default:
1072 break;
1073 }
1074
1075 return -ENOENT;
1076 }
1077