// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/pgtable.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & guest ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SMNPM),
	KVM_ISA_EXT_ARR(SMSTATEEN),
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSCOFPMF),
	KVM_ISA_EXT_ARR(SSNPM),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVADE),
	KVM_ISA_EXT_ARR(SVADU),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(SVVPTC),
	KVM_ISA_EXT_ARR(ZAAMO),
	KVM_ISA_EXT_ARR(ZABHA),
	KVM_ISA_EXT_ARR(ZACAS),
	KVM_ISA_EXT_ARR(ZALRSC),
	KVM_ISA_EXT_ARR(ZAWRS),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBC),
	KVM_ISA_EXT_ARR(ZBKB),
	KVM_ISA_EXT_ARR(ZBKC),
	KVM_ISA_EXT_ARR(ZBKX),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZCA),
	KVM_ISA_EXT_ARR(ZCB),
	KVM_ISA_EXT_ARR(ZCD),
	KVM_ISA_EXT_ARR(ZCF),
	KVM_ISA_EXT_ARR(ZCMOP),
	KVM_ISA_EXT_ARR(ZFA),
	KVM_ISA_EXT_ARR(ZFH),
	KVM_ISA_EXT_ARR(ZFHMIN),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICCRSE),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICOND),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTNTL),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
	KVM_ISA_EXT_ARR(ZIMOP),
	KVM_ISA_EXT_ARR(ZKND),
	KVM_ISA_EXT_ARR(ZKNE),
	KVM_ISA_EXT_ARR(ZKNH),
	KVM_ISA_EXT_ARR(ZKR),
	KVM_ISA_EXT_ARR(ZKSED),
	KVM_ISA_EXT_ARR(ZKSH),
	KVM_ISA_EXT_ARR(ZKT),
	KVM_ISA_EXT_ARR(ZTSO),
	KVM_ISA_EXT_ARR(ZVBB),
	KVM_ISA_EXT_ARR(ZVBC),
	KVM_ISA_EXT_ARR(ZVFH),
	KVM_ISA_EXT_ARR(ZVFHMIN),
	KVM_ISA_EXT_ARR(ZVKB),
	KVM_ISA_EXT_ARR(ZVKG),
	KVM_ISA_EXT_ARR(ZVKNED),
	KVM_ISA_EXT_ARR(ZVKNHA),
	KVM_ISA_EXT_ARR(ZVKNHB),
	KVM_ISA_EXT_ARR(ZVKSED),
	KVM_ISA_EXT_ARR(ZVKSH),
	KVM_ISA_EXT_ARR(ZVKT),
};

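/* Convert a base (single letter) ISA extension number to its KVM ISA extension ID */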
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

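/*
 * Map a KVM ISA extension ID to the guest ISA extension ID and check
 * that the corresponding extension is available on the host.
 */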
static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *guest_ext)
{
	unsigned long host_ext;

	if (kvm_ext >= KVM_RISCV_ISA_EXT_MAX ||
	    kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	*guest_ext = kvm_isa_ext_arr[kvm_ext];
	switch (*guest_ext) {
	case RISCV_ISA_EXT_SMNPM:
		/*
		 * Pointer masking effective in (H)S-mode is provided by the
		 * Smnpm extension, so that extension is reported to the guest,
		 * even though the CSR bits for configuring VS-mode pointer
		 * masking on the host side are part of the Ssnpm extension.
		 */
		host_ext = RISCV_ISA_EXT_SSNPM;
		break;
	default:
		host_ext = *guest_ext;
		break;
	}

	if (!__riscv_isa_extension_available(NULL, host_ext))
		return -ENOENT;

	return 0;
}

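/* Return true if the given ISA extension can be enabled for the Guest */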
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
		/* Sscofpmf depends on interrupt filtering defined in ssaia */
		return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
	case KVM_RISCV_ISA_EXT_SVADU:
		/*
		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * The guest OS can use Svadu only when the host OS enables Svadu.
		 */
		return arch_has_hw_pte_young();
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

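/* Return true if the given ISA extension can be disabled for the Guest */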
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	/* Extensions which don't have any mechanism to disable */
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SMNPM:
	/* There is no architectural config bit to disable sscofpmf completely */
	case KVM_RISCV_ISA_EXT_SSCOFPMF:
	case KVM_RISCV_ISA_EXT_SSNPM:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_SVVPTC:
	case KVM_RISCV_ISA_EXT_ZAAMO:
	case KVM_RISCV_ISA_EXT_ZABHA:
	case KVM_RISCV_ISA_EXT_ZACAS:
	case KVM_RISCV_ISA_EXT_ZALRSC:
	case KVM_RISCV_ISA_EXT_ZAWRS:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBC:
	case KVM_RISCV_ISA_EXT_ZBKB:
	case KVM_RISCV_ISA_EXT_ZBKC:
	case KVM_RISCV_ISA_EXT_ZBKX:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZCA:
	case KVM_RISCV_ISA_EXT_ZCB:
	case KVM_RISCV_ISA_EXT_ZCD:
	case KVM_RISCV_ISA_EXT_ZCF:
	case KVM_RISCV_ISA_EXT_ZCMOP:
	case KVM_RISCV_ISA_EXT_ZFA:
	case KVM_RISCV_ISA_EXT_ZFH:
	case KVM_RISCV_ISA_EXT_ZFHMIN:
	case KVM_RISCV_ISA_EXT_ZICCRSE:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICOND:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTNTL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
	case KVM_RISCV_ISA_EXT_ZIMOP:
	case KVM_RISCV_ISA_EXT_ZKND:
	case KVM_RISCV_ISA_EXT_ZKNE:
	case KVM_RISCV_ISA_EXT_ZKNH:
	case KVM_RISCV_ISA_EXT_ZKR:
	case KVM_RISCV_ISA_EXT_ZKSED:
	case KVM_RISCV_ISA_EXT_ZKSH:
	case KVM_RISCV_ISA_EXT_ZKT:
	case KVM_RISCV_ISA_EXT_ZTSO:
	case KVM_RISCV_ISA_EXT_ZVBB:
	case KVM_RISCV_ISA_EXT_ZVBC:
	case KVM_RISCV_ISA_EXT_ZVFH:
	case KVM_RISCV_ISA_EXT_ZVFHMIN:
	case KVM_RISCV_ISA_EXT_ZVKB:
	case KVM_RISCV_ISA_EXT_ZVKG:
	case KVM_RISCV_ISA_EXT_ZVKNED:
	case KVM_RISCV_ISA_EXT_ZVKNHA:
	case KVM_RISCV_ISA_EXT_ZVKNHB:
	case KVM_RISCV_ISA_EXT_ZVKSED:
	case KVM_RISCV_ISA_EXT_ZVKSH:
	case KVM_RISCV_ISA_EXT_ZVKT:
		return false;
	/* Extensions which can be disabled using Smstateen */
	case KVM_RISCV_ISA_EXT_SSAIA:
		return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
	case KVM_RISCV_ISA_EXT_SVADE:
		/*
		 * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
		 * Svade can't be disabled unless we support Svadu.
		 */
		return arch_has_hw_pte_young();
	default:
		break;
	}

	return true;
}

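/* Populate the default Guest ISA bitmap with every host extension that KVM can virtualize */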
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long guest_ext, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;
		if (kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(guest_ext, vcpu->arch.isa);
	}
}

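/* Get a CONFIG register (isa, cache block sizes, machine IDs, satp mode) */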
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

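/* Set a CONFIG register; most of them are writable only before the VCPU has run */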
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

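/* Get a CORE register (sepc, GPRs or the privilege mode) */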
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

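/* Read a general Guest CSR; sip is derived from the shadowed hvip value */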
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
						   unsigned long reg_num,
						   unsigned long reg_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		       sizeof(unsigned long))
		return -EINVAL;

	((unsigned long *)csr)[reg_num] = reg_val;
	return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
					    unsigned long reg_num,
					    unsigned long *out_val)
{
	struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

	if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
		       sizeof(unsigned long))
		return -EINVAL;

	*out_val = ((unsigned long *)csr)[reg_num];
	return 0;
}

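/* Get a CSR register by dispatching on the general/AIA/Smstateen subtype */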
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
							      &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_SMSTATEEN:
		rc = -EINVAL;
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
			rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
							      reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

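/* Report whether a single ISA extension is enabled in the Guest ISA bitmap */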
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	*reg_val = 0;
	if (__riscv_isa_extension_available(vcpu->arch.isa, guest_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long guest_ext;
	int ret;

	ret = kvm_riscv_vcpu_isa_check_host(reg_num, &guest_ext);
	if (ret)
		return ret;

	if (reg_val == test_bit(guest_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(guest_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(guest_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

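/* Collect the enable state of up to BITS_PER_LONG ISA extensions into one register value */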
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

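/*
 * Generate the ONE_REG indices of the CONFIG registers. With a NULL
 * @uindices only the number of registers is returned.
 */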
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
	     i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * is not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
		n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0, n3 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	/* copy Smstateen csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
		n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

		for (int i = 0; i < n3; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_SMSTATEEN | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned long guest_ext;
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		if (kvm_riscv_vcpu_isa_check_host(i, &guest_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int n = 0;

	for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	int total = 0;

	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

		for (int i = 0; i < n; i++) {
			u64 reg = KVM_REG_RISCV | size |
				  KVM_REG_RISCV_SBI_STATE |
				  KVM_REG_RISCV_SBI_STA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}

		total += n;
	}

	return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
	return copy_sbi_reg_indices(vcpu, NULL);
}

static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, v))
		return 0;

	/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
	return 37;
}

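/* Generate the ONE_REG indices of the vector CSRs and the 32 vector registers */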
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	int n = num_vector_regs(vcpu);
	u64 reg, size;
	int i;

	if (n == 0)
		return 0;

	/* copy vstart, vl, vtype, vcsr and vlenb */
	size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
	for (i = 0; i < 5; i++) {
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* vector_regs have a variable 'vlenb' size */
	size = __builtin_ctzl(cntx->vector.vlenb);
	size <<= KVM_REG_SIZE_SHIFT;
	for (i = 0; i < 32; i++) {
		reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
		      KVM_REG_RISCV_VECTOR_REG(i);

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_vector_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs(vcpu);
	res += num_sbi_regs(vcpu);

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_vector_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	return 0;
}

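/* Dispatch KVM_SET_ONE_REG to the appropriate register space */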
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

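/*
 * Dispatch KVM_GET_ONE_REG to the appropriate register space.
 *
 * Illustrative userspace sketch (not part of this file): on a 64-bit host,
 * the Zicbom block size can be read by passing KVM_GET_ONE_REG an id such as
 *   KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CONFIG |
 *   KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
 * with kvm_one_reg.addr pointing to an unsigned long.
 */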
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_STATE:
		return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}