// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/insn.h>

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

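/*
 * Redirect an illegal instruction trap into the guest so that the
 * guest kernel handles the faulting instruction itself.
 */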
static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

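/*
 * Redirect a virtual instruction trap into the guest so that the
 * guest kernel handles the faulting instruction itself.
 */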
static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

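/* Emulate a trapped WFI by halting the VCPU until it becomes runnable */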
static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

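/*
 * Treat a trapped WRS instruction as a yield hint: let the VCPU briefly
 * spin and donate its timeslice via kvm_vcpu_on_spin().
 */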
static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wrs_exit_stat++;
	kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as for the "func" callback
	 * in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

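/*
 * The seed CSR is not emulated in-kernel: accesses are forwarded to
 * user space, or turned into an illegal instruction trap when the
 * guest does not have the Zkr extension.
 */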
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			unsigned long *val, unsigned long new_val,
			unsigned long wr_mask)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
		return KVM_INSN_ILLEGAL_TRAP;

	return KVM_INSN_EXIT_TO_USER_SPACE;
}

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
	{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

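/*
 * Emulate a trapped CSR instruction: decode the access, try in-kernel
 * emulation via csr_funcs[], and otherwise forward the access to user
 * space as a KVM_EXIT_RISCV_CSR exit.
 */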
static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
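	/* For CSRR*I variants, the rs1 field holds a 5-bit immediate (zimm) */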
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func = wfi_insn,
	},
	{
		.mask = INSN_MASK_WRS,
		.match = INSN_MATCH_WRS,
		.func = wrs_insn,
	},
};

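/*
 * Dispatch a trapped SYSTEM-opcode instruction to its handler and map the
 * handler's return code onto a trap redirect, sepc update, or run-loop
 * result.
 */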
static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

static bool is_load_guest_page_fault(unsigned long scause)
{
	/*
	 * If a g-stage page fault occurs, the direct approach is to let
	 * the g-stage page fault handler handle it naturally; however,
	 * calling the g-stage page fault handler here seems rather strange.
	 * Since this is a corner case, we can simply return to the guest
	 * and re-execute the same PC. This will trigger a g-stage page
	 * fault again and the regular g-stage page fault handler will
	 * populate the g-stage page table.
	 */
	return (scause == EXC_LOAD_GUEST_PAGE_FAULT);
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				if (is_load_guest_page_fault(utrap.scause))
					return 1;
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			if (is_load_guest_page_fault(utrap.scause))
				return 1;
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
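	/*
	 * "shift" is consumed later by kvm_riscv_vcpu_mmio_return() when
	 * the loaded value is written back to the destination register.
	 */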
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			if (is_load_guest_page_fault(utrap.scause))
				return 1;
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

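	/* Read the store data from rs2; C-extension forms re-fetch it below */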
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			       or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

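	/* Write the MMIO data to the destination register, applying shift */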
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}