xref: /linux/arch/riscv/kvm/vcpu_insn.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>

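/*
 * Bits [6:2] of a 32-bit encoding select the major opcode; the SYSTEM
 * opcode (0x73) therefore decodes to 28 once shifted right by 2.
 */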
#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

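/*
 * WFI and WRS are matched as complete 32-bit encodings (mask of all
 * ones); 0x10500073 is WFI and 0x00d00073 is the Zawrs WRS.NTO encoding.
 */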
#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MASK_WRS		0xffffffff
#define INSN_MATCH_WRS		0x00d00073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

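/*
 * Compressed (RVC) encodings are matched on the quadrant bits [1:0]
 * and funct3 in bits [15:13], hence the 0xe003 masks below.
 */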
#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

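/*
 * RV_X(x, s, n) extracts the n-bit field of x starting at bit s, e.g.
 * RV_X(insn, SH_RS1, 5) yields the rs1 register number. The RVC_*_IMM
 * macros reassemble the scaled, zero-extended offsets of compressed
 * load/store encodings from their scattered bit fields.
 */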
#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

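/*
 * General-purpose registers are accessed by computing a byte offset
 * into struct kvm_cpu_context, which lays out x0..x31 as its first 32
 * unsigned longs: REG_OFFSET() turns the 5-bit register field found at
 * bit position "pos" into (reg_num * REGBYTES), and REG_PTR() adds it
 * to the context base. GET_RS1S()/GET_RS2S() first map the 3-bit
 * compressed register fields onto x8..x15 via RVC_RS1S()/RVC_RS2S().
 */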
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
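/*
 * IMM_I/IMM_S sign-extend the I-type (bits [31:20]) and S-type
 * ({[31:25], [11:7]}) immediates via an arithmetic shift of the s32.
 */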
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

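/*
 * A trapped WRS (Zawrs) is treated as a polling/yield hint:
 * kvm_vcpu_on_spin() may perform a directed yield to another VCPU,
 * with SR_SPP indicating whether the guest was in its supervisor mode.
 */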
static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wrs_exit_stat++;
	kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as for the "func" callback
	 * in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

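/*
 * Accesses to the seed CSR (Zkr) are forwarded to user space for
 * emulation, but only if the Zkr extension is enabled for this VCPU;
 * otherwise an illegal instruction trap is injected into the guest.
 */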
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			unsigned long *val, unsigned long new_val,
			unsigned long wr_mask)
{
	if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
		return KVM_INSN_ILLEGAL_TRAP;

	return KVM_INSN_EXIT_TO_USER_SPACE;
}

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
	{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
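	/* rd == x0 is skipped so the guest's zero register is never written */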
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
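	/*
	 * funct3 selects the CSR variant. The register forms use rs1 as
	 * the source value, the immediate forms use the 5-bit rs1 field
	 * (zimm) directly: CSRRW(I) writes every bit, CSRRS(I) sets and
	 * CSRRC(I) clears the bits given by the write mask.
	 */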
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
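	/*
	 * Per the convention in struct insn_func, rc > 0 means the access
	 * was handled in the kernel. A "continue with next sepc" result
	 * carries the read value back to the guest via
	 * kvm_riscv_vcpu_csr_return(), which also advances sepc, so the
	 * result is converted to "continue with same sepc".
	 */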
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
	{
		.mask  = INSN_MASK_WRS,
		.match = INSN_MATCH_WRS,
		.func  = wrs_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

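	/*
	 * Errors (< 0) and exit-to-user-space (0) pass through unchanged;
	 * every other code means "continue the run-loop".
	 */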
	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

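	/*
	 * stval normally holds the trapping instruction. If it is zero,
	 * fetch the instruction from guest memory at sepc; a failed
	 * unprivileged read is redirected back to the guest. Anything
	 * that is still a 16-bit encoding cannot be handled here, so it
	 * is reflected as an illegal instruction.
	 */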
	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
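		/*
		 * Setting the two low bits lets the transformed value be
		 * decoded as a regular 32-bit encoding below, while
		 * bit[1] of htinst still records whether the original
		 * trapping instruction was 16-bit or 32-bit.
		 */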
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
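	/*
	 * len is the access width in bytes; shift is how far the loaded
	 * value is shifted left and back right in
	 * kvm_riscv_vcpu_mmio_return() when it is written to rd. For the
	 * compressed forms, insn is rewritten so that the destination
	 * register ends up in the standard rd field.
	 */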
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

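	/*
	 * Read the store value from the standard rs2 field first; the
	 * compressed encodings below re-read it from the correct source
	 * register field (rs2' for C.SW/C.SD, bits [6:2] for
	 * C.SWSP/C.SDSP).
	 */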
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

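	/*
	 * Widen the bytes returned by the MMIO read to register width,
	 * using the shift recorded when the load was decoded, and write
	 * the result to the destination register.
	 */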
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}
783