xref: /linux/arch/loongarch/kvm/exit.c (revision 7f4f3b14e8079ecde096bd734af10e30d40c27b7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

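/*
 * kvm_emu_cpucfg() - Emulate the CPUCFG instruction for the guest.
 *
 * Reads the CPUCFG index from GPR rj and writes the emulated value into
 * GPR rd: vCPU-local values for indexes below KVM_MAX_CPUCFG_REGS, the
 * KVM signature and PV feature bits for the KVM-reserved range, and 0
 * for anything undefined.
 */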
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per the LoongArch Reference Manual 2.2.10.5, the return
	 * value is 0 for an undefined CPUCFG index.
	 *
	 * Disable preemption since the hardware GCSR is accessed.
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

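/* Emulate csrrd: return the software GCSR value, or 0 for an unsupported id */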
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Per the LoongArch Reference Manual Volume 1, Section 4.2.1,
	 * the return value is 0 for an undefined CSR id.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

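/* Emulate csrwr: write val to the software GCSR and return the old value */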
static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

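/*
 * Emulate csrxchg: replace the bits of the software GCSR selected by
 * csr_mask with the corresponding bits of val, and return the old value
 * of the masked bits. E.g. old 0xff, mask 0x0f, val 0x12 yields a new
 * CSR value of 0xf2 and a return value of 0x0f.
 */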
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

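/*
 * kvm_handle_csr() - Emulate a trapped CSR instruction.
 *
 * For PMU CSRs the PC is rewound and KVM_REQ_PMU is raised so the access
 * is replayed with the PMU context loaded; all other accesses are
 * dispatched on rj to the csrrd/csrwr/csrxchg emulation above.
 */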
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * The rj field selects the CSR operation:
	 * rj == 0 means csrrd
	 * rj == 1 means csrwr
	 * any other rj means csrxchg, with rj naming the mask register
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

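/*
 * kvm_emu_iocsr() - Emulate a guest IOCSR access.
 *
 * Decodes the access width and direction from the opcode, then tries
 * the in-kernel KVM_IOCSR_BUS devices first; on failure the access
 * details are recorded in vcpu->run so user space can complete it.
 */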
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width and direction has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	ret = EMULATE_DO_IOCSR;
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is little-endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

	if (run->iocsr_io.is_write) {
		if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
			ret = EMULATE_DONE;
		else
			/* Save the data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);

		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
			ret = EMULATE_DONE;
		else
			/* Save the register id for IOCSR read completion */
			vcpu->arch.io_gpr = rd;

		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}

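/*
 * Complete an IOCSR read handled by user space: sign-extend the returned
 * data into the GPR saved in vcpu->arch.io_gpr.
 */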
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

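/* Emulate the IDLE instruction: halt the vCPU until it is runnable again */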
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

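/*
 * kvm_trap_handle_gspr() - Decode and emulate a trapped GSPR instruction.
 *
 * The PC is advanced first and rolled back if emulation fails, so that
 * successful emulation resumes the guest after the trapping instruction.
 */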
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR exception is triggered by:
 * 1) executing the CPUCFG instruction;
 * 2) executing the CACOP/IDLE instructions;
 * 3) accessing unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

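/*
 * kvm_emu_mmio_read() - Emulate a guest load from an MMIO address.
 *
 * Decodes the access width and signedness from the load instruction,
 * then tries the in-kernel KVM_MMIO_BUS devices; on failure the access
 * is recorded in vcpu->run so it can be completed in user space.
 */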
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in
		 * KVM, there is no need to return to user space to handle
		 * the MMIO exception.
		 */
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				      run->mmio.len, &vcpu->arch.gprs[rd]);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return ret;
}

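/*
 * Complete an MMIO read handled by user space: advance the PC and load
 * the returned data, sign- or zero-extended, into the saved GPR.
 */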
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
			run->mmio.phys_addr, run->mmio.data);

	return er;
}

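/*
 * kvm_emu_mmio_write() - Emulate a guest store to an MMIO address.
 *
 * Decodes the access width from the store instruction and copies the GPR
 * value into run->mmio.data, then tries the in-kernel KVM_MMIO_BUS
 * devices; on failure the write is left in vcpu->run for user space.
 */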
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update the PC, but hold on to the current PC in case there is
	 * an error and we want to roll it back.
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in
		 * KVM, there is no need to return to user space to handle
		 * the MMIO exception.
		 */
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Roll back the PC since emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return ret;
}

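/*
 * Handle a guest read/write fault: try to resolve it as a memory fault
 * first, and fall back to MMIO emulation when no memslot backs the
 * faulting address.
 */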
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled at host.
 * @vcpu:	Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU when it hasn't
 * been allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If the guest FPU is not present, the FPU operation should have
	 * been treated as a reserved instruction!
	 * If the FPU is already in use, we shouldn't get here at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

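/*
 * kvm_save_notify() - Handle the KVM_HCALL_FUNC_NOTIFY hypercall.
 *
 * Currently only steal-time registration is supported: A1 carries the
 * feature bit and A2 the guest physical address of the steal-time area,
 * together with its valid bit.
 */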
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}

/**
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/**
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

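/*
 * kvm_handle_lbt_disabled() - Guest used LBT while disabled in root.
 *
 * Handle when the guest attempts to use LBT when it is disabled in the
 * root context.
 */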
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

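/*
 * kvm_send_pv_ipi() - Handle the PV IPI hypercall.
 *
 * A1/A2 carry a 128-bit destination bitmap and A3 the base cpuid, so bit
 * n of the bitmap targets vCPU (A3 + n). SWI0 is injected into every
 * vCPU whose bit is set.
 */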
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}

/*
 * Hypercall emulation always returns to the guest. The caller should
 * check the retval left in A0.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

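/*
 * kvm_handle_hypercall() - Dispatch a trapped HVC instruction by its
 * immediate code; the PC is advanced past the instruction whenever
 * execution resumes in the guest.
 */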
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a NOP instruction; only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback for handling unimplemented guest exit reasons
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

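/* Guest exit dispatch table, indexed by exception code; unhandled codes fall back to kvm_fault_ni() */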
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};

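/* Dispatch a guest exit to its handler according to the fault (exception) code */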
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}