xref: /linux/arch/loongarch/kvm/exit.c (revision 23c996fc2bc1978a02c64eddb90b4ab5d309c8df)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/err.h>
7 #include <linux/errno.h>
8 #include <linux/kvm_host.h>
9 #include <linux/module.h>
10 #include <linux/preempt.h>
11 #include <linux/vmalloc.h>
12 #include <trace/events/kvm.h>
13 #include <asm/fpu.h>
14 #include <asm/inst.h>
15 #include <asm/loongarch.h>
16 #include <asm/mmzone.h>
17 #include <asm/numa.h>
18 #include <asm/time.h>
19 #include <asm/tlb.h>
20 #include <asm/kvm_csr.h>
21 #include <asm/kvm_vcpu.h>
22 #include "trace.h"
23 
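/*
 * Emulate the CPUCFG instruction: in-range indexes return the vCPU's
 * cached CPUCFG values, the KVM-specific window returns the hypervisor
 * signature and feature bits, and any other index reads as 0.
 */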
24 static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
25 {
26 	int rd, rj;
27 	unsigned int index;
28 
29 	if (inst.reg2_format.opcode != cpucfg_op)
30 		return EMULATE_FAIL;
31 
32 	rd = inst.reg2_format.rd;
33 	rj = inst.reg2_format.rj;
34 	++vcpu->stat.cpucfg_exits;
35 	index = vcpu->arch.gprs[rj];
36 
37 	/*
38 	 * Per the LoongArch Reference Manual, section 2.2.10.5, the return
39 	 * value is 0 for an undefined CPUCFG index.
40 	 *
41 	 * Disable preemption since the hardware GCSR is accessed.
42 	 */
43 	preempt_disable();
44 	switch (index) {
45 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
46 		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
47 		break;
48 	case CPUCFG_KVM_SIG:
49 		/* CPUCFG emulation in the range 0x40000000 -- 0x400000ff */
50 		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
51 		break;
52 	case CPUCFG_KVM_FEATURE:
53 		vcpu->arch.gprs[rd] = KVM_FEATURE_IPI;
54 		break;
55 	default:
56 		vcpu->arch.gprs[rd] = 0;
57 		break;
58 	}
59 	preempt_enable();
60 
61 	return EMULATE_DONE;
62 }
63 
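/*
 * Software GCSR emulation helpers backing csrrd/csrwr/csrxchg. Only CSRs
 * flagged SW_GCSR are kept in the software CSR image; accesses to any
 * other CSR ID warn once and read back as 0.
 */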
64 static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
65 {
66 	unsigned long val = 0;
67 	struct loongarch_csrs *csr = vcpu->arch.csr;
68 
69 	/*
70 	 * Per LoongArch Reference Manual Volume 1, section 4.2.1, the return
71 	 * value is 0 for an undefined CSR ID.
72 	 */
73 	if (get_gcsr_flag(csrid) & SW_GCSR)
74 		val = kvm_read_sw_gcsr(csr, csrid);
75 	else
76 		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
77 
78 	return val;
79 }
80 
81 static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
82 {
83 	unsigned long old = 0;
84 	struct loongarch_csrs *csr = vcpu->arch.csr;
85 
86 	if (get_gcsr_flag(csrid) & SW_GCSR) {
87 		old = kvm_read_sw_gcsr(csr, csrid);
88 		kvm_write_sw_gcsr(csr, csrid, val);
89 	} else
90 		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
91 
92 	return old;
93 }
94 
95 static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
96 				unsigned long csr_mask, unsigned long val)
97 {
98 	unsigned long old = 0;
99 	struct loongarch_csrs *csr = vcpu->arch.csr;
100 
101 	if (get_gcsr_flag(csrid) & SW_GCSR) {
102 		old = kvm_read_sw_gcsr(csr, csrid);
103 		val = (old & ~csr_mask) | (val & csr_mask);
104 		kvm_write_sw_gcsr(csr, csrid, val);
105 		old = old & csr_mask;
106 	} else
107 		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
108 
109 	return old;
110 }
111 
112 static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
113 {
114 	unsigned int rd, rj, csrid;
115 	unsigned long csr_mask, val = 0;
116 
117 	/*
118 	 * The rj field selects the CSR operation:
119 	 * rj = 0 means csrrd
120 	 * rj = 1 means csrwr
121 	 * any other rj means csrxchg, with the exchange mask taken from rj
122 	 */
123 	rd = inst.reg2csr_format.rd;
124 	rj = inst.reg2csr_format.rj;
125 	csrid = inst.reg2csr_format.csr;
126 
127 	/* Process CSR ops */
128 	switch (rj) {
129 	case 0: /* process csrrd */
130 		val = kvm_emu_read_csr(vcpu, csrid);
131 		vcpu->arch.gprs[rd] = val;
132 		break;
133 	case 1: /* process csrwr */
134 		val = vcpu->arch.gprs[rd];
135 		val = kvm_emu_write_csr(vcpu, csrid, val);
136 		vcpu->arch.gprs[rd] = val;
137 		break;
138 	default: /* process csrxchg */
139 		val = vcpu->arch.gprs[rd];
140 		csr_mask = vcpu->arch.gprs[rj];
141 		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
142 		vcpu->arch.gprs[rd] = val;
143 	}
144 
145 	return EMULATE_DONE;
146 }
147 
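/*
 * Stage a trapped IOCSR access in run->iocsr_io: the opcode selects the
 * access width and direction, a write copies the source GPR into the
 * data buffer, and the GPR number is saved in io_gpr so that a read can
 * be completed by kvm_complete_iocsr_read().
 */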
148 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
149 {
150 	int ret;
151 	unsigned long val;
152 	u32 addr, rd, rj, opcode;
153 
154 	/*
155 	 * Each IOCSR access width and direction has its own opcode
156 	 */
157 	rd = inst.reg2_format.rd;
158 	rj = inst.reg2_format.rj;
159 	opcode = inst.reg2_format.opcode;
160 	addr = vcpu->arch.gprs[rj];
161 	ret = EMULATE_DO_IOCSR;
162 	run->iocsr_io.phys_addr = addr;
163 	run->iocsr_io.is_write = 0;
164 
165 	/* LoongArch is little-endian */
166 	switch (opcode) {
167 	case iocsrrdb_op:
168 		run->iocsr_io.len = 1;
169 		break;
170 	case iocsrrdh_op:
171 		run->iocsr_io.len = 2;
172 		break;
173 	case iocsrrdw_op:
174 		run->iocsr_io.len = 4;
175 		break;
176 	case iocsrrdd_op:
177 		run->iocsr_io.len = 8;
178 		break;
179 	case iocsrwrb_op:
180 		run->iocsr_io.len = 1;
181 		run->iocsr_io.is_write = 1;
182 		break;
183 	case iocsrwrh_op:
184 		run->iocsr_io.len = 2;
185 		run->iocsr_io.is_write = 1;
186 		break;
187 	case iocsrwrw_op:
188 		run->iocsr_io.len = 4;
189 		run->iocsr_io.is_write = 1;
190 		break;
191 	case iocsrwrd_op:
192 		run->iocsr_io.len = 8;
193 		run->iocsr_io.is_write = 1;
194 		break;
195 	default:
196 		ret = EMULATE_FAIL;
197 		break;
198 	}
199 
200 	if (ret == EMULATE_DO_IOCSR) {
201 		if (run->iocsr_io.is_write) {
202 			val = vcpu->arch.gprs[rd];
203 			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
204 		}
205 		vcpu->arch.io_gpr = rd;
206 	}
207 
208 	return ret;
209 }
210 
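/*
 * Complete an IOCSR read once run->iocsr_io.data has been filled in:
 * the value is sign-extended into the GPR recorded by kvm_emu_iocsr().
 */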
211 int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
212 {
213 	enum emulation_result er = EMULATE_DONE;
214 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
215 
216 	switch (run->iocsr_io.len) {
217 	case 1:
218 		*gpr = *(s8 *)run->iocsr_io.data;
219 		break;
220 	case 2:
221 		*gpr = *(s16 *)run->iocsr_io.data;
222 		break;
223 	case 4:
224 		*gpr = *(s32 *)run->iocsr_io.data;
225 		break;
226 	case 8:
227 		*gpr = *(s64 *)run->iocsr_io.data;
228 		break;
229 	default:
230 		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
231 				run->iocsr_io.len, vcpu->arch.badv);
232 		er = EMULATE_FAIL;
233 		break;
234 	}
235 
236 	return er;
237 }
238 
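/*
 * Emulate the IDLE instruction: account the exit and halt the vCPU
 * until it becomes runnable again.
 */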
239 int kvm_emu_idle(struct kvm_vcpu *vcpu)
240 {
241 	++vcpu->stat.idle_exits;
242 	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
243 
244 	if (!kvm_arch_vcpu_runnable(vcpu))
245 		kvm_vcpu_halt(vcpu);
246 
247 	return EMULATE_DONE;
248 }
249 
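/*
 * Emulate the instruction that raised the GSPR exception (CPUCFG, CSR
 * access, CACOP, IDLE or IOCSR). The PC is advanced before decoding and
 * rolled back if emulation fails.
 */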
250 static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
251 {
252 	unsigned long curr_pc;
253 	larch_inst inst;
254 	enum emulation_result er = EMULATE_DONE;
255 	struct kvm_run *run = vcpu->run;
256 
257 	/* Fetch the instruction */
258 	inst.word = vcpu->arch.badi;
259 	curr_pc = vcpu->arch.pc;
260 	update_pc(&vcpu->arch);
261 
262 	trace_kvm_exit_gspr(vcpu, inst.word);
263 	er = EMULATE_FAIL;
264 	switch (((inst.word >> 24) & 0xff)) {
265 	case 0x0: /* CPUCFG GSPR */
266 		er = kvm_emu_cpucfg(vcpu, inst);
267 		break;
268 	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
269 		er = kvm_handle_csr(vcpu, inst);
270 		break;
271 	case 0x6: /* Cache, Idle and IOCSR GSPR */
272 		switch (((inst.word >> 22) & 0x3ff)) {
273 		case 0x18: /* Cache GSPR */
274 			er = EMULATE_DONE;
275 			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
276 			break;
277 		case 0x19: /* Idle/IOCSR GSPR */
278 			switch (((inst.word >> 15) & 0x1ffff)) {
279 			case 0xc90: /* IOCSR GSPR */
280 				er = kvm_emu_iocsr(inst, run, vcpu);
281 				break;
282 			case 0xc91: /* Idle GSPR */
283 				er = kvm_emu_idle(vcpu);
284 				break;
285 			default:
286 				er = EMULATE_FAIL;
287 				break;
288 			}
289 			break;
290 		default:
291 			er = EMULATE_FAIL;
292 			break;
293 		}
294 		break;
295 	default:
296 		er = EMULATE_FAIL;
297 		break;
298 	}
299 
300 	/* Roll back the PC only if emulation was unsuccessful */
301 	if (er == EMULATE_FAIL) {
302 		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
303 			curr_pc, __func__, inst.word);
304 
305 		kvm_arch_vcpu_dump_regs(vcpu);
306 		vcpu->arch.pc = curr_pc;
307 	}
308 
309 	return er;
310 }
311 
312 /*
313  * A GSPR exception is triggered when the guest:
314  * 1) executes a CPUCFG instruction;
315  * 2) executes a CACOP/IDLE instruction;
316  * 3) accesses an unimplemented CSR/IOCSR.
317  */
318 static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
319 {
320 	int ret = RESUME_GUEST;
321 	enum emulation_result er = EMULATE_DONE;
322 
323 	er = kvm_trap_handle_gspr(vcpu);
324 
325 	if (er == EMULATE_DONE) {
326 		ret = RESUME_GUEST;
327 	} else if (er == EMULATE_DO_MMIO) {
328 		vcpu->run->exit_reason = KVM_EXIT_MMIO;
329 		ret = RESUME_HOST;
330 	} else if (er == EMULATE_DO_IOCSR) {
331 		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
332 		ret = RESUME_HOST;
333 	} else {
334 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
335 		ret = RESUME_GUEST;
336 	}
337 
338 	return ret;
339 }
340 
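/*
 * Decode a trapped load and turn it into an MMIO read request in
 * run->mmio. The access width, signedness and destination GPR are
 * recorded so that kvm_complete_mmio_read() can write back the result.
 */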
341 int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
342 {
343 	int ret;
344 	unsigned int op8, opcode, rd;
345 	struct kvm_run *run = vcpu->run;
346 
347 	run->mmio.phys_addr = vcpu->arch.badv;
348 	vcpu->mmio_needed = 2;	/* signed */
349 	op8 = (inst.word >> 24) & 0xff;
350 	ret = EMULATE_DO_MMIO;
351 
352 	switch (op8) {
353 	case 0x24 ... 0x27:	/* ldptr.w/d process */
354 		rd = inst.reg2i14_format.rd;
355 		opcode = inst.reg2i14_format.opcode;
356 
357 		switch (opcode) {
358 		case ldptrw_op:
359 			run->mmio.len = 4;
360 			break;
361 		case ldptrd_op:
362 			run->mmio.len = 8;
363 			break;
364 		default:
365 			break;
366 		}
367 		break;
368 	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
369 		rd = inst.reg2i12_format.rd;
370 		opcode = inst.reg2i12_format.opcode;
371 
372 		switch (opcode) {
373 		case ldb_op:
374 			run->mmio.len = 1;
375 			break;
376 		case ldbu_op:
377 			vcpu->mmio_needed = 1;	/* unsigned */
378 			run->mmio.len = 1;
379 			break;
380 		case ldh_op:
381 			run->mmio.len = 2;
382 			break;
383 		case ldhu_op:
384 			vcpu->mmio_needed = 1;	/* unsigned */
385 			run->mmio.len = 2;
386 			break;
387 		case ldw_op:
388 			run->mmio.len = 4;
389 			break;
390 		case ldwu_op:
391 			vcpu->mmio_needed = 1;	/* unsigned */
392 			run->mmio.len = 4;
393 			break;
394 		case ldd_op:
395 			run->mmio.len = 8;
396 			break;
397 		default:
398 			ret = EMULATE_FAIL;
399 			break;
400 		}
401 		break;
402 	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
403 		rd = inst.reg3_format.rd;
404 		opcode = inst.reg3_format.opcode;
405 
406 		switch (opcode) {
407 		case ldxb_op:
408 			run->mmio.len = 1;
409 			break;
410 		case ldxbu_op:
411 			run->mmio.len = 1;
412 			vcpu->mmio_needed = 1;	/* unsigned */
413 			break;
414 		case ldxh_op:
415 			run->mmio.len = 2;
416 			break;
417 		case ldxhu_op:
418 			run->mmio.len = 2;
419 			vcpu->mmio_needed = 1;	/* unsigned */
420 			break;
421 		case ldxw_op:
422 			run->mmio.len = 4;
423 			break;
424 		case ldxwu_op:
425 			run->mmio.len = 4;
426 			vcpu->mmio_needed = 1;	/* unsigned */
427 			break;
428 		case ldxd_op:
429 			run->mmio.len = 8;
430 			break;
431 		default:
432 			ret = EMULATE_FAIL;
433 			break;
434 		}
435 		break;
436 	default:
437 		ret = EMULATE_FAIL;
438 	}
439 
440 	if (ret == EMULATE_DO_MMIO) {
441 		/* Set for kvm_complete_mmio_read() use */
442 		vcpu->arch.io_gpr = rd;
443 		run->mmio.is_write = 0;
444 		vcpu->mmio_is_write = 0;
445 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
446 				run->mmio.phys_addr, NULL);
447 	} else {
448 		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
449 			inst.word, vcpu->arch.pc, vcpu->arch.badv);
450 		kvm_arch_vcpu_dump_regs(vcpu);
451 		vcpu->mmio_needed = 0;
452 	}
453 
454 	return ret;
455 }
456 
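/*
 * Complete an MMIO read: advance the PC past the emulated load and copy
 * the (sign- or zero-extended) data into the GPR saved by
 * kvm_emu_mmio_read().
 */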
457 int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
458 {
459 	enum emulation_result er = EMULATE_DONE;
460 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
461 
462 	/* Advance the PC past the emulated load */
463 	update_pc(&vcpu->arch);
464 	switch (run->mmio.len) {
465 	case 1:
466 		if (vcpu->mmio_needed == 2)
467 			*gpr = *(s8 *)run->mmio.data;
468 		else
469 			*gpr = *(u8 *)run->mmio.data;
470 		break;
471 	case 2:
472 		if (vcpu->mmio_needed == 2)
473 			*gpr = *(s16 *)run->mmio.data;
474 		else
475 			*gpr = *(u16 *)run->mmio.data;
476 		break;
477 	case 4:
478 		if (vcpu->mmio_needed == 2)
479 			*gpr = *(s32 *)run->mmio.data;
480 		else
481 			*gpr = *(u32 *)run->mmio.data;
482 		break;
483 	case 8:
484 		*gpr = *(s64 *)run->mmio.data;
485 		break;
486 	default:
487 		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
488 				run->mmio.len, vcpu->arch.badv);
489 		er = EMULATE_FAIL;
490 		break;
491 	}
492 
493 	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
494 			run->mmio.phys_addr, run->mmio.data);
495 
496 	return er;
497 }
498 
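/*
 * Decode a trapped store and turn it into an MMIO write request in
 * run->mmio, copying the source GPR value into the data buffer. The PC
 * is advanced up front and rolled back if the instruction cannot be
 * emulated.
 */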
499 int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
500 {
501 	int ret;
502 	unsigned int rd, op8, opcode;
503 	unsigned long curr_pc, rd_val = 0;
504 	struct kvm_run *run = vcpu->run;
505 	void *data = run->mmio.data;
506 
507 	/*
508 	 * Update the PC, but hold onto the current PC in case there is
509 	 * an error and we need to roll it back
510 	 */
511 	curr_pc = vcpu->arch.pc;
512 	update_pc(&vcpu->arch);
513 
514 	op8 = (inst.word >> 24) & 0xff;
515 	run->mmio.phys_addr = vcpu->arch.badv;
516 	ret = EMULATE_DO_MMIO;
517 	switch (op8) {
518 	case 0x24 ... 0x27:	/* stptr.w/d process */
519 		rd = inst.reg2i14_format.rd;
520 		opcode = inst.reg2i14_format.opcode;
521 
522 		switch (opcode) {
523 		case stptrw_op:
524 			run->mmio.len = 4;
525 			*(unsigned int *)data = vcpu->arch.gprs[rd];
526 			break;
527 		case stptrd_op:
528 			run->mmio.len = 8;
529 			*(unsigned long *)data = vcpu->arch.gprs[rd];
530 			break;
531 		default:
532 			ret = EMULATE_FAIL;
533 			break;
534 		}
535 		break;
536 	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
537 		rd = inst.reg2i12_format.rd;
538 		opcode = inst.reg2i12_format.opcode;
539 		rd_val = vcpu->arch.gprs[rd];
540 
541 		switch (opcode) {
542 		case stb_op:
543 			run->mmio.len = 1;
544 			*(unsigned char *)data = rd_val;
545 			break;
546 		case sth_op:
547 			run->mmio.len = 2;
548 			*(unsigned short *)data = rd_val;
549 			break;
550 		case stw_op:
551 			run->mmio.len = 4;
552 			*(unsigned int *)data = rd_val;
553 			break;
554 		case std_op:
555 			run->mmio.len = 8;
556 			*(unsigned long *)data = rd_val;
557 			break;
558 		default:
559 			ret = EMULATE_FAIL;
560 			break;
561 		}
562 		break;
563 	case 0x38:	/* stx.b/h/w/d process */
564 		rd = inst.reg3_format.rd;
565 		opcode = inst.reg3_format.opcode;
566 
567 		switch (opcode) {
568 		case stxb_op:
569 			run->mmio.len = 1;
570 			*(unsigned char *)data = vcpu->arch.gprs[rd];
571 			break;
572 		case stxh_op:
573 			run->mmio.len = 2;
574 			*(unsigned short *)data = vcpu->arch.gprs[rd];
575 			break;
576 		case stxw_op:
577 			run->mmio.len = 4;
578 			*(unsigned int *)data = vcpu->arch.gprs[rd];
579 			break;
580 		case stxd_op:
581 			run->mmio.len = 8;
582 			*(unsigned long *)data = vcpu->arch.gprs[rd];
583 			break;
584 		default:
585 			ret = EMULATE_FAIL;
586 			break;
587 		}
588 		break;
589 	default:
590 		ret = EMULATE_FAIL;
591 	}
592 
593 	if (ret == EMULATE_DO_MMIO) {
594 		run->mmio.is_write = 1;
595 		vcpu->mmio_needed = 1;
596 		vcpu->mmio_is_write = 1;
597 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
598 				run->mmio.phys_addr, data);
599 	} else {
600 		/* Roll back the PC since emulation was unsuccessful */
601 		vcpu->arch.pc = curr_pc;
602 		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
603 			inst.word, vcpu->arch.pc, vcpu->arch.badv);
604 		kvm_arch_vcpu_dump_regs(vcpu);
605 	}
606 
607 	return ret;
608 }
609 
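/*
 * Common handler for guest read/write (TLB) faults: first try to map the
 * faulting address with kvm_handle_mm_fault(); if that fails, emulate
 * the access as MMIO, or inject an address error exception when the
 * instruction cannot be emulated either.
 */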
610 static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
611 {
612 	int ret;
613 	larch_inst inst;
614 	enum emulation_result er = EMULATE_DONE;
615 	struct kvm_run *run = vcpu->run;
616 	unsigned long badv = vcpu->arch.badv;
617 
618 	ret = kvm_handle_mm_fault(vcpu, badv, write);
619 	if (ret) {
620 		/* Treat as MMIO */
621 		inst.word = vcpu->arch.badi;
622 		if (write) {
623 			er = kvm_emu_mmio_write(vcpu, inst);
624 		} else {
625 			/* A code fetch fault doesn't count as an MMIO */
626 			if (kvm_is_ifetch_fault(&vcpu->arch)) {
627 				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
628 				return RESUME_GUEST;
629 			}
630 
631 			er = kvm_emu_mmio_read(vcpu, inst);
632 		}
633 	}
634 
635 	if (er == EMULATE_DONE) {
636 		ret = RESUME_GUEST;
637 	} else if (er == EMULATE_DO_MMIO) {
638 		run->exit_reason = KVM_EXIT_MMIO;
639 		ret = RESUME_HOST;
640 	} else {
641 		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
642 		ret = RESUME_GUEST;
643 	}
644 
645 	return ret;
646 }
647 
648 static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
649 {
650 	return kvm_handle_rdwr_fault(vcpu, false);
651 }
652 
653 static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
654 {
655 	return kvm_handle_rdwr_fault(vcpu, true);
656 }
657 
658 /**
659  * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled in the host
660  * @vcpu:	Virtual CPU context.
661  *
662  * Handle the case where the guest attempts to use the FPU while it has not
663  * been enabled by the root context.
664  */
665 static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
666 {
667 	struct kvm_run *run = vcpu->run;
668 
669 	if (!kvm_guest_has_fpu(&vcpu->arch)) {
670 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
671 		return RESUME_GUEST;
672 	}
673 
674 	/*
675 	 * If the guest FPU were not present, the FPU operation would have
676 	 * been treated as a reserved instruction!
677 	 * If the FPU is already in use, we shouldn't get here at all.
678 	 */
679 	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
680 		kvm_err("%s internal error\n", __func__);
681 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
682 		return RESUME_HOST;
683 	}
684 
685 	kvm_own_fpu(vcpu);
686 
687 	return RESUME_GUEST;
688 }
689 
690 /**
691  * kvm_handle_lsx_disabled() - Guest used LSX while it is disabled in the root context
692  * @vcpu:	Virtual CPU context.
693  *
694  * Handle the case where the guest attempts to use LSX when it is disabled in
695  * the root context.
696  */
697 static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
698 {
699 	if (kvm_own_lsx(vcpu))
700 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
701 
702 	return RESUME_GUEST;
703 }
704 
705 /**
706  * kvm_handle_lasx_disabled() - Guest used LASX while it is disabled in the root context
707  * @vcpu:	Virtual CPU context.
708  *
709  * Handle the case where the guest attempts to use LASX when it is disabled in
710  * the root context.
711  */
712 static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
713 {
714 	if (kvm_own_lasx(vcpu))
715 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
716 
717 	return RESUME_GUEST;
718 }
719 
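/*
 * Para-virtualized IPI hypercall: a1 and a2 carry a bitmap of target
 * vCPUs relative to the base cpuid in a3. SWI0 is queued on every
 * destination vCPU and the vCPU is kicked to deliver it.
 */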
720 static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
721 {
722 	unsigned int min, cpu, i;
723 	unsigned long ipi_bitmap;
724 	struct kvm_vcpu *dest;
725 
726 	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
727 	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
728 		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
729 		if (!ipi_bitmap)
730 			continue;
731 
732 		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
733 		while (cpu < BITS_PER_LONG) {
734 			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
735 			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
736 			if (!dest)
737 				continue;
738 
739 			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
740 			kvm_queue_irq(dest, INT_SWI0);
741 			kvm_vcpu_kick(dest);
742 		}
743 	}
744 
745 	return 0;
746 }
747 
748 /*
749  * Hypercall emulation always returns to the guest; the caller should check the return value.
750  */
751 static void kvm_handle_service(struct kvm_vcpu *vcpu)
752 {
753 	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
754 	long ret;
755 
756 	switch (func) {
757 	case KVM_HCALL_FUNC_IPI:
758 		kvm_send_pv_ipi(vcpu);
759 		ret = KVM_HCALL_SUCCESS;
760 		break;
761 	default:
762 		ret = KVM_HCALL_INVALID_CODE;
763 		break;
764 	}
765 
766 	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
767 }
768 
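/*
 * Handle a trapped hypercall instruction: the immediate selects the
 * hypercall class. Service calls are emulated in the kernel,
 * KVM_HCALL_SWDBG exits to userspace when software breakpoints are
 * enabled, and anything else acts as a nop that returns
 * KVM_HCALL_INVALID_CODE.
 */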
769 static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
770 {
771 	int ret;
772 	larch_inst inst;
773 	unsigned int code;
774 
775 	inst.word = vcpu->arch.badi;
776 	code = inst.reg0i15_format.immediate;
777 	ret = RESUME_GUEST;
778 
779 	switch (code) {
780 	case KVM_HCALL_SERVICE:
781 		vcpu->stat.hypercall_exits++;
782 		kvm_handle_service(vcpu);
783 		break;
784 	case KVM_HCALL_SWDBG:
785 		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
786 		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
787 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
788 			ret = RESUME_HOST;
789 			break;
790 		}
791 		fallthrough;
792 	default:
793 		/* Treat it as a nop instruction, only set the return value */
794 		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
795 		break;
796 	}
797 
798 	if (ret == RESUME_GUEST)
799 		update_pc(&vcpu->arch);
800 
801 	return ret;
802 }
803 
804 /*
805  * LoongArch KVM callback for handling unimplemented guest exits
806  */
807 static int kvm_fault_ni(struct kvm_vcpu *vcpu)
808 {
809 	unsigned int ecode, inst;
810 	unsigned long estat, badv;
811 
812 	/* Fetch the instruction */
813 	inst = vcpu->arch.badi;
814 	badv = vcpu->arch.badv;
815 	estat = vcpu->arch.host_estat;
816 	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
817 	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
818 			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
819 	kvm_arch_vcpu_dump_regs(vcpu);
820 	kvm_queue_exception(vcpu, EXCCODE_INE, 0);
821 
822 	return RESUME_GUEST;
823 }
824 
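/*
 * Exit dispatch table indexed by the exception code; codes without a
 * dedicated handler fall back to kvm_fault_ni().
 */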
825 static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
826 	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
827 	[EXCCODE_TLBI]			= kvm_handle_read_fault,
828 	[EXCCODE_TLBL]			= kvm_handle_read_fault,
829 	[EXCCODE_TLBS]			= kvm_handle_write_fault,
830 	[EXCCODE_TLBM]			= kvm_handle_write_fault,
831 	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
832 	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
833 	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
834 	[EXCCODE_GSPR]			= kvm_handle_gspr,
835 	[EXCCODE_HVC]			= kvm_handle_hypercall,
836 };
837 
838 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
839 {
840 	return kvm_fault_tables[fault](vcpu);
841 }
842