xref: /linux/arch/loongarch/kvm/exit.c (revision 2e51e0ac575c2095da869ea62d406f617550e6ed)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/err.h>
7 #include <linux/errno.h>
8 #include <linux/kvm_host.h>
9 #include <linux/module.h>
10 #include <linux/preempt.h>
11 #include <linux/vmalloc.h>
12 #include <trace/events/kvm.h>
13 #include <asm/fpu.h>
14 #include <asm/inst.h>
15 #include <asm/loongarch.h>
16 #include <asm/mmzone.h>
17 #include <asm/numa.h>
18 #include <asm/time.h>
19 #include <asm/tlb.h>
20 #include <asm/kvm_csr.h>
21 #include <asm/kvm_vcpu.h>
22 #include "trace.h"
23 
24 static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
25 {
26 	int rd, rj;
27 	unsigned int index, ret;
28 
29 	if (inst.reg2_format.opcode != cpucfg_op)
30 		return EMULATE_FAIL;
31 
32 	rd = inst.reg2_format.rd;
33 	rj = inst.reg2_format.rj;
34 	++vcpu->stat.cpucfg_exits;
35 	index = vcpu->arch.gprs[rj];
36 
37 	/*
38 	 * Per the LoongArch Reference Manual, Section 2.2.10.5,
39 	 * the return value is 0 for an undefined CPUCFG index
40 	 *
41 	 * Disable preemption since the hardware gcsr is accessed
42 	 */
43 	preempt_disable();
44 	switch (index) {
45 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
46 		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
47 		break;
48 	case CPUCFG_KVM_SIG:
49 		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
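		/* Reading CPUCFG_KVM_SIG returns the KVM_SIGNATURE magic, letting a guest probe for KVM */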
50 		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
51 		break;
52 	case CPUCFG_KVM_FEATURE:
53 		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
54 		vcpu->arch.gprs[rd] = ret;
55 		break;
56 	default:
57 		vcpu->arch.gprs[rd] = 0;
58 		break;
59 	}
60 	preempt_enable();
61 
62 	return EMULATE_DONE;
63 }
64 
65 static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
66 {
67 	unsigned long val = 0;
68 	struct loongarch_csrs *csr = vcpu->arch.csr;
69 
70 	/*
71 	 * Per the LoongArch Reference Manual, Volume 1, Chapter 4.2.1,
72 	 * the return value is 0 for an undefined CSR id
73 	 */
74 	if (get_gcsr_flag(csrid) & SW_GCSR)
75 		val = kvm_read_sw_gcsr(csr, csrid);
76 	else
77 		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
78 
79 	return val;
80 }
81 
82 static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
83 {
84 	unsigned long old = 0;
85 	struct loongarch_csrs *csr = vcpu->arch.csr;
86 
87 	if (get_gcsr_flag(csrid) & SW_GCSR) {
88 		old = kvm_read_sw_gcsr(csr, csrid);
89 		kvm_write_sw_gcsr(csr, csrid, val);
90 	} else
91 		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
92 
93 	return old;
94 }
95 
96 static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
97 				unsigned long csr_mask, unsigned long val)
98 {
99 	unsigned long old = 0;
100 	struct loongarch_csrs *csr = vcpu->arch.csr;
101 
102 	if (get_gcsr_flag(csrid) & SW_GCSR) {
103 		old = kvm_read_sw_gcsr(csr, csrid);
104 		val = (old & ~csr_mask) | (val & csr_mask);
105 		kvm_write_sw_gcsr(csr, csrid, val);
106 		old = old & csr_mask;
107 	} else
108 		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
109 
110 	return old;
111 }
112 
113 static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
114 {
115 	unsigned int rd, rj, csrid;
116 	unsigned long csr_mask, val = 0;
117 
118 	/*
119 	 * The rj field selects the operation; for csrxchg, gpr[rj] supplies the write mask
120 	 * rj = 0 means csrrd
121 	 * rj = 1 means csrwr
122 	 * rj != 0,1 means csrxchg
123 	 */
124 	rd = inst.reg2csr_format.rd;
125 	rj = inst.reg2csr_format.rj;
126 	csrid = inst.reg2csr_format.csr;
127 
128 	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
129 		if (kvm_guest_has_pmu(&vcpu->arch)) {
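			/*
			 * Rewind the PC (kvm_trap_handle_gspr() already advanced it)
			 * so the CSR access is retried after KVM_REQ_PMU is handled
			 */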
130 			vcpu->arch.pc -= 4;
131 			kvm_make_request(KVM_REQ_PMU, vcpu);
132 			return EMULATE_DONE;
133 		}
134 	}
135 
136 	/* Process CSR ops */
137 	switch (rj) {
138 	case 0: /* process csrrd */
139 		val = kvm_emu_read_csr(vcpu, csrid);
140 		vcpu->arch.gprs[rd] = val;
141 		break;
142 	case 1: /* process csrwr */
143 		val = vcpu->arch.gprs[rd];
144 		val = kvm_emu_write_csr(vcpu, csrid, val);
145 		vcpu->arch.gprs[rd] = val;
146 		break;
147 	default: /* process csrxchg */
148 		val = vcpu->arch.gprs[rd];
149 		csr_mask = vcpu->arch.gprs[rj];
150 		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
151 		vcpu->arch.gprs[rd] = val;
152 	}
153 
154 	return EMULATE_DONE;
155 }
156 
157 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
158 {
159 	int idx, ret;
160 	unsigned long *val;
161 	u32 addr, rd, rj, opcode;
162 
163 	/*
164 	 * Each IOCSR access width and direction has its own opcode
165 	 */
166 	rd = inst.reg2_format.rd;
167 	rj = inst.reg2_format.rj;
168 	opcode = inst.reg2_format.opcode;
169 	addr = vcpu->arch.gprs[rj];
170 	run->iocsr_io.phys_addr = addr;
171 	run->iocsr_io.is_write = 0;
172 	val = &vcpu->arch.gprs[rd];
173 
174 	/* LoongArch is little-endian */
175 	switch (opcode) {
176 	case iocsrrdb_op:
177 		run->iocsr_io.len = 1;
178 		break;
179 	case iocsrrdh_op:
180 		run->iocsr_io.len = 2;
181 		break;
182 	case iocsrrdw_op:
183 		run->iocsr_io.len = 4;
184 		break;
185 	case iocsrrdd_op:
186 		run->iocsr_io.len = 8;
187 		break;
188 	case iocsrwrb_op:
189 		run->iocsr_io.len = 1;
190 		run->iocsr_io.is_write = 1;
191 		break;
192 	case iocsrwrh_op:
193 		run->iocsr_io.len = 2;
194 		run->iocsr_io.is_write = 1;
195 		break;
196 	case iocsrwrw_op:
197 		run->iocsr_io.len = 4;
198 		run->iocsr_io.is_write = 1;
199 		break;
200 	case iocsrwrd_op:
201 		run->iocsr_io.len = 8;
202 		run->iocsr_io.is_write = 1;
203 		break;
204 	default:
205 		return EMULATE_FAIL;
206 	}
207 
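	/*
	 * Try the in-kernel IOCSR bus first; only exit to user space
	 * (EMULATE_DO_IOCSR) when no in-kernel device claims the address
	 */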
208 	if (run->iocsr_io.is_write) {
209 		idx = srcu_read_lock(&vcpu->kvm->srcu);
210 		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
211 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
212 		if (ret == 0)
213 			ret = EMULATE_DONE;
214 		else {
215 			ret = EMULATE_DO_IOCSR;
216 			/* Save data and let user space write it */
217 			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
218 		}
219 		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
220 	} else {
221 		idx = srcu_read_lock(&vcpu->kvm->srcu);
222 		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
223 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
224 		if (ret == 0)
225 			ret = EMULATE_DONE;
226 		else {
227 			ret = EMULATE_DO_IOCSR;
228 			/* Save register id for iocsr read completion */
229 			vcpu->arch.io_gpr = rd;
230 		}
231 		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
232 	}
233 
234 	return ret;
235 }
236 
237 int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
238 {
239 	enum emulation_result er = EMULATE_DONE;
240 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
241 
242 	switch (run->iocsr_io.len) {
243 	case 1:
244 		*gpr = *(s8 *)run->iocsr_io.data;
245 		break;
246 	case 2:
247 		*gpr = *(s16 *)run->iocsr_io.data;
248 		break;
249 	case 4:
250 		*gpr = *(s32 *)run->iocsr_io.data;
251 		break;
252 	case 8:
253 		*gpr = *(s64 *)run->iocsr_io.data;
254 		break;
255 	default:
256 		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
257 				run->iocsr_io.len, vcpu->arch.badv);
258 		er = EMULATE_FAIL;
259 		break;
260 	}
261 
262 	return er;
263 }
264 
265 int kvm_emu_idle(struct kvm_vcpu *vcpu)
266 {
267 	++vcpu->stat.idle_exits;
268 	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
269 
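	/* Block only when no interrupt or request is already pending for this vCPU */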
270 	if (!kvm_arch_vcpu_runnable(vcpu))
271 		kvm_vcpu_halt(vcpu);
272 
273 	return EMULATE_DONE;
274 }
275 
276 static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
277 {
278 	unsigned long curr_pc;
279 	larch_inst inst;
280 	enum emulation_result er = EMULATE_DONE;
281 	struct kvm_run *run = vcpu->run;
282 
283 	/* Fetch the instruction */
284 	inst.word = vcpu->arch.badi;
285 	curr_pc = vcpu->arch.pc;
286 	update_pc(&vcpu->arch);
287 
288 	trace_kvm_exit_gspr(vcpu, inst.word);
289 	er = EMULATE_FAIL;
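	/* Decode the major opcode in bits 31:24 first, then the wider fields (31:22, 31:15) for the 0x6 group */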
290 	switch (((inst.word >> 24) & 0xff)) {
291 	case 0x0: /* CPUCFG GSPR */
292 		er = kvm_emu_cpucfg(vcpu, inst);
293 		break;
294 	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
295 		er = kvm_handle_csr(vcpu, inst);
296 		break;
297 	case 0x6: /* Cache, Idle and IOCSR GSPR */
298 		switch (((inst.word >> 22) & 0x3ff)) {
299 		case 0x18: /* Cache GSPR */
300 			er = EMULATE_DONE;
301 			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
302 			break;
303 		case 0x19: /* Idle/IOCSR GSPR */
304 			switch (((inst.word >> 15) & 0x1ffff)) {
305 			case 0xc90: /* IOCSR GSPR */
306 				er = kvm_emu_iocsr(inst, run, vcpu);
307 				break;
308 			case 0xc91: /* Idle GSPR */
309 				er = kvm_emu_idle(vcpu);
310 				break;
311 			default:
312 				er = EMULATE_FAIL;
313 				break;
314 			}
315 			break;
316 		default:
317 			er = EMULATE_FAIL;
318 			break;
319 		}
320 		break;
321 	default:
322 		er = EMULATE_FAIL;
323 		break;
324 	}
325 
326 	/* Rollback PC only if emulation was unsuccessful */
327 	if (er == EMULATE_FAIL) {
328 		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
329 			curr_pc, __func__, inst.word);
330 
331 		kvm_arch_vcpu_dump_regs(vcpu);
332 		vcpu->arch.pc = curr_pc;
333 	}
334 
335 	return er;
336 }
337 
338 /*
339  * Trigger GSPR:
340  * 1) Execute CPUCFG instruction;
341  * 2) Execute CACOP/IDLE instructions;
342  * 3) Access to unimplemented CSRs/IOCSRs.
343  */
344 static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
345 {
346 	int ret = RESUME_GUEST;
347 	enum emulation_result er = EMULATE_DONE;
348 
349 	er = kvm_trap_handle_gspr(vcpu);
350 
351 	if (er == EMULATE_DONE) {
352 		ret = RESUME_GUEST;
353 	} else if (er == EMULATE_DO_MMIO) {
354 		vcpu->run->exit_reason = KVM_EXIT_MMIO;
355 		ret = RESUME_HOST;
356 	} else if (er == EMULATE_DO_IOCSR) {
357 		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
358 		ret = RESUME_HOST;
359 	} else {
360 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
361 		ret = RESUME_GUEST;
362 	}
363 
364 	return ret;
365 }
366 
367 int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
368 {
369 	int idx, ret;
370 	unsigned int op8, opcode, rd;
371 	struct kvm_run *run = vcpu->run;
372 
373 	run->mmio.phys_addr = vcpu->arch.badv;
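	/*
	 * mmio_needed also records the sign flag for kvm_complete_mmio_read():
	 * 2 means sign-extend the data, 1 means zero-extend it
	 */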
374 	vcpu->mmio_needed = 2;	/* signed */
375 	op8 = (inst.word >> 24) & 0xff;
376 	ret = EMULATE_DO_MMIO;
377 
378 	switch (op8) {
379 	case 0x24 ... 0x27:	/* ldptr.w/d process */
380 		rd = inst.reg2i14_format.rd;
381 		opcode = inst.reg2i14_format.opcode;
382 
383 		switch (opcode) {
384 		case ldptrw_op:
385 			run->mmio.len = 4;
386 			break;
387 		case ldptrd_op:
388 			run->mmio.len = 8;
389 			break;
390 		default:
391 			break;
392 		}
393 		break;
394 	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
395 		rd = inst.reg2i12_format.rd;
396 		opcode = inst.reg2i12_format.opcode;
397 
398 		switch (opcode) {
399 		case ldb_op:
400 			run->mmio.len = 1;
401 			break;
402 		case ldbu_op:
403 			vcpu->mmio_needed = 1;	/* unsigned */
404 			run->mmio.len = 1;
405 			break;
406 		case ldh_op:
407 			run->mmio.len = 2;
408 			break;
409 		case ldhu_op:
410 			vcpu->mmio_needed = 1;	/* unsigned */
411 			run->mmio.len = 2;
412 			break;
413 		case ldw_op:
414 			run->mmio.len = 4;
415 			break;
416 		case ldwu_op:
417 			vcpu->mmio_needed = 1;	/* unsigned */
418 			run->mmio.len = 4;
419 			break;
420 		case ldd_op:
421 			run->mmio.len = 8;
422 			break;
423 		default:
424 			ret = EMULATE_FAIL;
425 			break;
426 		}
427 		break;
428 	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
429 		rd = inst.reg3_format.rd;
430 		opcode = inst.reg3_format.opcode;
431 
432 		switch (opcode) {
433 		case ldxb_op:
434 			run->mmio.len = 1;
435 			break;
436 		case ldxbu_op:
437 			run->mmio.len = 1;
438 			vcpu->mmio_needed = 1;	/* unsigned */
439 			break;
440 		case ldxh_op:
441 			run->mmio.len = 2;
442 			break;
443 		case ldxhu_op:
444 			run->mmio.len = 2;
445 			vcpu->mmio_needed = 1;	/* unsigned */
446 			break;
447 		case ldxw_op:
448 			run->mmio.len = 4;
449 			break;
450 		case ldxwu_op:
451 			run->mmio.len = 4;
452 			vcpu->mmio_needed = 1;	/* unsigned */
453 			break;
454 		case ldxd_op:
455 			run->mmio.len = 8;
456 			break;
457 		default:
458 			ret = EMULATE_FAIL;
459 			break;
460 		}
461 		break;
462 	default:
463 		ret = EMULATE_FAIL;
464 	}
465 
466 	if (ret == EMULATE_DO_MMIO) {
467 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);
468 
469 		/*
470 		 * If the MMIO device, such as the PCH-PIC, is emulated in KVM,
471 		 * there is no need to return to user space to handle the
472 		 * MMIO access.
473 		 */
474 		idx = srcu_read_lock(&vcpu->kvm->srcu);
475 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
476 				      run->mmio.len, &vcpu->arch.gprs[rd]);
477 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
478 		if (!ret) {
479 			update_pc(&vcpu->arch);
480 			vcpu->mmio_needed = 0;
481 			return EMULATE_DONE;
482 		}
483 
484 		/* Set for kvm_complete_mmio_read() use */
485 		vcpu->arch.io_gpr = rd;
486 		run->mmio.is_write = 0;
487 		vcpu->mmio_is_write = 0;
488 		return EMULATE_DO_MMIO;
489 	}
490 
491 	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
492 			inst.word, vcpu->arch.pc, vcpu->arch.badv);
493 	kvm_arch_vcpu_dump_regs(vcpu);
494 	vcpu->mmio_needed = 0;
495 
496 	return ret;
497 }
498 
499 int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
500 {
501 	enum emulation_result er = EMULATE_DONE;
502 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
503 
504 	/* Update with new PC */
505 	update_pc(&vcpu->arch);
506 	switch (run->mmio.len) {
507 	case 1:
508 		if (vcpu->mmio_needed == 2)
509 			*gpr = *(s8 *)run->mmio.data;
510 		else
511 			*gpr = *(u8 *)run->mmio.data;
512 		break;
513 	case 2:
514 		if (vcpu->mmio_needed == 2)
515 			*gpr = *(s16 *)run->mmio.data;
516 		else
517 			*gpr = *(u16 *)run->mmio.data;
518 		break;
519 	case 4:
520 		if (vcpu->mmio_needed == 2)
521 			*gpr = *(s32 *)run->mmio.data;
522 		else
523 			*gpr = *(u32 *)run->mmio.data;
524 		break;
525 	case 8:
526 		*gpr = *(s64 *)run->mmio.data;
527 		break;
528 	default:
529 		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
530 				run->mmio.len, vcpu->arch.badv);
531 		er = EMULATE_FAIL;
532 		break;
533 	}
534 
535 	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
536 			run->mmio.phys_addr, run->mmio.data);
537 
538 	return er;
539 }
540 
541 int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
542 {
543 	int idx, ret;
544 	unsigned int rd, op8, opcode;
545 	unsigned long curr_pc, rd_val = 0;
546 	struct kvm_run *run = vcpu->run;
547 	void *data = run->mmio.data;
548 
549 	/*
550 	 * Update the PC and hold onto the current PC in case there is
551 	 * an error and we want to roll back the PC
552 	 */
553 	curr_pc = vcpu->arch.pc;
554 	update_pc(&vcpu->arch);
555 
556 	op8 = (inst.word >> 24) & 0xff;
557 	run->mmio.phys_addr = vcpu->arch.badv;
558 	ret = EMULATE_DO_MMIO;
559 	switch (op8) {
560 	case 0x24 ... 0x27:	/* stptr.w/d process */
561 		rd = inst.reg2i14_format.rd;
562 		opcode = inst.reg2i14_format.opcode;
563 
564 		switch (opcode) {
565 		case stptrw_op:
566 			run->mmio.len = 4;
567 			*(unsigned int *)data = vcpu->arch.gprs[rd];
568 			break;
569 		case stptrd_op:
570 			run->mmio.len = 8;
571 			*(unsigned long *)data = vcpu->arch.gprs[rd];
572 			break;
573 		default:
574 			ret = EMULATE_FAIL;
575 			break;
576 		}
577 		break;
578 	case 0x28 ... 0x2e:	/* st.b/h/w/d  process */
579 		rd = inst.reg2i12_format.rd;
580 		opcode = inst.reg2i12_format.opcode;
581 		rd_val = vcpu->arch.gprs[rd];
582 
583 		switch (opcode) {
584 		case stb_op:
585 			run->mmio.len = 1;
586 			*(unsigned char *)data = rd_val;
587 			break;
588 		case sth_op:
589 			run->mmio.len = 2;
590 			*(unsigned short *)data = rd_val;
591 			break;
592 		case stw_op:
593 			run->mmio.len = 4;
594 			*(unsigned int *)data = rd_val;
595 			break;
596 		case std_op:
597 			run->mmio.len = 8;
598 			*(unsigned long *)data = rd_val;
599 			break;
600 		default:
601 			ret = EMULATE_FAIL;
602 			break;
603 		}
604 		break;
605 	case 0x38:	/* stx.b/h/w/d process */
606 		rd = inst.reg3_format.rd;
607 		opcode = inst.reg3_format.opcode;
608 
609 		switch (opcode) {
610 		case stxb_op:
611 			run->mmio.len = 1;
612 			*(unsigned char *)data = vcpu->arch.gprs[rd];
613 			break;
614 		case stxh_op:
615 			run->mmio.len = 2;
616 			*(unsigned short *)data = vcpu->arch.gprs[rd];
617 			break;
618 		case stxw_op:
619 			run->mmio.len = 4;
620 			*(unsigned int *)data = vcpu->arch.gprs[rd];
621 			break;
622 		case stxd_op:
623 			run->mmio.len = 8;
624 			*(unsigned long *)data = vcpu->arch.gprs[rd];
625 			break;
626 		default:
627 			ret = EMULATE_FAIL;
628 			break;
629 		}
630 		break;
631 	default:
632 		ret = EMULATE_FAIL;
633 	}
634 
635 	if (ret == EMULATE_DO_MMIO) {
636 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);
637 
638 		/*
639 		 * If the MMIO device, such as the PCH-PIC, is emulated in KVM,
640 		 * there is no need to return to user space to handle the
641 		 * MMIO access.
642 		 */
643 		idx = srcu_read_lock(&vcpu->kvm->srcu);
644 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
645 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
646 		if (!ret)
647 			return EMULATE_DONE;
648 
649 		run->mmio.is_write = 1;
650 		vcpu->mmio_needed = 1;
651 		vcpu->mmio_is_write = 1;
652 		return EMULATE_DO_MMIO;
653 	}
654 
655 	vcpu->arch.pc = curr_pc;
656 	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
657 			inst.word, vcpu->arch.pc, vcpu->arch.badv);
658 	kvm_arch_vcpu_dump_regs(vcpu);
659 	/* Rollback PC if emulation was unsuccessful */
660 
661 	return ret;
662 }
663 
664 static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
665 {
666 	int ret;
667 	larch_inst inst;
668 	enum emulation_result er = EMULATE_DONE;
669 	struct kvm_run *run = vcpu->run;
670 	unsigned long badv = vcpu->arch.badv;
671 
672 	/* Inject an ADE exception if the address exceeds the max GPA size */
673 	if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
674 		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
675 		return RESUME_GUEST;
676 	}
677 
678 	ret = kvm_handle_mm_fault(vcpu, badv, write);
679 	if (ret) {
680 		/* Treat as MMIO */
681 		inst.word = vcpu->arch.badi;
682 		if (write) {
683 			er = kvm_emu_mmio_write(vcpu, inst);
684 		} else {
685 			/* A code fetch fault doesn't count as an MMIO */
686 			if (kvm_is_ifetch_fault(&vcpu->arch)) {
687 				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
688 				return RESUME_GUEST;
689 			}
690 
691 			er = kvm_emu_mmio_read(vcpu, inst);
692 		}
693 	}
694 
695 	if (er == EMULATE_DONE) {
696 		ret = RESUME_GUEST;
697 	} else if (er == EMULATE_DO_MMIO) {
698 		run->exit_reason = KVM_EXIT_MMIO;
699 		ret = RESUME_HOST;
700 	} else {
701 		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
702 		ret = RESUME_GUEST;
703 	}
704 
705 	return ret;
706 }
707 
708 static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
709 {
710 	return kvm_handle_rdwr_fault(vcpu, false);
711 }
712 
713 static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
714 {
715 	return kvm_handle_rdwr_fault(vcpu, true);
716 }
717 
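/*
 * Completion callback for a KVM_HCALL_USER_SERVICE exit: step over the
 * hypercall instruction and pass the user-space result back in A0
 */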
718 int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
719 {
720 	update_pc(&vcpu->arch);
721 	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);
722 
723 	return 0;
724 }
725 
726 /**
727  * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled at the host
728  * @vcpu:	Virtual CPU context.
729  *
730  * Handle when the guest attempts to use the FPU when it hasn't been allowed
731  * by the root context.
732  */
733 static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
734 {
735 	struct kvm_run *run = vcpu->run;
736 
737 	if (!kvm_guest_has_fpu(&vcpu->arch)) {
738 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
739 		return RESUME_GUEST;
740 	}
741 
742 	/*
743 	 * If the guest FPU is not present, the FPU operation should have been
744 	 * treated as a reserved instruction!
745 	 * If the FPU is already in use, we shouldn't get this at all.
746 	 */
747 	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
748 		kvm_err("%s internal error\n", __func__);
749 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
750 		return RESUME_HOST;
751 	}
752 
753 	kvm_own_fpu(vcpu);
754 
755 	return RESUME_GUEST;
756 }
757 
758 static long kvm_save_notify(struct kvm_vcpu *vcpu)
759 {
760 	unsigned long id, data;
761 
762 	id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
763 	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
764 	switch (id) {
765 	case BIT(KVM_FEATURE_STEAL_TIME):
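		/* A2 carries the GPA of the steal-time area together with the KVM_STEAL_PHYS_VALID enable bit */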
766 		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
767 			return KVM_HCALL_INVALID_PARAMETER;
768 
769 		vcpu->arch.st.guest_addr = data;
770 		if (!(data & KVM_STEAL_PHYS_VALID))
771 			return 0;
772 
773 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
774 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
775 		return 0;
776 	default:
777 		return KVM_HCALL_INVALID_CODE;
778 	}
779 
780 	return KVM_HCALL_INVALID_CODE;
781 }
782 
783 /*
784  * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
785  * @vcpu:      Virtual CPU context.
786  *
787  * Handle when the guest attempts to use LSX when it is disabled in the root
788  * context.
789  */
790 static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
791 {
792 	if (kvm_own_lsx(vcpu))
793 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
794 
795 	return RESUME_GUEST;
796 }
797 
798 /*
799  * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
800  * @vcpu:	Virtual CPU context.
801  *
802  * Handle when the guest attempts to use LASX when it is disabled in the root
803  * context.
804  */
805 static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
806 {
807 	if (kvm_own_lasx(vcpu))
808 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
809 
810 	return RESUME_GUEST;
811 }
812 
813 static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
814 {
815 	if (kvm_own_lbt(vcpu))
816 		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
817 
818 	return RESUME_GUEST;
819 }
820 
821 static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
822 {
823 	unsigned int min, cpu, i;
824 	unsigned long ipi_bitmap;
825 	struct kvm_vcpu *dest;
826 
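	/* A1/A2 form a 128-bit destination cpu bitmap; A3 is the cpuid of its first bit */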
827 	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
828 	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
829 		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
830 		if (!ipi_bitmap)
831 			continue;
832 
833 		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
834 		while (cpu < BITS_PER_LONG) {
835 			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
836 			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
837 			if (!dest)
838 				continue;
839 
840 			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
841 			kvm_queue_irq(dest, INT_SWI0);
842 			kvm_vcpu_kick(dest);
843 		}
844 	}
845 
846 	return 0;
847 }
848 
849 /*
850  * Hypercall emulation always returns to the guest; the caller should check retval.
851  */
852 static void kvm_handle_service(struct kvm_vcpu *vcpu)
853 {
854 	long ret = KVM_HCALL_INVALID_CODE;
855 	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
856 
857 	switch (func) {
858 	case KVM_HCALL_FUNC_IPI:
859 		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
860 			kvm_send_pv_ipi(vcpu);
861 			ret = KVM_HCALL_SUCCESS;
862 		}
863 		break;
864 	case KVM_HCALL_FUNC_NOTIFY:
865 		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
866 			ret = kvm_save_notify(vcpu);
867 		break;
868 	default:
869 		break;
870 	}
871 
872 	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
873 }
874 
875 static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
876 {
877 	int ret;
878 	larch_inst inst;
879 	unsigned int code;
880 
881 	inst.word = vcpu->arch.badi;
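	/* The hypercall number is the 15-bit immediate of the trapping instruction */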
882 	code = inst.reg0i15_format.immediate;
883 	ret = RESUME_GUEST;
884 
885 	switch (code) {
886 	case KVM_HCALL_SERVICE:
887 		vcpu->stat.hypercall_exits++;
888 		kvm_handle_service(vcpu);
889 		break;
890 	case KVM_HCALL_USER_SERVICE:
891 		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
892 			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
893 			break;
894 		}
895 
896 		vcpu->stat.hypercall_exits++;
897 		vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
898 		vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
899 		vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
900 		vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
901 		vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
902 		vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
903 		vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
904 		vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
905 		vcpu->run->hypercall.flags = 0;
906 		/*
907 		 * Set an invalid return value by default; let the user-mode VMM modify it.
908 		 */
909 		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
910 		ret = RESUME_HOST;
911 		break;
912 	case KVM_HCALL_SWDBG:
913 		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
914 		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
915 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
916 			ret = RESUME_HOST;
917 			break;
918 		}
919 		fallthrough;
920 	default:
921 		/* Treat it as a noop instruction, only set the return value */
922 		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
923 		break;
924 	}
925 
926 	if (ret == RESUME_GUEST)
927 		update_pc(&vcpu->arch);
928 
929 	return ret;
930 }
931 
932 /*
933  * LoongArch KVM callback handler for unimplemented guest exit reasons
934  */
935 static int kvm_fault_ni(struct kvm_vcpu *vcpu)
936 {
937 	unsigned int ecode, inst;
938 	unsigned long estat, badv;
939 
940 	/* Fetch the instruction */
941 	inst = vcpu->arch.badi;
942 	badv = vcpu->arch.badv;
943 	estat = vcpu->arch.host_estat;
944 	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
945 	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
946 			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
947 	kvm_arch_vcpu_dump_regs(vcpu);
948 	kvm_queue_exception(vcpu, EXCCODE_INE, 0);
949 
950 	return RESUME_GUEST;
951 }
952 
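/* Exit dispatch table indexed by exception code; codes without a dedicated handler fall back to kvm_fault_ni() */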
953 static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
954 	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
955 	[EXCCODE_TLBI]			= kvm_handle_read_fault,
956 	[EXCCODE_TLBL]			= kvm_handle_read_fault,
957 	[EXCCODE_TLBS]			= kvm_handle_write_fault,
958 	[EXCCODE_TLBM]			= kvm_handle_write_fault,
959 	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
960 	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
961 	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
962 	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
963 	[EXCCODE_GSPR]			= kvm_handle_gspr,
964 	[EXCCODE_HVC]			= kvm_handle_hypercall,
965 };
966 
967 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
968 {
969 	return kvm_fault_tables[fault](vcpu);
970 }
971