// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per LoongArch Reference Manual, Section 2.2.10.5, the return
	 * value is 0 for an undefined CPUCFG index.
	 *
	 * Disable preemption since the hardware GCSR is accessed.
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Per LoongArch Reference Manual Volume 1, Section 4.2.1, the
	 * return value is 0 for an undefined CSR ID.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				      unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * The rj field selects the CSR operation:
	 * rj == 0 means csrrd
	 * rj == 1 means csrwr
	 * rj != 0,1 means csrxchg, with rj holding the value mask
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

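	/*
	 * PMU CSR accesses are handled lazily: roll the PC back so the
	 * guest re-executes this instruction once KVM_REQ_PMU has loaded
	 * the PMU context.
	 */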
	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is little-endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

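	/*
	 * Try the in-kernel IOCSR bus first; fall back to user space
	 * (EMULATE_DO_IOCSR) only when the in-kernel bus cannot handle
	 * the access.
	 */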
	if (run->iocsr_io.is_write) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0) {
			ret = EMULATE_DONE;
		} else {
			ret = EMULATE_DO_IOCSR;
			/* Save the data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		vcpu->arch.io_gpr = rd; /* Set register id for iocsr read completion */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr,
				      run->iocsr_io.len, run->iocsr_io.data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0) {
			kvm_complete_iocsr_read(vcpu, run);
			ret = EMULATE_DONE;
		} else {
			ret = EMULATE_DO_IOCSR;
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}

int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

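	/* IOCSR read data is sign-extended to the full GPR width */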
	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
			run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
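	/* Decode by the major opcode in bits [31:24] of the instruction */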
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR (Guest Sensitive Privileged Resource) exception is triggered by:
 * 1) Execution of the CPUCFG instruction;
 * 2) Execution of the CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

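	/* vcpu->mmio_needed: 2 == sign-extend the loaded value, 1 == zero-extend */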
	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		vcpu->arch.io_gpr = rd; /* Set for kvm_complete_mmio_read() use */

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in
		 * KVM, there is no need to return to user space to handle
		 * the MMIO access.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				      run->mmio.len, run->mmio.data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			kvm_complete_mmio_read(vcpu, run);
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
		inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return ret;
}

int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with the new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
			run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
		       run->mmio.phys_addr, run->mmio.data);

	return er;
}

int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update the PC, but hold onto the current PC in case there is
	 * an error and we need to roll it back
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
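	/* Decode the store width and latch the value into run->mmio.data */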
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in
		 * KVM, there is no need to return to user space to handle
		 * the MMIO access.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Rollback PC since emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
		inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return ret;
}

static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	/* Inject an ADE exception if the address exceeds the max GPA size */
	if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		return RESUME_GUEST;
	}

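	/*
	 * A non-zero return means the address is not backed by a memslot,
	 * so emulate the access as MMIO.
	 */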
	ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
{
	return kvm_handle_rdwr_fault(vcpu, false, ecode);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
	return kvm_handle_rdwr_fault(vcpu, true, ecode);
}

int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	update_pc(&vcpu->arch);
	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);

	return 0;
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled in the host
 * @vcpu:	Virtual CPU context.
 * @ecode:	Exception code.
 *
 * Handle the case where the guest attempts to use the FPU when it has not
 * been allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If the guest FPU is not present, the FPU operation should have
	 * been treated as a reserved instruction!
	 * If the FPU is already in use, we shouldn't get here at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
	kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);

	return RESUME_GUEST;
}

static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
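	/* A1 selects the PV feature; A2 carries the feature-specific argument */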
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}
}

/**
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 * @ecode:	Exception code.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (!kvm_guest_has_lsx(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
	} else {
		vcpu->arch.aux_ldtype = KVM_LARCH_LSX;
		kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
	}

	return RESUME_GUEST;
}

/**
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 * @ecode:	Exception code.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (!kvm_guest_has_lasx(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
	} else {
		vcpu->arch.aux_ldtype = KVM_LARCH_LASX;
		kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
	}

	return RESUME_GUEST;
}

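/**
 * kvm_handle_lbt_disabled() - Guest used LBT while disabled in root.
 * @vcpu:	Virtual CPU context.
 * @ecode:	Exception code.
 *
 * Handle when the guest attempts to use LBT (the binary translation
 * extension) when it is disabled in the root context.
 */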
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (!kvm_guest_has_lbt(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
	} else {
		vcpu->arch.aux_ldtype = KVM_LARCH_LBT;
		kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);
	}

	return RESUME_GUEST;
}

static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu;
	struct kvm_vcpu *dest;
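	/*
	 * The 128-bit destination bitmap is passed in A1 (low 64 bits) and
	 * A2 (high 64 bits); A3 holds the cpuid corresponding to bit 0.
	 */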
	DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
		kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
		kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
	};

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
		dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
		if (!dest)
			continue;

		/* Send SWI0 to dest vcpu to emulate IPI interrupt */
		kvm_queue_irq(dest, INT_SWI0);
		kvm_vcpu_kick(dest);
	}
}

/*
 * Hypercall emulation always returns to the guest; the caller should
 * check the return value.
 */
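/*
 * Guest-side usage sketch (illustrative only, not part of this file):
 * the guest puts the function ID in a0 and arguments in a1..a5, issues
 * "hvcl KVM_HCALL_SERVICE", and reads the status back from a0.
 */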
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
	int ret;
	larch_inst inst;
	unsigned int code;

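	/* The hypercall code is encoded in the immediate field of the HVCL instruction */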
	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_USER_SERVICE:
		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
			break;
		}

		vcpu->stat.hypercall_exits++;
		vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
		vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
		vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
		vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
		vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
		vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
		vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
		vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
		vcpu->run->hypercall.flags = 0;
		/*
		 * Set an invalid return value by default, and let the
		 * user-mode VMM modify it.
		 */
		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
		ret = RESUME_HOST;
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a noop instruction; only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback for guest exits that have no implemented handler
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
	unsigned int inst;
	unsigned long badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
		ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu, fault);
}