// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

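/*
 * kvm_emu_cpucfg() - Emulate a trapped CPUCFG instruction.
 *
 * Reads the CPUCFG index from GPR rj and writes the emulated value into
 * GPR rd: the cached cpucfg array for indexes below KVM_MAX_CPUCFG_REGS,
 * the KVM signature and PV feature bits for the KVM-specific window, and
 * 0 for any other index.
 */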
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per the LoongArch Reference Manual 2.2.10.5, the return value
	 * is 0 for an undefined CPUCFG index.
	 *
	 * Disable preemption since the hardware gcsr is accessed.
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Per LoongArch Reference Manual Volume 1, Chapter 4.2.1, the
	 * return value is 0 for an undefined CSR id.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

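/*
 * Emulate csrxchg: only the bits covered by csr_mask are replaced with the
 * corresponding bits of val; the old value of the masked bits is returned
 * so it can be written back to the destination GPR.
 */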
static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

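/*
 * Decode and emulate a trapped CSR instruction (csrrd/csrwr/csrxchg).
 * PMU CSR accesses are not emulated here: the PC is rolled back and a
 * KVM_REQ_PMU request is raised so the PMU context can be loaded before
 * the instruction is retried.
 */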
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * The rj field selects the CSR operation:
	 * rj == 0 means csrrd
	 * rj == 1 means csrwr
	 * any other rj means csrxchg, with rj holding the value mask
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

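/*
 * Emulate an IOCSR access. The access is first tried on the in-kernel
 * KVM_IOCSR_BUS; if no device claims it, EMULATE_DO_IOCSR is returned and
 * the access is forwarded to user space via run->iocsr_io.
 */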
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

	if (run->iocsr_io.is_write) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save register id for iocsr read completion */
			vcpu->arch.io_gpr = rd;
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}

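/*
 * Complete a user space IOCSR read: copy the data returned in
 * run->iocsr_io.data into the GPR recorded in vcpu->arch.io_gpr,
 * sign-extending it to the register width.
 */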
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

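/*
 * Emulate the IDLE instruction: account the exit and halt the vCPU until it
 * becomes runnable again.
 */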
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

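/*
 * Decode the instruction that raised the GSPR exception and dispatch it to
 * the CPUCFG, CSR, cache, IDLE or IOCSR emulation paths. The PC is advanced
 * up front and rolled back if emulation fails.
 */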
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR exception is triggered by:
 * 1) Executing the CPUCFG instruction;
 * 2) Executing the CACOP/IDLE instructions;
 * 3) Accessing unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

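/*
 * Emulate a trapped MMIO load: decode the access width and signedness, try
 * the in-kernel KVM_MMIO_BUS first, and otherwise return EMULATE_DO_MMIO so
 * the read is completed by user space through kvm_complete_mmio_read().
 */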
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in KVM,
		 * there is no need to return to user space to handle the MMIO
		 * access.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				run->mmio.len, &vcpu->arch.gprs[rd]);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return ret;
}

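/*
 * Complete a user space MMIO read: advance the PC past the load and copy
 * run->mmio.data into the saved destination GPR, honouring the signedness
 * recorded in vcpu->mmio_needed.
 */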
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
			run->mmio.phys_addr, run->mmio.data);

	return er;
}

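/*
 * Emulate a trapped MMIO store: decode the access width, copy the source GPR
 * into run->mmio.data, try the in-kernel KVM_MMIO_BUS first, and otherwise
 * return EMULATE_DO_MMIO so user space performs the write.
 */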
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to roll back the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If the MMIO device, such as the PCH-PIC, is emulated in KVM,
		 * there is no need to return to user space to handle the MMIO
		 * access.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Roll back the PC since emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return ret;
}

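/*
 * Common handler for guest read/write faults: resolve the fault through
 * kvm_handle_mm_fault() if it hits a memslot, otherwise emulate it as an
 * MMIO access or inject an address error exception into the guest.
 */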
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled in the host.
 * @vcpu: Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU which hasn't been
 * allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If the guest FPU is not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If the FPU is already in use, we shouldn't get here at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

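/*
 * Handle the KVM_HCALL_FUNC_NOTIFY hypercall: A1 selects the feature and A2
 * carries its data. For steal time, record the guest physical address of the
 * steal-time area and request an update if the address is marked valid.
 */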
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}

/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

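/*
 * Handle the PV IPI hypercall: A1/A2 hold a 128-bit destination bitmap and
 * A3 the lowest cpuid it covers. SWI0 is queued on each destination vCPU to
 * emulate the IPI.
 */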
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}

/*
 * Hypercall emulation always returns to the guest; the caller should check
 * the return value.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

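/*
 * Dispatch a trapped hypercall by its immediate code: service calls go to
 * kvm_handle_service(), SWDBG exits to user space when software breakpoints
 * are enabled, and anything else is treated as a nop that returns
 * KVM_HCALL_INVALID_CODE. The PC is advanced when execution resumes in the
 * guest.
 */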
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a nop instruction, only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback handling for unimplemented guest exit reasons
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

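/*
 * Exit handler dispatch table, indexed by the exception code taken from
 * ESTAT. Unhandled codes fall back to kvm_fault_ni(), which injects an INE
 * exception into the guest.
 */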
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}