// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

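/*
 * Emulate the CPUCFG instruction executed by the guest: standard indices are
 * served from the vCPU's cached cpucfg[] array, while the KVM-specific window
 * (CPUCFG_KVM_SIG/CPUCFG_KVM_FEATURE) reports the hypervisor signature and
 * the enabled paravirt features.
 */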
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per the LoongArch Reference Manual 2.2.10.5, the return value is 0
	 * for an undefined CPUCFG index.
	 *
	 * Disable preemption since the hardware GCSR is accessed.
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From the LoongArch Reference Manual Volume 1 Chapter 4.2.1:
	 * for an undefined CSR id the return value is 0.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				      unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * rj = 0 means csrrd
	 * rj = 1 means csrwr
	 * rj != 0,1 means csrxchg, with rj holding the CSR value mask
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

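	/*
	 * Accesses to PMU CSRs are not emulated here: roll the PC back by one
	 * instruction (update_pc() has already advanced it) and raise
	 * KVM_REQ_PMU so the guest retries the access once the PMU context
	 * has been made available to it.
	 */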
	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

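/*
 * Decode an IOCSR access and fill vcpu->run->iocsr_io so that userspace can
 * complete it. Returns EMULATE_DO_IOCSR on success and EMULATE_FAIL for an
 * unrecognized opcode.
 */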
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;
	unsigned long val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access width and direction has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	ret = EMULATE_DO_IOCSR;
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;

	/* LoongArch is little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		ret = EMULATE_FAIL;
		break;
	}

	if (ret == EMULATE_DO_IOCSR) {
		if (run->iocsr_io.is_write) {
			val = vcpu->arch.gprs[rd];
			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		}
		vcpu->arch.io_gpr = rd;
	}

	return ret;
}

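/*
 * Called once userspace has completed an IOCSR read: copy the result from
 * run->iocsr_io.data into the destination GPR saved in vcpu->arch.io_gpr,
 * sign-extending it to the full register width.
 */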
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

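/*
 * Emulate the guest IDLE instruction: account the exit and halt the vCPU
 * until it becomes runnable again (e.g. an interrupt is pending).
 */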
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

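/*
 * Handle a GSPR exit: fetch the trapping instruction from BADI, advance the
 * PC, then dispatch on the major opcode to the CPUCFG, CSR, cache, IDLE or
 * IOCSR emulation paths.
 */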
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * A GSPR exception is triggered when the guest:
 * 1) executes the CPUCFG instruction;
 * 2) executes the CACOP/IDLE instructions;
 * 3) accesses unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

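/*
 * Decode a trapping load instruction and set up vcpu->run->mmio for a
 * userspace MMIO read: record the access length, the signedness (via
 * vcpu->mmio_needed) and the destination GPR for kvm_complete_mmio_read().
 */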
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
				run->mmio.phys_addr, NULL);
	} else {
		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->mmio_needed = 0;
	}

	return ret;
}

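/*
 * Finish an MMIO read once userspace has filled run->mmio.data: step the PC
 * past the trapping load and write the (sign- or zero-extended) value into
 * the GPR saved in vcpu->arch.io_gpr.
 */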
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
			run->mmio.phys_addr, run->mmio.data);

	return er;
}

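/*
 * Decode a trapping store instruction and set up vcpu->run->mmio for a
 * userspace MMIO write: the PC is advanced up front and rolled back if
 * decoding fails, and the GPR value is copied into run->mmio.data.
 */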
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update the PC, but hold onto the current PC in case there is
	 * an error and we need to roll it back.
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
				run->mmio.phys_addr, data);
	} else {
		/* Roll back the PC since emulation was unsuccessful */
		vcpu->arch.pc = curr_pc;
		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
	}

	return ret;
}

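/*
 * Common handler for guest read/write faults: try to resolve the fault as a
 * regular guest memory access first, and fall back to MMIO emulation of the
 * trapping instruction when kvm_handle_mm_fault() cannot resolve it.
 */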
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled in the host
 * @vcpu: Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU before it has been
 * allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If the guest FPU is not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If the FPU is already in use, we shouldn't get here at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

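/*
 * Handle the KVM_HCALL_FUNC_NOTIFY hypercall: A1 selects the feature being
 * configured and A2 carries its data; for steal time, A2 holds the guest
 * physical address of the steal-time area plus a valid bit.
 */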
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}

/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

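/*
 * Handle the paravirt IPI hypercall: A3 holds the lowest target cpuid and
 * A1/A2 form a 128-bit bitmap of targets relative to it. SWI0 is queued on
 * every destination vCPU to emulate the IPI.
 */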
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}

/*
 * Hypercall emulation always returns to the guest; the caller should check
 * the return value.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

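/*
 * Top-level hypercall handler: the HVCL immediate selects the hypercall
 * class. Service calls go through kvm_handle_service(), SWDBG exits to
 * userspace when software breakpoints are enabled, and anything else gets
 * KVM_HCALL_INVALID_CODE. The PC is advanced whenever the guest is resumed
 * so the HVCL instruction is not re-executed.
 */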
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a noop instruction; only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback handling for unimplemented guest exit reasons
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

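/*
 * Exit handler dispatch table, indexed by the exception code taken from
 * ESTAT; any code without a dedicated handler falls back to kvm_fault_ni().
 */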
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}