Lines matching +full:csr +full:-mask

Search hits from the RISC-V KVM instruction, CSR, and MMIO emulation code; each entry shows the source line number, the matched text, and the enclosing function or context.

1 // SPDX-License-Identifier: GPL-2.0
94 #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
115 ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
118 ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
121 (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
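The macro bodies above implement the usual RISC-V trick of turning a register field of the trapped instruction directly into a byte offset into the saved register file. A minimal sketch of how they compose, matching the SHIFT_RIGHT/REG_MASK/LOG_REGBYTES names used in the last fragment; the LOG_REGBYTES definition and get_gpr() are illustrative, not the kernel's exact code:

/*
 * Sketch: the register number sits at bit position `pos` in the
 * instruction (15 for rs1, 7 for rd); scaling it by the register size
 * yields a byte offset into the array of saved guest GPRs.
 */
#define LOG_REGBYTES		(sizeof(unsigned long) == 8 ? 3 : 2)	/* illustrative */
#define SHIFT_RIGHT(x, y)	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK		((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos)	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

/* Fetch a guest GPR, treating the saved context as an x0..x31 array. */
static inline unsigned long get_gpr(const unsigned long *gprs,
				    unsigned long insn, int pos)
{
	return *(const unsigned long *)((const char *)gprs +
					REG_OFFSET(insn, pos));
}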
140 unsigned long mask; (struct member)
145 * 2) Returns 0 for exit to user-space
162 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_illegal_insn()
178 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_virtual_insn()
189 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
204 vcpu->stat.wfi_exit_stat++; in wfi_insn()
211 vcpu->stat.wrs_exit_stat++; in wrs_insn()
212 kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP); in wrs_insn()
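The WRS fragment above already shows its core (kvm_vcpu_on_spin() with the guest's privilege taken from sstatus.SPP); the WFI fragments only show the statistics counter. A plausible shape for the WFI handler named in the comment above, assuming the generic KVM halt helpers; a sketch, not necessarily the exact in-tree code:

#include <linux/kvm_host.h>

void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		/* Drop the SRCU read lock while blocked, then park the
		 * vCPU until an interrupt makes it runnable again. */
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}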
232 if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR)) in seed_csr_rmw()
245 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
246 * emulation or in-kernel emulation
249 * @run: The VCPU run struct containing the CSR data
257 if (vcpu->arch.csr_decode.return_handled) in kvm_riscv_vcpu_csr_return()
259 vcpu->arch.csr_decode.return_handled = 1; in kvm_riscv_vcpu_csr_return()
261 /* Update destination register for CSR reads */ in kvm_riscv_vcpu_csr_return()
262 insn = vcpu->arch.csr_decode.insn; in kvm_riscv_vcpu_csr_return()
264 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_csr_return()
265 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
268 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in kvm_riscv_vcpu_csr_return()
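kvm_riscv_vcpu_csr_return() runs once per exit (guarded by return_handled), copies run->riscv_csr.ret_value into the guest's rd for CSR reads, and steps sepc past the instruction. A hedged sketch of the matching user-space side, assuming a linux/kvm.h new enough to carry KVM_EXIT_RISCV_CSR; vmm_csr_read()/vmm_csr_write() are hypothetical VMM helpers:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM-side CSR model, not a kernel or libc API. */
extern unsigned long vmm_csr_read(unsigned long csr_num);
extern void vmm_csr_write(unsigned long csr_num, unsigned long val);

static int handle_riscv_csr_exit(int vcpu_fd, struct kvm_run *run)
{
	unsigned long old = vmm_csr_read(run->riscv_csr.csr_num);
	unsigned long mask = run->riscv_csr.write_mask;

	if (mask)
		vmm_csr_write(run->riscv_csr.csr_num,
			      (old & ~mask) | (run->riscv_csr.new_value & mask));

	/* Consumed by kvm_riscv_vcpu_csr_return() on the next KVM_RUN. */
	run->riscv_csr.ret_value = old;

	return ioctl(vcpu_fd, KVM_RUN, 0);
}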
278 ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context); in csr_insn()
282 /* Decode the CSR instruction */ in csr_insn()
285 wr_mask = -1UL; in csr_insn()
290 new_val = -1UL; in csr_insn()
297 wr_mask = -1UL; in csr_insn()
302 new_val = -1UL; in csr_insn()
313 vcpu->arch.csr_decode.insn = insn; in csr_insn()
314 vcpu->arch.csr_decode.return_handled = 0; in csr_insn()
316 /* Update CSR details in kvm_run struct */ in csr_insn()
317 run->riscv_csr.csr_num = csr_num; in csr_insn()
318 run->riscv_csr.new_value = new_val; in csr_insn()
319 run->riscv_csr.write_mask = wr_mask; in csr_insn()
320 run->riscv_csr.ret_value = 0; in csr_insn()
322 /* Find in-kernel CSR function */ in csr_insn()
325 if ((tcfn->base <= csr_num) && in csr_insn()
326 (csr_num < (tcfn->base + tcfn->count))) { in csr_insn()
332 /* First try in-kernel CSR emulation */ in csr_insn()
333 if (cfn && cfn->func) { in csr_insn()
334 rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask); in csr_insn()
337 run->riscv_csr.ret_value = val; in csr_insn()
338 vcpu->stat.csr_exit_kernel++; in csr_insn()
346 /* Exit to user-space for CSR emulation */ in csr_insn()
348 vcpu->stat.csr_exit_user++; in csr_insn()
349 run->exit_reason = KVM_EXIT_RISCV_CSR; in csr_insn()
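csr_insn() first looks for an in-kernel handler via the base/count lookup above and only falls back to the KVM_EXIT_RISCV_CSR exit when nothing in the kernel fully handles the access. A sketch of what such a table entry looks like; the struct and field naming here is illustrative, with only base, count, func and the five-argument call signature taken from the fragments:

struct kvm_riscv_csr_fn {
	unsigned int base;	/* first CSR number this entry handles */
	unsigned int count;	/* number of consecutive CSR numbers */
	/*
	 * Read-modify-write callback: returns the current value through
	 * *val and applies the bits selected by wr_mask from new_val; the
	 * return code decides whether the result is used in-kernel or the
	 * access is forwarded to user space (conventions not shown above).
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};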
357 .mask = INSN_MASK_CSRRW,
362 .mask = INSN_MASK_CSRRS,
367 .mask = INSN_MASK_CSRRC,
372 .mask = INSN_MASK_CSRRWI,
377 .mask = INSN_MASK_CSRRSI,
382 .mask = INSN_MASK_CSRRCI,
387 .mask = INSN_MASK_WFI,
392 .mask = INSN_MASK_WRS,
406 if ((insn & ifn->mask) == ifn->match) { in system_opcode_insn()
407 rc = ifn->func(vcpu, run, insn); in system_opcode_insn()
418 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in system_opcode_insn()
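system_opcode_insn() dispatches through a classic mask/match table: each entry records which instruction bits identify an encoding and what those bits must equal. A sketch of the entry layout plus one example pair; the struct name is illustrative (mask, match, func and the three-argument callback come from the fragments, and the CSRRW values follow the standard RISC-V encoding of opcode and funct3):

struct insn_func {
	unsigned long mask;	/* which instruction bits to compare */
	unsigned long match;	/* required value of those bits */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run,
		    unsigned long insn);
};

/* CSRRW: SYSTEM opcode (0x73) with funct3 == 001; rd/rs1/csr are wildcards. */
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRW	0x1073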
428 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
434 * Returns > 0 to continue run-loop
435 * Returns 0 to exit run-loop and handle in user-space.
436 * Returns < 0 to report failure and exit run-loop
441 unsigned long insn = trap->stval; in kvm_riscv_vcpu_virtual_insn()
447 ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_virtual_insn()
449 ct->sepc, in kvm_riscv_vcpu_virtual_insn()
452 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_virtual_insn()
470 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
477 * Returns > 0 to continue run-loop
478 * Returns 0 to exit run-loop and handle in user-space.
479 * Returns < 0 to report failure and exit run-loop
489 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_load()
504 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_load()
508 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_load()
518 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
521 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
524 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
528 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
534 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
540 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
545 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
549 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
554 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
556 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_load()
560 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_load()
561 return -EIO; in kvm_riscv_vcpu_mmio_load()
564 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_load()
565 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_load()
566 vcpu->arch.mmio_decode.shift = shift; in kvm_riscv_vcpu_mmio_load()
567 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_load()
568 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_load()
571 run->mmio.is_write = false; in kvm_riscv_vcpu_mmio_load()
572 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_load()
573 run->mmio.len = len; in kvm_riscv_vcpu_mmio_load()
578 memcpy(run->mmio.data, data_buf, len); in kvm_riscv_vcpu_mmio_load()
579 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_load()
585 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_load()
586 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_load()
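When no in-kernel device claims the load, the vCPU exits to user space with KVM_EXIT_MMIO and the decode state saved above. A hedged user-space sketch of completing such a read; vmm_mmio_read() is a hypothetical device-model helper, not a KVM API:

#include <sys/ioctl.h>
#include <linux/kvm.h>

extern void vmm_mmio_read(unsigned long long addr, void *buf, unsigned int len);

static int handle_mmio_exit(int vcpu_fd, struct kvm_run *run)
{
	if (!run->mmio.is_write) {
		/*
		 * Fill exactly run->mmio.len bytes; on the next KVM_RUN,
		 * kvm_riscv_vcpu_mmio_return() extends the value as needed
		 * and writes it to the guest's destination register.
		 */
		vmm_mmio_read(run->mmio.phys_addr, run->mmio.data,
			      run->mmio.len);
	}
	return ioctl(vcpu_fd, KVM_RUN, 0);
}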
592 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
599 * Returns > 0 to continue run-loop
600 * Returns 0 to exit run-loop and handle in user-space.
601 * Returns < 0 to report failure and exit run-loop
615 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_store()
630 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_store()
634 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_store()
641 data = GET_RS2(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
657 data64 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
661 data64 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
665 data32 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
669 data32 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
671 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
675 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_store()
676 return -EIO; in kvm_riscv_vcpu_mmio_store()
679 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_store()
680 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_store()
681 vcpu->arch.mmio_decode.shift = 0; in kvm_riscv_vcpu_mmio_store()
682 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_store()
683 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_store()
688 *((u8 *)run->mmio.data) = data8; in kvm_riscv_vcpu_mmio_store()
691 *((u16 *)run->mmio.data) = data16; in kvm_riscv_vcpu_mmio_store()
694 *((u32 *)run->mmio.data) = data32; in kvm_riscv_vcpu_mmio_store()
697 *((u64 *)run->mmio.data) = data64; in kvm_riscv_vcpu_mmio_store()
700 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
704 run->mmio.is_write = true; in kvm_riscv_vcpu_mmio_store()
705 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_store()
706 run->mmio.len = len; in kvm_riscv_vcpu_mmio_store()
710 fault_addr, len, run->mmio.data)) { in kvm_riscv_vcpu_mmio_store()
712 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_store()
718 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_store()
719 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_store()
725 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
726 * or in-kernel IO emulation
740 if (vcpu->arch.mmio_decode.return_handled) in kvm_riscv_vcpu_mmio_return()
743 vcpu->arch.mmio_decode.return_handled = 1; in kvm_riscv_vcpu_mmio_return()
744 insn = vcpu->arch.mmio_decode.insn; in kvm_riscv_vcpu_mmio_return()
746 if (run->mmio.is_write) in kvm_riscv_vcpu_mmio_return()
749 len = vcpu->arch.mmio_decode.len; in kvm_riscv_vcpu_mmio_return()
750 shift = vcpu->arch.mmio_decode.shift; in kvm_riscv_vcpu_mmio_return()
754 data8 = *((u8 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
755 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
759 data16 = *((u16 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
760 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
764 data32 = *((u32 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
765 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
769 data64 = *((u64 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
770 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
774 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_return()
779 vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; in kvm_riscv_vcpu_mmio_return()
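The repeated shift = 8 * (sizeof(ulong) - len) assignments in the load path pair with the write-back above: the listing truncates the SET_RD() arguments, but the stored shift is presumably applied there as a left-align followed by an arithmetic right shift so the sign bit propagates. A small worked sketch, assuming a 64-bit build; sign_extend_mmio() is illustrative, not a kernel helper:

static unsigned long sign_extend_mmio(unsigned long raw, unsigned int len)
{
	unsigned int shift = 8 * (sizeof(unsigned long) - len);

	/* Left-align the loaded bytes, then arithmetic-shift back down. */
	return (unsigned long)(((long)(raw << shift)) >> shift);
}

/*
 * Example: an lh (signed 16-bit load) that read 0x8000 from MMIO gives
 * sign_extend_mmio(0x8000, 2) == 0xffffffffffff8000 on RV64.  Unsigned
 * loads (lbu/lhu/lwu) presumably leave shift at 0, making this a no-op.
 */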