Lines matching +full:sync +full:-update +full:-mask in arch/riscv/kvm/vcpu.c (Linux KVM RISC-V vCPU code)

1 // SPDX-License-Identifier: GPL-2.0
57 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_context_reset()
58 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_context_reset()
59 void *vector_datap = cntx->vector.datap; in kvm_riscv_vcpu_context_reset()
63 memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr)); in kvm_riscv_vcpu_context_reset()
66 cntx->vector.datap = vector_datap; in kvm_riscv_vcpu_context_reset()
72 cntx->sstatus = SR_SPP | SR_SPIE; in kvm_riscv_vcpu_context_reset()
74 cntx->hstatus |= HSTATUS_VTW; in kvm_riscv_vcpu_context_reset()
75 cntx->hstatus |= HSTATUS_SPVP; in kvm_riscv_vcpu_context_reset()
76 cntx->hstatus |= HSTATUS_SPV; in kvm_riscv_vcpu_context_reset()
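
The context-reset helper wipes the guest context but must not lose the separately allocated vector save area, so it stashes the pointer before the memset and puts it back afterwards. A minimal userspace sketch of that save/wipe/restore idiom (struct layout and names invented here, not the kernel's):

    #include <string.h>

    struct guest_ctx {
        unsigned long sstatus;
        unsigned long hstatus;
        struct { void *datap; } vector;   /* separately allocated buffer */
    };

    static void ctx_reset(struct guest_ctx *cntx)
    {
        void *vector_datap = cntx->vector.datap;   /* keep the allocation  */

        memset(cntx, 0, sizeof(*cntx));            /* wipe everything else */
        cntx->vector.datap = vector_datap;         /* restore the pointer  */
    }
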
89 loaded = (vcpu->cpu != -1); in kvm_riscv_reset_vcpu()
93 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
105 bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
106 bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
110 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
111 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
112 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
131 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
134 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
136 vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT; in kvm_arch_vcpu_create()
137 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
138 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
144 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
145 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
146 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
149 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
151 spin_lock_init(&vcpu->arch.reset_state.lock); in kvm_arch_vcpu_create()
182 * Keep all vcpus with non-zero id in power-off state so that they can be brought online later via the SBI HSM extension. in kvm_arch_vcpu_postcreate()
185 if (vcpu->vcpu_idx != 0) in kvm_arch_vcpu_postcreate()
201 /* Free unused pages pre-allocated for G-stage page table mappings */ in kvm_arch_vcpu_destroy()
202 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
215 return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) && in kvm_arch_vcpu_runnable()
216 !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
226 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
232 return vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_get_ip()
244 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
251 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
259 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
265 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
267 long r = -EINVAL; in kvm_arch_vcpu_ioctl()
274 r = -EFAULT; in kvm_arch_vcpu_ioctl()
289 r = -EFAULT; in kvm_arch_vcpu_ioctl()
296 r = -E2BIG; in kvm_arch_vcpu_ioctl()
299 r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
312 return -EINVAL; in kvm_arch_vcpu_ioctl_get_sregs()
318 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs()
323 return -EINVAL; in kvm_arch_vcpu_ioctl_get_fpu()
328 return -EINVAL; in kvm_arch_vcpu_ioctl_set_fpu()
334 return -EINVAL; in kvm_arch_vcpu_ioctl_translate()
339 return -EINVAL; in kvm_arch_vcpu_ioctl_get_regs()
344 return -EINVAL; in kvm_arch_vcpu_ioctl_set_regs()
349 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
350 unsigned long mask, val; in kvm_riscv_vcpu_flush_interrupts() local
352 if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) { in kvm_riscv_vcpu_flush_interrupts()
353 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0); in kvm_riscv_vcpu_flush_interrupts()
354 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask; in kvm_riscv_vcpu_flush_interrupts()
356 csr->hvip &= ~mask; in kvm_riscv_vcpu_flush_interrupts()
357 csr->hvip |= val; in kvm_riscv_vcpu_flush_interrupts()
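
kvm_riscv_vcpu_flush_interrupts() consumes pending-interrupt updates without a lock: producers publish a level in irqs_pending and then set the matching bit in irqs_pending_mask, and the flusher exchanges the whole mask with zero before folding the published bits into its HVIP shadow. A compilable C11 sketch of the consumer half (single word instead of a bitmap, names invented):

    #include <stdatomic.h>

    static _Atomic unsigned long irqs_pending;       /* published levels   */
    static _Atomic unsigned long irqs_pending_mask;  /* which bits changed */
    static unsigned long hvip_shadow;                /* software HVIP copy */

    static void flush_irqs(void)
    {
        unsigned long mask, val;

        if (!atomic_load(&irqs_pending_mask))
            return;

        /* claim every announced change in one atomic exchange */
        mask = atomic_exchange(&irqs_pending_mask, 0);
        val = atomic_load(&irqs_pending) & mask;

        hvip_shadow &= ~mask;   /* drop stale state for the changed bits */
        hvip_shadow |= val;     /* adopt the freshly published levels    */
    }
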
367 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
368 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
371 csr->vsie = ncsr_read(CSR_VSIE); in kvm_riscv_vcpu_sync_interrupts()
373 /* Sync-up HVIP.VSSIP bit changes done by the Guest */ in kvm_riscv_vcpu_sync_interrupts()
375 if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) { in kvm_riscv_vcpu_sync_interrupts()
378 v->irqs_pending_mask)) in kvm_riscv_vcpu_sync_interrupts()
379 set_bit(IRQ_VS_SOFT, v->irqs_pending); in kvm_riscv_vcpu_sync_interrupts()
382 v->irqs_pending_mask)) in kvm_riscv_vcpu_sync_interrupts()
383 clear_bit(IRQ_VS_SOFT, v->irqs_pending); in kvm_riscv_vcpu_sync_interrupts()
387 /* Sync up HVIP.LCOFIP bit changes by the guest (it can only clear the bit) */ in kvm_riscv_vcpu_sync_interrupts()
388 if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) { in kvm_riscv_vcpu_sync_interrupts()
390 !test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask)) in kvm_riscv_vcpu_sync_interrupts()
391 clear_bit(IRQ_PMU_OVF, v->irqs_pending); in kvm_riscv_vcpu_sync_interrupts()
394 /* Sync-up AIA high interrupts */ in kvm_riscv_vcpu_sync_interrupts()
397 /* Sync-up timer CSRs */ in kvm_riscv_vcpu_sync_interrupts()
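
sync_interrupts() runs after a VM exit and detects bits the guest itself flipped in HVIP (e.g. VSSIP via writes to sip) by XORing the hardware value against the shadow written on entry; only changed bits are pushed back through the pending bitmaps. A sketch of that XOR change-detection (bit position and names are stand-ins):

    #include <stdatomic.h>

    static void sync_guest_hvip(unsigned long shadow_hvip, unsigned long hw_hvip,
                                _Atomic unsigned long *pending,
                                _Atomic unsigned long *pending_mask)
    {
        unsigned long changed = shadow_hvip ^ hw_hvip;  /* bits the guest flipped */
        unsigned long vssip = 1UL << 2;  /* VS software interrupt bit in HVIP */

        if (changed & vssip) {
            /* publish the guest's new level, then announce the change */
            if (hw_hvip & vssip)
                atomic_fetch_or(pending, vssip);
            else
                atomic_fetch_and(pending, ~vssip);
            atomic_fetch_or(pending_mask, vssip);
        }
    }
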
404 * We only allow VS-mode software, timer, and external in kvm_riscv_vcpu_set_interrupt()
406 * interrupts, as defined by the RISC-V privilege specification. in kvm_riscv_vcpu_set_interrupt()
413 return -EINVAL; in kvm_riscv_vcpu_set_interrupt()
415 set_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
417 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
427 * We only allow VS-mode software, timer, counter overflow and external in kvm_riscv_vcpu_unset_interrupt()
429 * interrupts, as defined by the RISC-V privilege specification. in kvm_riscv_vcpu_unset_interrupt()
436 return -EINVAL; in kvm_riscv_vcpu_unset_interrupt()
438 clear_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
440 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
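
Both set and unset follow the same publication order: first write the level into irqs_pending, then set the bit in irqs_pending_mask so the flusher knows something changed. Note that unsetting an interrupt still *sets* the mask bit, because the change itself must be announced. A self-contained C11 sketch of the producer protocol (names invented):

    #include <stdatomic.h>

    static _Atomic unsigned long irqs_pending;
    static _Atomic unsigned long irqs_pending_mask;

    static void post_irq(unsigned int irq, int level)
    {
        unsigned long bit = 1UL << irq;

        if (level)
            atomic_fetch_or(&irqs_pending, bit);    /* set the level...  */
        else
            atomic_fetch_and(&irqs_pending, ~bit);  /* ...or clear it    */

        /* announce the change second, after the level is visible */
        atomic_fetch_or(&irqs_pending_mask, bit);
    }

C11 atomic read-modify-writes are sequentially consistent by default, which models the ordering the kernel enforces between the two bitmap updates.
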
445 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask) in kvm_riscv_vcpu_has_interrupts() argument
449 ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
450 << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask; in kvm_riscv_vcpu_has_interrupts()
451 ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK & in kvm_riscv_vcpu_has_interrupts()
452 (unsigned long)mask; in kvm_riscv_vcpu_has_interrupts()
453 if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie) in kvm_riscv_vcpu_has_interrupts()
457 return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask); in kvm_riscv_vcpu_has_interrupts()
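
has_interrupts() intersects what is pending with what the guest enabled: VSIE bits sit at S-level positions, so they are shifted by VSIP_TO_HVIP_SHIFT to line up with the HVIP bit layout used by irqs_pending before masking. A small worked sketch (constants chosen to mirror the shapes involved, simplified to the three classic VS interrupts):

    /* VS-level bits appear at S positions (1,5,9) in VSIE but at      */
    /* positions (2,6,10) in HVIP, hence the shift-by-one translation. */
    #define VSIP_TO_HVIP_SHIFT 1
    #define VSIP_VALID_MASK    ((1UL << 1) | (1UL << 5) | (1UL << 9))

    static int has_pending_enabled(unsigned long vsie, unsigned long pending)
    {
        unsigned long ie = (vsie & VSIP_VALID_MASK) << VSIP_TO_HVIP_SHIFT;

        return (pending & ie) != 0;
    }
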
462 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_riscv_vcpu_power_off()
469 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
471 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
476 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in __kvm_riscv_vcpu_power_on()
482 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
484 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
489 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_riscv_vcpu_stopped()
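
mp_state is written under mp_state_lock but read locklessly, so both sides go through the ONCE accessors. A sketch of the same discipline, with C11 atomics and a pthread mutex standing in for WRITE_ONCE/READ_ONCE and the kernel spinlock:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    enum mp_state { MP_RUNNABLE, MP_STOPPED };

    static pthread_mutex_t mp_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic int mp_state_var = MP_RUNNABLE;

    static void vcpu_power_off(void)
    {
        pthread_mutex_lock(&mp_lock);          /* writers serialize...          */
        atomic_store(&mp_state_var, MP_STOPPED); /* ...but still store atomically */
        pthread_mutex_unlock(&mp_lock);
    }

    static bool vcpu_stopped(void)
    {
        /* readers take no lock; the atomic load alone is enough */
        return atomic_load(&mp_state_var) == MP_STOPPED;
    }
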
495 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
505 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
507 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
509 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
515 if (vcpu->kvm->arch.mp_state_reset) in kvm_arch_vcpu_ioctl_set_mpstate()
518 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
521 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
524 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
532 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
533 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
534 vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
536 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
537 vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
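
hedeleg controls which guest exceptions are handled inside the guest without a VM exit: a set bit delegates that exception to VS-mode. Enabling guest debug therefore clears the breakpoint bit so EBREAK traps reach KVM (and from there the debugger), while disabling it hands breakpoints back to the guest. A minimal sketch of the toggle (BIT macro defined locally; breakpoint is exception cause 3 in the privileged spec):

    #define EXC_BREAKPOINT  3
    #define BIT(n)          (1UL << (n))

    static void set_guest_debug(unsigned long *hedeleg, int debug_enabled)
    {
        if (debug_enabled)
            *hedeleg &= ~BIT(EXC_BREAKPOINT);  /* trap to the host/KVM */
        else
            *hedeleg |= BIT(EXC_BREAKPOINT);   /* guest handles EBREAK */
    }
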
545 const unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_setup_config()
546 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_setup_config()
549 cfg->henvcfg |= ENVCFG_PBMTE; in kvm_riscv_vcpu_setup_config()
552 cfg->henvcfg |= ENVCFG_STCE; in kvm_riscv_vcpu_setup_config()
555 cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE); in kvm_riscv_vcpu_setup_config()
558 cfg->henvcfg |= ENVCFG_CBZE; in kvm_riscv_vcpu_setup_config()
562 cfg->henvcfg |= ENVCFG_ADUE; in kvm_riscv_vcpu_setup_config()
565 cfg->hstateen0 |= SMSTATEEN0_HSENVCFG; in kvm_riscv_vcpu_setup_config()
567 cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC | in kvm_riscv_vcpu_setup_config()
571 cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0; in kvm_riscv_vcpu_setup_config()
574 if (vcpu->guest_debug) in kvm_riscv_vcpu_setup_config()
575 cfg->hedeleg &= ~BIT(EXC_BREAKPOINT); in kvm_riscv_vcpu_setup_config()
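
setup_config() derives the guest's HENVCFG image from the ISA extensions userspace enabled for the vCPU: Svpbmt turns on PBMTE, Sstc turns on STCE, Zicbom enables the cache-block invalidate/flush bits, and so on. A sketch of that accumulation (the extension enum and array-based test are stand-ins, not the kernel API; bit positions follow the *envcfg layout):

    enum { EXT_SVPBMT, EXT_SSTC, EXT_ZICBOM, EXT_ZICBOZ, EXT_MAX };

    #define ENVCFG_PBMTE  (1ULL << 62)
    #define ENVCFG_STCE   (1ULL << 63)
    #define ENVCFG_CBIE   (3ULL << 4)
    #define ENVCFG_CBCFE  (1ULL << 6)
    #define ENVCFG_CBZE   (1ULL << 7)

    static unsigned long long build_henvcfg(const unsigned char isa[EXT_MAX])
    {
        unsigned long long henvcfg = 0;

        if (isa[EXT_SVPBMT]) henvcfg |= ENVCFG_PBMTE;
        if (isa[EXT_SSTC])   henvcfg |= ENVCFG_STCE;
        if (isa[EXT_ZICBOM]) henvcfg |= ENVCFG_CBIE | ENVCFG_CBCFE;
        if (isa[EXT_ZICBOZ]) henvcfg |= ENVCFG_CBZE;

        return henvcfg;
    }
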
581 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
582 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_arch_vcpu_load()
586 nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus); in kvm_arch_vcpu_load()
587 nacl_csr_write(nsh, CSR_VSIE, csr->vsie); in kvm_arch_vcpu_load()
588 nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec); in kvm_arch_vcpu_load()
589 nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch); in kvm_arch_vcpu_load()
590 nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc); in kvm_arch_vcpu_load()
591 nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause); in kvm_arch_vcpu_load()
592 nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval); in kvm_arch_vcpu_load()
593 nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg); in kvm_arch_vcpu_load()
594 nacl_csr_write(nsh, CSR_HVIP, csr->hvip); in kvm_arch_vcpu_load()
595 nacl_csr_write(nsh, CSR_VSATP, csr->vsatp); in kvm_arch_vcpu_load()
596 nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg); in kvm_arch_vcpu_load()
598 nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32); in kvm_arch_vcpu_load()
600 nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0); in kvm_arch_vcpu_load()
602 nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32); in kvm_arch_vcpu_load()
605 csr_write(CSR_VSSTATUS, csr->vsstatus); in kvm_arch_vcpu_load()
606 csr_write(CSR_VSIE, csr->vsie); in kvm_arch_vcpu_load()
607 csr_write(CSR_VSTVEC, csr->vstvec); in kvm_arch_vcpu_load()
608 csr_write(CSR_VSSCRATCH, csr->vsscratch); in kvm_arch_vcpu_load()
609 csr_write(CSR_VSEPC, csr->vsepc); in kvm_arch_vcpu_load()
610 csr_write(CSR_VSCAUSE, csr->vscause); in kvm_arch_vcpu_load()
611 csr_write(CSR_VSTVAL, csr->vstval); in kvm_arch_vcpu_load()
612 csr_write(CSR_HEDELEG, cfg->hedeleg); in kvm_arch_vcpu_load()
613 csr_write(CSR_HVIP, csr->hvip); in kvm_arch_vcpu_load()
614 csr_write(CSR_VSATP, csr->vsatp); in kvm_arch_vcpu_load()
615 csr_write(CSR_HENVCFG, cfg->henvcfg); in kvm_arch_vcpu_load()
617 csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32); in kvm_arch_vcpu_load()
619 csr_write(CSR_HSTATEEN0, cfg->hstateen0); in kvm_arch_vcpu_load()
621 csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32); in kvm_arch_vcpu_load()
629 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
630 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
631 vcpu->arch.isa); in kvm_arch_vcpu_load()
632 kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
633 kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
634 vcpu->arch.isa); in kvm_arch_vcpu_load()
640 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
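
vcpu_load restores every guest CSR through one of two backends: when KVM itself runs virtualized under an SBI NACL (nested acceleration) host, writes are staged into shared memory for the outer hypervisor to apply, avoiding a trap per CSR instruction; otherwise real csrw instructions are used. A toy model of that one-call-site/two-backends dispatch (all helpers invented; a real csr_write takes a compile-time CSR number):

    #include <stdio.h>

    struct nacl_shmem { unsigned long csr_image[4096]; };

    static void nacl_csr_write_model(struct nacl_shmem *nsh, int csr, unsigned long v)
    {
        nsh->csr_image[csr] = v;            /* staged; host applies it later */
    }

    static void csr_write_direct(int csr, unsigned long v)
    {
        printf("csrw %d, %lx\n", csr, v);   /* placeholder for the csrw insn */
    }

    /* the load path picks the backend once and replays every guest CSR */
    static void restore_guest_csrs(struct nacl_shmem *nsh,
                                   const unsigned long *csrs, int n)
    {
        for (int i = 0; i < n; i++) {
            if (nsh)
                nacl_csr_write_model(nsh, i, csrs[i]);
            else
                csr_write_direct(i, csrs[i]);
        }
    }
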
646 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
648 vcpu->cpu = -1; in kvm_arch_vcpu_put()
652 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
653 vcpu->arch.isa); in kvm_arch_vcpu_put()
654 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
657 kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
658 vcpu->arch.isa); in kvm_arch_vcpu_put()
659 kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
663 csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS); in kvm_arch_vcpu_put()
664 csr->vsie = nacl_csr_read(nsh, CSR_VSIE); in kvm_arch_vcpu_put()
665 csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC); in kvm_arch_vcpu_put()
666 csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH); in kvm_arch_vcpu_put()
667 csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC); in kvm_arch_vcpu_put()
668 csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE); in kvm_arch_vcpu_put()
669 csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL); in kvm_arch_vcpu_put()
670 csr->hvip = nacl_csr_read(nsh, CSR_HVIP); in kvm_arch_vcpu_put()
671 csr->vsatp = nacl_csr_read(nsh, CSR_VSATP); in kvm_arch_vcpu_put()
673 csr->vsstatus = csr_read(CSR_VSSTATUS); in kvm_arch_vcpu_put()
674 csr->vsie = csr_read(CSR_VSIE); in kvm_arch_vcpu_put()
675 csr->vstvec = csr_read(CSR_VSTVEC); in kvm_arch_vcpu_put()
676 csr->vsscratch = csr_read(CSR_VSSCRATCH); in kvm_arch_vcpu_put()
677 csr->vsepc = csr_read(CSR_VSEPC); in kvm_arch_vcpu_put()
678 csr->vscause = csr_read(CSR_VSCAUSE); in kvm_arch_vcpu_put()
679 csr->vstval = csr_read(CSR_VSTVAL); in kvm_arch_vcpu_put()
680 csr->hvip = csr_read(CSR_HVIP); in kvm_arch_vcpu_put()
681 csr->vsatp = csr_read(CSR_VSATP); in kvm_arch_vcpu_put()
686 * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
700 (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
704 if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
743 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
745 ncsr_write(CSR_HVIP, csr->hvip); in kvm_riscv_update_hvip()
751 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_guest_state()
752 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_guest_state()
753 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_guest_state()
755 vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren); in kvm_riscv_vcpu_swap_in_guest_state()
756 vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg); in kvm_riscv_vcpu_swap_in_guest_state()
758 (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0)) in kvm_riscv_vcpu_swap_in_guest_state()
759 vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0, in kvm_riscv_vcpu_swap_in_guest_state()
760 smcsr->sstateen0); in kvm_riscv_vcpu_swap_in_guest_state()
765 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_host_state()
766 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_host_state()
767 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_host_state()
769 csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren); in kvm_riscv_vcpu_swap_in_host_state()
770 csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg); in kvm_riscv_vcpu_swap_in_host_state()
772 (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0)) in kvm_riscv_vcpu_swap_in_host_state()
773 smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0, in kvm_riscv_vcpu_swap_in_host_state()
774 vcpu->arch.host_sstateen0); in kvm_riscv_vcpu_swap_in_host_state()
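
scounteren, senvcfg, and sstateen0 are single hardware CSRs shared by host and guest context, so the switch uses a swap: write the incoming value and capture the outgoing one in a single exchange, with swap_in_guest_state and swap_in_host_state forming a symmetric pair. A software model of that pairing (names invented):

    static unsigned long senvcfg_csr;   /* model of the one hardware CSR */

    static unsigned long csr_swap_model(unsigned long *csr, unsigned long nv)
    {
        unsigned long old = *csr;
        *csr = nv;                      /* write new, return old */
        return old;
    }

    struct vcpu_env {
        unsigned long guest_senvcfg;    /* guest's saved copy */
        unsigned long host_senvcfg;     /* host's saved copy  */
    };

    static void swap_in_guest(struct vcpu_env *v)
    {
        v->host_senvcfg = csr_swap_model(&senvcfg_csr, v->guest_senvcfg);
    }

    static void swap_in_host(struct vcpu_env *v)
    {
        v->guest_senvcfg = csr_swap_model(&senvcfg_csr, v->host_senvcfg);
    }

As long as the two calls bracket every guest run, neither side ever observes the other's value.
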
788 struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_enter_exit()
789 struct kvm_cpu_context *hcntx = &vcpu->arch.host_context; in kvm_riscv_vcpu_enter_exit()
806 hcntx->hstatus = in kvm_riscv_vcpu_enter_exit()
811 gcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
816 hcntx->hstatus = nacl_csr_swap(nsh, in kvm_riscv_vcpu_enter_exit()
817 CSR_HSTATUS, gcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
819 hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
825 &gcntx->ra, in kvm_riscv_vcpu_enter_exit()
828 __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL, in kvm_riscv_vcpu_enter_exit()
835 gcntx->hstatus = nacl_scratch_read_long(nsh, in kvm_riscv_vcpu_enter_exit()
839 gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
842 trap->htval = nacl_csr_read(nsh, CSR_HTVAL); in kvm_riscv_vcpu_enter_exit()
843 trap->htinst = nacl_csr_read(nsh, CSR_HTINST); in kvm_riscv_vcpu_enter_exit()
845 hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
847 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
849 gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus); in kvm_riscv_vcpu_enter_exit()
851 trap->htval = csr_read(CSR_HTVAL); in kvm_riscv_vcpu_enter_exit()
852 trap->htinst = csr_read(CSR_HTINST); in kvm_riscv_vcpu_enter_exit()
855 trap->sepc = gcntx->sepc; in kvm_riscv_vcpu_enter_exit()
856 trap->scause = csr_read(CSR_SCAUSE); in kvm_riscv_vcpu_enter_exit()
857 trap->stval = csr_read(CSR_STVAL); in kvm_riscv_vcpu_enter_exit()
859 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
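
kvm_riscv_vcpu_enter_exit() brackets the low-level world switch: HSTATUS is swapped on the way in (installing the guest's SPV/SPVP view) and swapped back on the way out, and the trap CSRs are snapshotted immediately after return so later C code cannot clobber them. A stub-based sketch of that ordering (every helper here is invented):

    struct trap_info { unsigned long sepc, scause, stval, htval, htinst; };

    struct vcpu_model {
        unsigned long guest_hstatus, host_hstatus, guest_sepc;
    };

    /* stand-ins for CSR access and the assembly world switch */
    static unsigned long hstatus_csr;
    static unsigned long csr_swap_hstatus(unsigned long nv)
    { unsigned long old = hstatus_csr; hstatus_csr = nv; return old; }
    static void low_level_switch(struct vcpu_model *v) { (void)v; }
    static unsigned long read_csr(int which) { (void)which; return 0; }

    static void enter_exit(struct vcpu_model *v, struct trap_info *t)
    {
        /* install the guest view; keep the host value for the way back */
        v->host_hstatus = csr_swap_hstatus(v->guest_hstatus);

        low_level_switch(v);              /* returns at the next VM exit */

        v->guest_hstatus = csr_swap_hstatus(v->host_hstatus);

        /* capture trap state before anything else can disturb it */
        t->sepc   = v->guest_sepc;
        t->scause = read_csr(0);          /* scause */
        t->stval  = read_csr(1);          /* stval  */
        t->htval  = read_csr(2);          /* htval  */
        t->htinst = read_csr(3);          /* htinst */
    }
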
868 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
870 if (!vcpu->arch.ran_atleast_once) in kvm_arch_vcpu_ioctl_run()
874 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
878 switch (run->exit_reason) { in kvm_arch_vcpu_ioctl_run()
880 /* Process MMIO value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
881 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
884 /* Process SBI value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
885 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
888 /* Process CSR value returned from user-space */ in kvm_arch_vcpu_ioctl_run()
889 ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
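
On re-entry, ioctl_run() first completes whatever the previous exit handed to userspace: an MMIO exit is finished by kvm_riscv_vcpu_mmio_return() consuming the value userspace placed in the run structure, and the SBI and CSR exits are completed analogously. For context, a hedged sketch of the matching userspace loop against the generic KVM run API (the device model is a toy):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* toy device model: reads return zeros, writes are ignored */
    static void emulate_mmio(uint64_t addr, uint8_t *data, uint32_t len, int is_write)
    {
        (void)addr; (void)is_write;
        memset(data, 0, len);
    }

    static int run_vcpu(int vcpu_fd, struct kvm_run *run)
    {
        for (;;) {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;

            switch (run->exit_reason) {
            case KVM_EXIT_MMIO:
                /* fill (read) or consume (write) run->mmio.data; the */
                /* next KVM_RUN lets the kernel complete the access   */
                emulate_mmio(run->mmio.phys_addr, run->mmio.data,
                             run->mmio.len, run->mmio.is_write);
                break;
            default:
                return 0;   /* hand anything else back to the caller */
            }
        }
    }
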
900 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
902 return -EINTR; in kvm_arch_vcpu_ioctl_run()
910 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
926 /* Update AIA HW state before entering guest */ in kvm_arch_vcpu_ioctl_run()
939 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
941 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
948 * vCPU interrupts may have been updated asynchronously, so update the HW copy. in kvm_arch_vcpu_ioctl_run()
952 /* Update HVIP CSR for current CPU */ in kvm_arch_vcpu_ioctl_run()
955 if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()
958 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
968 * Note: This should be done after the G-stage VMID has been updated. in kvm_arch_vcpu_ioctl_run()
979 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
980 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()