Lines Matching full:arch

39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
82 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
98 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
116 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
164 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
165 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
189 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
226 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
228 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
314 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
354 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
355 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
377 return vcpu->arch.pc; in kvm_arch_vcpu_get_ip()
387 return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU)); in kvm_arch_pmi_in_guest()
425 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
426 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
430 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
431 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
446 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
458 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
485 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
490 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
493 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
497 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
505 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
512 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
520 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
527 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
536 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
538 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
544 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
550 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
560 map = kvm->arch.phyid_map; in kvm_get_vcpu_by_cpuid()
570 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
583 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
605 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
770 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
775 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
780 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
783 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
786 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
789 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
792 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
795 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
805 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
859 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
861 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
862 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
865 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
870 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
873 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
876 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
879 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
882 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
885 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
900 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
903 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
904 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
905 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
911 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); in kvm_set_one_reg()
912 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); in kvm_set_one_reg()
959 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
960 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
962 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
971 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
972 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
974 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
975 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1046 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1067 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1110 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) in kvm_loongarch_cpucfg_set_attr()
1111 && ((kvm->arch.pv_features & valid) != val)) in kvm_loongarch_cpucfg_set_attr()
1113 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; in kvm_loongarch_cpucfg_set_attr()
1138 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1149 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1150 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
1192 * be clear in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1206 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1253 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1254 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1256 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1265 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1266 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1268 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1276 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1281 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1282 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1291 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1292 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1294 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1311 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1312 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1332 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1335 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1336 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1352 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1354 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1360 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1366 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
1387 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1389 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1393 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1397 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1398 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1402 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1407 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1420 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1421 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1422 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1427 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1428 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1429 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1434 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1435 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1436 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1495 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1496 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1498 hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC, in kvm_arch_vcpu_create()
1502 vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd); in kvm_arch_vcpu_create()
1508 vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd; in kvm_arch_vcpu_create()
1510 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1511 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1512 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1513 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1520 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1523 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1526 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1535 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1557 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1558 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1560 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1567 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1577 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1583 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1589 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1591 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1602 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1605 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1664 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1681 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1691 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1742 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1759 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()