Searched refs:walk_mmu (Results 1 – 9 of 9) sorted by relevance
161  return vcpu->arch.walk_mmu->pdptrs[index];  in kvm_pdptr_read()
166  vcpu->arch.walk_mmu->pdptrs[index] = value;  in kvm_pdptr_write()
272 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; in mmu_is_nested()
989   vcpu->arch.walk_mmu;  in kvm_inject_emulated_page_fault()
1055  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in load_pdptrs()
7815  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_mmu_gva_to_gpa_read()
7825  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_mmu_gva_to_gpa_write()
7837  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_mmu_gva_to_gpa_system()
7846  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_read_guest_virt_helper()
7879  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_fetch_guest_virt()
7938  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in kvm_write_guest_virt_helper()
8044  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in vcpu_mmio_gva_to_gpa()
8054  !permission_fault(vcpu, vcpu->arch.walk_mmu,  in vcpu_mmio_gva_to_gpa()
[all …]
517  walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;  in FNAME()
870  WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);  in FNAME()
6563  kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);  in kvm_mmu_invlpg()
6700  vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;  in kvm_mmu_create()
103  vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;  in nested_svm_init_mmu_context()
109  vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;  in nested_svm_uninit_mmu_context()
852 struct kvm_mmu *walk_mmu; member
486  vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;  in nested_ept_init_mmu_context()
492  vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;  in nested_ept_uninit_mmu_context()
3404  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in vmx_ept_load_pdptrs()
3419  struct kvm_mmu *mmu = vcpu->arch.walk_mmu;  in ept_save_pdptrs()
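
Read together, the hits trace one pattern in KVM's x86 code: vcpu->arch.walk_mmu is the MMU used for software walks of guest page tables; it starts out pointing at root_mmu (kvm_mmu_create), the nested SVM/EPT context code repoints it at nested_mmu while a nested guest runs, and the GVA-to-GPA helpers and PDPTR accessors dispatch through whichever MMU it currently points to. Below is a minimal, self-contained C sketch of that pointer-swapping pattern. It is not kernel code: the struct layout and the model_* names are simplified assumptions for illustration only.

/*
 * Minimal model of the walk_mmu pointer-swapping pattern seen in the
 * hits above.  Not kernel code: the struct layout and the model_*
 * names are simplified assumptions for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

struct model_mmu {
	const char *name;
	/* Per-MMU translation callback, analogous to mmu->gva_to_gpa(). */
	gpa_t (*gva_to_gpa)(struct model_mmu *mmu, gva_t gva);
	uint64_t pdptrs[4];
};

struct model_vcpu {
	struct model_mmu root_mmu;    /* the vCPU's own page tables         */
	struct model_mmu nested_mmu;  /* used while a nested guest runs     */
	struct model_mmu *walk_mmu;   /* whichever MMU software walks now   */
};

/* Identity translation standing in for a real page-table walk. */
static gpa_t model_direct_gva_to_gpa(struct model_mmu *mmu, gva_t gva)
{
	printf("translating 0x%llx via %s\n", (unsigned long long)gva, mmu->name);
	return gva;
}

/* Mirrors kvm_mmu_create(): walk_mmu starts out pointing at root_mmu. */
static void model_vcpu_init(struct model_vcpu *vcpu)
{
	vcpu->root_mmu = (struct model_mmu){ .name = "root_mmu",
					     .gva_to_gpa = model_direct_gva_to_gpa };
	vcpu->nested_mmu = (struct model_mmu){ .name = "nested_mmu",
					       .gva_to_gpa = model_direct_gva_to_gpa };
	vcpu->walk_mmu = &vcpu->root_mmu;
}

/* Mirrors nested_{svm,ept}_init_mmu_context(): repoint at nested_mmu. */
static void model_nested_init_mmu_context(struct model_vcpu *vcpu)
{
	vcpu->walk_mmu = &vcpu->nested_mmu;
}

/* Mirrors nested_{svm,ept}_uninit_mmu_context(): back to root_mmu. */
static void model_nested_uninit_mmu_context(struct model_vcpu *vcpu)
{
	vcpu->walk_mmu = &vcpu->root_mmu;
}

/* Mirrors mmu_is_nested(): nested iff walk_mmu points at nested_mmu. */
static bool model_mmu_is_nested(struct model_vcpu *vcpu)
{
	return vcpu->walk_mmu == &vcpu->nested_mmu;
}

/* Mirrors kvm_pdptr_read(): PDPTRs are cached on the current walk MMU. */
static uint64_t model_pdptr_read(struct model_vcpu *vcpu, int index)
{
	return vcpu->walk_mmu->pdptrs[index];
}

/* Mirrors kvm_mmu_gva_to_gpa_*(): always dispatch through walk_mmu. */
static gpa_t model_gva_to_gpa(struct model_vcpu *vcpu, gva_t gva)
{
	struct model_mmu *mmu = vcpu->walk_mmu;

	return mmu->gva_to_gpa(mmu, gva);
}

int main(void)
{
	struct model_vcpu vcpu;

	model_vcpu_init(&vcpu);
	model_gva_to_gpa(&vcpu, 0x1000);                  /* uses root_mmu   */

	model_nested_init_mmu_context(&vcpu);
	printf("nested: %d\n", model_mmu_is_nested(&vcpu));
	model_gva_to_gpa(&vcpu, 0x2000);                  /* uses nested_mmu */
	printf("pdptr0: 0x%llx\n",
	       (unsigned long long)model_pdptr_read(&vcpu, 0));

	model_nested_uninit_mmu_context(&vcpu);
	printf("nested: %d\n", model_mmu_is_nested(&vcpu));
	return 0;
}

The point of the sketch is only the indirection: callers never pick root_mmu or nested_mmu themselves, they go through walk_mmu, so switching nested context is a single pointer assignment, exactly as the nested_svm_*/nested_ept_* hits show.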