Lines Matching full:fault

258 	 * Maximum page size that can be created for this fault; input to ...
300 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
306 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
307 * RET_PF_RETRY: let the CPU fault again on the address.
308 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
311 * RET_PF_INVALID: the SPTE is invalid, let the real page fault path update it.
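These RET_PF_* comments document the return codes shared by KVM's x86 page fault handlers. A minimal sketch of such an enum, limited to the four codes visible above (the kernel's real enum carries additional codes, such as RET_PF_FIXED, and the explicit value here is an assumption):

/* Sketch only; not the kernel's full definition. */
enum {
	RET_PF_CONTINUE = 0,	/* so far so good, keep handling the fault */
	RET_PF_RETRY,		/* return to the guest and let it fault again */
	RET_PF_EMULATE,		/* MMIO fault, emulate the instruction */
	RET_PF_INVALID,		/* stale SPTE, take the real fault path */
};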
340 						     struct kvm_page_fault *fault)
342 	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
343 				      PAGE_SIZE, fault->write, fault->exec,
344 				      fault->is_private);
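Lines 340-344 are the body of kvm_mmu_prepare_memory_fault_exit(): it reports a memory-fault exit to userspace for the PAGE_SIZE range starting at the faulting address, where fault->gfn << PAGE_SHIFT converts the guest frame number back into a guest physical address. A self-contained sketch of that gfn/gpa arithmetic (KVM defines gpa_to_gfn()/gfn_to_gpa() essentially this way):

#include <stdint.h>

#define PAGE_SHIFT	12			/* 4 KiB pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

typedef uint64_t gpa_t;		/* guest physical address */
typedef uint64_t gfn_t;		/* guest frame number */

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return gpa >> PAGE_SHIFT;		/* drop the intra-page offset */
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;	/* the fault->gfn << PAGE_SHIFT above */
}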
351 	struct kvm_page_fault fault = {	/* local in kvm_mmu_do_page_fault() */
377 	 * fault.addr can be used when the shared bit is needed.
379 	fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
380 	fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
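Inside kvm_mmu_do_page_fault(), the raw faulting address is reduced to a memslot-indexable GFN: kvm_gfn_direct_bits() returns GPA bits repurposed as attributes (such as a TDX shared bit), and those are masked off before the memslot lookup, while fault.addr keeps the unmasked address for when the shared bit is needed. A standalone illustration of the masking, reusing the definitions from the sketch above (the bit position is invented for the example, not a real layout):

/* Hypothetical attribute bit sitting above the usable GFN range. */
#define SHARED_BIT_GFN	(1ULL << (51 - PAGE_SHIFT))

static inline gfn_t fault_addr_to_gfn(gpa_t fault_addr)
{
	/* Strip the attribute bit so the GFN can index memslots. */
	return gpa_to_gfn(fault_addr) & ~SHARED_BIT_GFN;
}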
387 	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
388 		r = kvm_tdp_page_fault(vcpu, &fault);
390 		r = vcpu->arch.mmu->page_fault(vcpu, &fault);
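The IS_ENABLED() branch at lines 387-390 is a devirtualization pattern: with CONFIG_MITIGATION_RETPOLINE, every indirect call is routed through a return-trampoline thunk, so the common TDP case is dispatched with a direct call to kvm_tdp_page_fault() and only other MMU configurations pay for the indirect vcpu->arch.mmu->page_fault() call. A generic kernel-style sketch of the same idea (all names below are invented for the example; IS_ENABLED() is the kernel's compile-time config test from linux/kconfig.h):

#include <linux/kconfig.h>
#include <linux/types.h>

struct mmu_ops {
	int (*handle)(int arg);		/* indirect, retpoline-thunked call */
};

static int hot_path_handler(int arg)
{
	return arg + 1;			/* placeholder common-case work */
}

static int dispatch(struct mmu_ops *ops, int arg, bool is_common_case)
{
	/* A direct call on the known-hot path skips the retpoline thunk. */
	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && is_common_case)
		return hot_path_handler(arg);

	return ops->handle(arg);
}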
397 	if (r == RET_PF_EMULATE && fault.is_private) {
399 		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
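Lines 397-399 catch emulation requests against private memory (e.g. TDX or SEV-SNP guests): KVM cannot read private pages to emulate the faulting instruction, so it prepares a memory-fault exit for userspace instead. A hedged sketch of how the truncated branch plausibly completes (the -EFAULT return is an assumption about how the exit is surfaced, not quoted from the source):

	if (r == RET_PF_EMULATE && fault.is_private) {
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		return -EFAULT;		/* assumed: bail out to userspace */
	}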
403 	if (fault.write_fault_to_shadow_pgtable && emulation_type)
406 		*level = fault.goal_level;
413 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
414 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
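The last two prototypes handle huge-page sizing: kvm_mmu_hugepage_adjust() chooses fault->goal_level, capped by the max_level mentioned at line 258, and disallowed_hugepage_adjust() demotes the mapping when a huge page cannot be used at the current level. One concrete constraint on the chosen level is natural alignment of the GFN, sketched below reusing the gfn_t typedef from the earlier sketch (x86 paging covers 9 more GFN bits per level; the helper name is invented):

/* Level 1 = 4 KiB, 2 = 2 MiB, 3 = 1 GiB on x86. Hypothetical helper. */
static inline bool gfn_aligned_for_level(gfn_t gfn, int level)
{
	gfn_t pages_per_hpage = 1ULL << ((level - 1) * 9);

	return !(gfn & (pages_per_hpage - 1));
}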