Lines Matching +full:page +full:- +full:level
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
19 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
21 * once per guest PTE type. The per-type defines are #undef'd at the end.
50 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
58 #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
64 /* Common logic, but per-type values. These also need to be undefined. */
77 * The guest_walker structure emulates the behavior of the hardware page
81 int level; member
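For orientation, a sketch of the walker state these lines manipulate, reconstructed from the uses visible in this listing (walker->ptes, table_gfn, ptep_user, pte_writable, pt_access, prefetch_ptes); types come from the KVM headers and the exact field list in paging_tmpl.h may differ:

struct guest_walker {
        int level;                                      /* level the walk ended at */
        unsigned int max_level;                         /* level the walk started at */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];            /* gfn of each visited table */
        pt_element_t ptes[PT_MAX_FULL_LEVELS];          /* gpte read at each level */
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];   /* batch used for prefetching */
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];              /* gpa of each gpte */
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; /* hva of each gpte */
        bool pte_writable[PT_MAX_FULL_LEVELS];          /* may A/D bits be written back? */
        unsigned int pt_access[PT_MAX_FULL_LEVELS];     /* access bits per table level */
        unsigned int pte_access;                        /* accumulated leaf permissions */
        gfn_t gfn;                                      /* final translated frame */
        struct x86_exception fault;                     /* fault to report on failure */
};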
98 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT; in pse36_gfn_delta()
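With PAGE_SHIFT = 12, the shift above is 32 - 13 - 12 = 7: the four PDE bits selected by PT32_DIR_PSE36_MASK (bits [16:13]) become gfn bits [23:20], i.e. physical-address bits [35:32]. A standalone sketch, assuming those standard values:

#include <stdio.h>

#define PAGE_SHIFT              12
#define PT32_DIR_PSE36_SIZE     4
#define PT32_DIR_PSE36_SHIFT    13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Extra gfn bits contributed by a PSE-36 4M PDE. */
static unsigned long long pse36_gfn_delta(unsigned int gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;     /* 7 */

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

int main(void)
{
        /* PDE bit 13 set -> gfn bit 20 -> physical address bit 32. */
        printf("%#llx\n", pse36_gfn_delta(1u << 13) << PAGE_SHIFT); /* 0x100000000 */
        return 0;
}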
122 mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & in FNAME()
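The shift above moves the guest dirty bit down into the writable-bit position, so the mask being built keeps ACC_WRITE_MASK only for gptes that are already dirty (a write to a clean gpte must fault so the dirty bit can be set). A standalone sketch, assuming the usual layout (writable = bit 1, dirty = bit 6):

#include <stdio.h>

#define PT_WRITABLE_SHIFT       1       /* gpte bit 1: R/W */
#define PT_GUEST_DIRTY_SHIFT    6       /* gpte bit 6: dirty */
#define PT_WRITABLE_MASK        (1ULL << PT_WRITABLE_SHIFT)

int main(void)
{
        unsigned long long clean = 1ULL << PT_WRITABLE_SHIFT;   /* writable, not dirty */
        unsigned long long dirty = clean | 1ULL << PT_GUEST_DIRTY_SHIFT;

        /* The dirty bit lands in the writable position after the shift. */
        printf("%llu\n", (clean >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK); /* 0 */
        printf("%llu\n", (dirty >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK); /* 2 */
        return 0;
}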
145 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
147 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) || in FNAME()
148 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
159 if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && in FNAME()
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
169 drop_spte(vcpu->kvm, spte); in FNAME()
174 * For PTTYPE_EPT, a page table can be executable but not readable
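Because an EPT gpte can be executable yet not readable, "present" cannot imply read permission. A standalone sketch of the EPT access decoding (mask values mirror the VMX_EPT_*_MASK and ACC_*_MASK definitions in the KVM sources; treat them as assumptions of this sketch): the EPT read bit is repurposed into ACC_USER_MASK, so nothing implies readability.

#include <stdio.h>
#include <stdint.h>

#define VMX_EPT_READABLE_MASK   0x1ull
#define VMX_EPT_WRITABLE_MASK   0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
#define ACC_EXEC_MASK           1u
#define ACC_WRITE_MASK          (1u << 1)
#define ACC_USER_MASK           (1u << 2)

static unsigned int ept_gpte_access(uint64_t gpte)
{
        return ((gpte & VMX_EPT_WRITABLE_MASK)   ? ACC_WRITE_MASK : 0) |
               ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK  : 0) |
               ((gpte & VMX_EPT_READABLE_MASK)   ? ACC_USER_MASK  : 0);
}

int main(void)
{
        /* Execute-only gpte: executable, neither readable nor writable. */
        printf("%#x\n", ept_gpte_access(VMX_EPT_EXECUTABLE_MASK));      /* 0x1 */
        return 0;
}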
202 unsigned level, index; in FNAME() local
212 for (level = walker->max_level; level >= walker->level; --level) { in FNAME()
213 pte = orig_pte = walker->ptes[level - 1]; in FNAME()
214 table_gfn = walker->table_gfn[level - 1]; in FNAME()
215 ptep_user = walker->ptep_user[level - 1]; in FNAME()
221 if (level == walker->level && write_fault && in FNAME()
225 if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr)) in FNAME()
226 return -EINVAL; in FNAME()
234 * If the slot is read-only, simply do not process the accessed in FNAME()
236 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots in FNAME()
241 * it always wants nested page table entries for the guest in FNAME()
242 * page tables to be writable. And EPT works but will simply in FNAME()
243 * overwrite the read-only memory to set the accessed and dirty in FNAME()
246 if (unlikely(!walker->pte_writable[level - 1])) in FNAME()
254 walker->ptes[level - 1] = pte; in FNAME()
271 unsigned int level, unsigned int gpte) in FNAME()
275 * all levels or indicates a huge page (ignoring CR3/EPTP). In either in FNAME()
280 * 32-bit paging requires special handling because bit 7 is ignored if in FNAME()
281 * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is in FNAME()
282 * greater than the last level for which bit 7 is the PAGE_SIZE bit. in FNAME()
284 * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7 in FNAME()
285 * is not reserved and does not indicate a huge page at this level, in FNAME()
288 gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse); in FNAME()
292 * iff level <= PG_LEVEL_4K, which for our purpose means in FNAME()
293 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then. in FNAME()
295 gpte |= level - PG_LEVEL_4K - 1; in FNAME()
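Both branchless statements above lean on unsigned wraparound: level - (PT32_ROOT_LEVEL + PSE) underflows to an all-ones value (bit 7 set) exactly when level < 2 + PSE, and level - PG_LEVEL_4K - 1 underflows exactly when level == PG_LEVEL_4K; only bit 7 (PT_PAGE_SIZE_MASK) of the result is consumed. A standalone sketch exercising the 32-bit cases:

#include <stdio.h>

#define PT_PAGE_SIZE_MASK       (1u << 7)
#define PT32_ROOT_LEVEL         2
#define PG_LEVEL_4K             1

/* 32-bit variant only; level 1 is the PTE, level 2 the PDE. */
static int is_last_gpte32(unsigned int level, unsigned int gpte, unsigned int pse)
{
        gpte &= level - (PT32_ROOT_LEVEL + pse); /* keeps bit 7 iff level < 2 + PSE */
        gpte |= level - PG_LEVEL_4K - 1;         /* forces bit 7 iff level == 1 */
        return !!(gpte & PT_PAGE_SIZE_MASK);
}

int main(void)
{
        printf("%d\n", is_last_gpte32(1, 0, 0));                 /* 1: a 4K PTE is always last */
        printf("%d\n", is_last_gpte32(2, PT_PAGE_SIZE_MASK, 1)); /* 1: PSE 4M page */
        printf("%d\n", is_last_gpte32(2, PT_PAGE_SIZE_MASK, 0)); /* 0: bit 7 ignored, CR4.PSE=0 */
        return 0;
}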
326 walker->level = mmu->cpu_role.base.level; in FNAME()
332 if (walker->level == PT32E_ROOT_LEVEL) { in FNAME()
333 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); in FNAME()
334 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
337 --walker->level; in FNAME()
340 walker->max_level = walker->level; in FNAME()
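For the PAE root above there are only four PDPTEs, selected by bits [31:30] of the address: for example, addr = 0xC0000000 gives (addr >> 30) & 3 = 3, the PDPTE covering the top gigabyte.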
345 * processor to set the dirty flag in any EPT paging-structure entry. in FNAME()
352 * Queue a page fault for injection if this assertion fails, as callers in FNAME()
358 if (KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm)) in FNAME()
361 ++walker->level; in FNAME()
368 --walker->level; in FNAME()
370 index = PT_INDEX(addr, walker->level); in FNAME()
375 BUG_ON(walker->level < 1); in FNAME()
376 walker->table_gfn[walker->level - 1] = table_gfn; in FNAME()
377 walker->pte_gpa[walker->level - 1] = pte_gpa; in FNAME()
380 nested_access, &walker->fault); in FNAME()
384 * instruction) triggers a nested page fault. The exit in FNAME()
386 * "guest page access" as the nested page fault's cause, in FNAME()
387 * instead of "guest page structure access". To fix this, in FNAME()
400 &walker->pte_writable[walker->level - 1]); in FNAME()
407 walker->ptep_user[walker->level - 1] = ptep_user; in FNAME()
409 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
420 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) { in FNAME()
425 walker->ptes[walker->level - 1] = pte; in FNAME()
428 walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask); in FNAME()
429 } while (!FNAME(is_last_gpte)(mmu, walker->level, pte)); in FNAME()
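Each loop iteration descends one level and indexes the current table with PT_INDEX(addr, walker->level). For the 64-bit PTE type that means nine index bits per level above the 12-bit page offset; a minimal standalone sketch of the slicing, assuming 512-entry tables:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9       /* 512 eight-byte entries per 4K table */

/* Index into the level'th table for a 64-bit-style walk (level 1 = PTE). */
static unsigned int pt64_index(unsigned long long addr, int level)
{
        return (addr >> (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)) &
               ((1u << PT64_LEVEL_BITS) - 1);
}

int main(void)
{
        unsigned long long addr = 0x7f1234567000ULL;

        for (int level = 4; level >= 1; level--)
                printf("level %d index %u\n", level, pt64_index(addr, level));
        return 0;
}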
435 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); in FNAME()
436 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access); in FNAME()
440 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
441 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
444 if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36()) in FNAME()
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
452 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
455 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); in FNAME()
463 (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); in FNAME()
481 walker->fault.vector = PF_VECTOR; in FNAME()
482 walker->fault.error_code_valid = true; in FNAME()
483 walker->fault.error_code = errcode; in FNAME()
492 * [2:0] - Derived from the access bits. The exit_qualification might be in FNAME()
494 * [5:3] - Calculated by the page walk of the guest EPT page tables in FNAME()
495 * [8:7] - Derived from bits [8:7] of the real exit_qualification in FNAME()
500 walker->fault.exit_qualification = 0; in FNAME()
503 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_WRITE; in FNAME()
505 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_READ; in FNAME()
507 walker->fault.exit_qualification |= EPT_VIOLATION_ACC_INSTR; in FNAME()
513 walker->fault.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) << in FNAME()
517 walker->fault.address = addr; in FNAME()
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
519 walker->fault.async_page_fault = false; in FNAME()
521 trace_kvm_mmu_walker_error(walker->fault.error_code); in FNAME()
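Putting the layout described above together: the synthesized EPT exit_qualification carries the attempted access in bits [2:0], the permissions found by the guest EPT walk in bits [5:3], and bits [8:7] copied from the real exit_qualification. A standalone sketch with the bit positions defined locally (the kernel uses the EPT_VIOLATION_* constants from asm/vmx.h):

#include <stdio.h>

#define EPT_ACC_READ    (1u << 0)       /* bits [2:0]: attempted access */
#define EPT_ACC_WRITE   (1u << 1)
#define EPT_ACC_INSTR   (1u << 2)
#define EPT_RWX_SHIFT   3               /* bits [5:3]: permissions from the walk */

int main(void)
{
        unsigned int pte_rwx = 0x3;     /* walk found a readable+writable gpa */
        unsigned int qual = EPT_ACC_WRITE | (pte_rwx << EPT_RWX_SHIFT);

        printf("exit_qualification = %#x\n", qual); /* 0x1a: write to a RW gpa */
        return 0;
}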
528 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, in FNAME()
545 pte_access = sp->role.access & FNAME(gpte_access)(gpte); in FNAME()
546 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
562 struct guest_walker *gw, int level) in FNAME()
565 gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; in FNAME()
569 if (level == PG_LEVEL_4K) { in FNAME()
570 mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1; in FNAME()
572 index = (pte_gpa - base_gpa) / sizeof(pt_element_t); in FNAME()
575 gw->prefetch_ptes, sizeof(gw->prefetch_ptes)); in FNAME()
576 curr_pte = gw->prefetch_ptes[index]; in FNAME()
581 return r || curr_pte != gw->ptes[level - 1]; in FNAME()
588 pt_element_t *gptep = gw->prefetch_ptes; in FNAME()
594 if (sp->role.level > PG_LEVEL_4K) in FNAME()
601 if (unlikely(vcpu->kvm->mmu_invalidate_in_progress)) in FNAME()
604 if (sp->role.direct) in FNAME()
607 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1); in FNAME()
608 spte = sp->spt + i; in FNAME()
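The mask above rounds the spte index down to the start of its PTE_PREFETCH_NUM-sized group, so one read of adjacent gptes covers every spte prefetched around the fault. A quick sketch of the round-down, assuming PTE_PREFETCH_NUM = 8 (the trick requires a power of two):

#include <stdio.h>

#define PTE_PREFETCH_NUM 8

int main(void)
{
        for (unsigned int idx = 0; idx < 16; idx += 5)
                printf("index %2u -> group base %2u\n",
                       idx, idx & ~(PTE_PREFETCH_NUM - 1));
        /* 0 -> 0, 5 -> 0, 10 -> 8, 15 -> 8 */
        return 0;
}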
623 * Fetch a shadow pte for a specific level in the paging hierarchy.
624 * If the guest tries to write a write-protected page, we need to
634 gfn_t base_gfn = fault->gfn; in FNAME()
636 WARN_ON_ONCE(gw->gfn != base_gfn); in FNAME()
637 direct_access = gw->pte_access; in FNAME()
639 top_level = vcpu->arch.mmu->cpu_role.base.level; in FNAME()
643 * Verify that the top-level gpte is still there. Since the page in FNAME()
644 * is a root page, it is either write protected (and cannot be in FNAME()
651 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) in FNAME()
657 * loading a dummy root and handling the resulting page fault, e.g. if in FNAME()
660 if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) { in FNAME()
665 for_each_shadow_entry(vcpu, fault->addr, it) { in FNAME()
669 if (it.level == gw->level) in FNAME()
672 table_gfn = gw->table_gfn[it.level - 2]; in FNAME()
673 access = gw->pt_access[it.level - 2]; in FNAME()
678 * Synchronize the new page before linking it, as the CPU (KVM) in FNAME()
679 * is architecturally disallowed from inserting non-present in FNAME()
681 * the TLB when changing the gPTE from non-present to present. in FNAME()
684 * synchronized the page via kvm_sync_page(). in FNAME()
686 * For higher level pages, which cannot be unsync themselves in FNAME()
693 if (sp != ERR_PTR(-EEXIST) && sp->unsync_children && in FNAME()
698 * Verify that the gpte in the page, which is now either in FNAME()
699 * write-protected or unsync, wasn't modified between the fault in FNAME()
701 * reusing an existing shadow page to ensure the information in FNAME()
703 * shadow page (which could have been modified by a different in FNAME()
704 * vCPU even if the page was already linked). Holding mmu_lock in FNAME()
705 * prevents the shadow page from changing after this point. in FNAME()
707 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
710 if (sp != ERR_PTR(-EEXIST)) in FNAME()
713 if (fault->write && table_gfn == fault->gfn) in FNAME()
714 fault->write_fault_to_shadow_pgtable = true; in FNAME()
720 * are being shadowed by KVM, i.e. allocating a new shadow page may in FNAME()
729 * We cannot overwrite existing page tables with an NX in FNAME()
730 * huge page, as the leaf could be executable. in FNAME()
732 if (fault->nx_huge_page_workaround_enabled) in FNAME()
733 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in FNAME()
735 base_gfn = gfn_round_for_level(fault->gfn, it.level); in FNAME()
736 if (it.level == fault->goal_level) in FNAME()
743 if (sp == ERR_PTR(-EEXIST)) in FNAME()
747 if (fault->huge_page_disallowed) in FNAME()
748 account_nx_huge_page(vcpu->kvm, sp, in FNAME()
749 fault->req_level >= it.level); in FNAME()
752 if (WARN_ON_ONCE(it.level != fault->goal_level)) in FNAME()
753 return -EFAULT; in FNAME()
755 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, in FNAME()
756 base_gfn, fault->pfn, fault); in FNAME()
765 * Page fault handler. There are several causes for a page fault:
766 * - there is no shadow pte for the guest pte
767 * - write access through a shadow pte marked read only so that we can set
769 * - write access to a shadow pte marked read only so we can update the page
771 * - mmio access; in this case we will never install a present shadow pte
772 * - normal guest page fault due to the guest pte marked not present, not
783 WARN_ON_ONCE(fault->is_tdp); in FNAME()
787 * If PFEC.RSVD is set, this is a shadow page fault. in FNAME()
788 * The bit needs to be cleared before walking guest page tables. in FNAME()
790 r = FNAME(walk_addr)(&walker, vcpu, fault->addr, in FNAME()
791 fault->error_code & ~PFERR_RSVD_MASK); in FNAME()
794 * The page is not mapped by the guest. Let the guest handle it. in FNAME()
797 if (!fault->prefetch) in FNAME()
803 fault->gfn = walker.gfn; in FNAME()
804 fault->max_level = walker.level; in FNAME()
805 fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); in FNAME()
808 shadow_page_table_clear_flood(vcpu, fault->addr); in FNAME()
821 * Do not change pte_access if the pfn is an mmio page, otherwise in FNAME()
824 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && in FNAME()
825 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { in FNAME()
830 * If we converted a user page to a kernel page, in FNAME()
835 if (is_cr4_smep(vcpu->arch.mmu)) in FNAME()
840 write_lock(&vcpu->kvm->mmu_lock); in FNAME()
851 write_unlock(&vcpu->kvm->mmu_lock); in FNAME()
852 kvm_release_pfn_clean(fault->pfn); in FNAME()
860 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K); in FNAME()
863 offset = sp->role.quadrant << SPTE_LEVEL_BITS; in FNAME()
865 return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); in FNAME()
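Worked example of the quadrant math: a 32-bit guest packs 1024 four-byte gptes into a page, while a shadow page holds only 512 eight-byte sptes (SPTE_LEVEL_BITS = 9), so each guest table is shadowed by two pages and role.quadrant selects the half. A standalone sketch, assuming those sizes:

#include <stdio.h>

#define SPTE_LEVEL_BITS 9       /* 512 sptes per shadow page */

int main(void)
{
        unsigned long long gfn = 0x1234;
        unsigned int quadrant = 1;      /* second half of the guest table */
        unsigned int offset = quadrant << SPTE_LEVEL_BITS;      /* 512 entries */

        /* Four-byte pt_element_t for a 32-bit guest: base + 512 * 4. */
        printf("%#llx\n", (gfn << 12) + offset * 4ULL);         /* 0x1234800 */
        return 0;
}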
878 /* A 64-bit GVA should be impossible on 32-bit KVM. */ in FNAME()
879 WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu); in FNAME()
894 * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
896 * - The spte has a reference to the struct page, so the pfn for a given gfn
915 if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE || in FNAME()
916 !sp->shadowed_translation)) in FNAME()
924 return -1; in FNAME()
926 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) in FNAME()
930 pte_access = sp->role.access; in FNAME()
932 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
934 if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) in FNAME()
940 * only affects EPT with execute-only support when pte_access == 0; in FNAME()
941 * all other paging modes will create a read-only SPTE if in FNAME()
946 drop_spte(vcpu->kvm, &sp->spt[i]); in FNAME()
961 sptep = &sp->spt[i]; in FNAME()