Searched for refs:spte (results 1 – 7 of 7, sorted by relevance)
/linux/arch/x86/kvm/mmu/
spte.h

  229  static inline bool is_frozen_spte(u64 spte)
  231          return spte == FROZEN_SPTE;

  257  static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)
  259          return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);

  284  static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
  286          return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&

  295  static inline bool is_ept_ve_possible(u64 spte)
  298          !(spte & VMX_EPT_SUPPRESS_VE_BIT) &&
  299          (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;

  307  static inline bool spte_ad_enabled(u64 spte)
  [all …]
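All of these spte.h predicates follow one idiom: a shadow PTE is a raw 64-bit value, and each test is a mask-and-compare on its bits (is_mmio_spte(), for instance, checks the masked bits against a per-VM sentinel). A minimal userspace sketch of the idiom follows; the EXAMPLE_* constants are invented for illustration, not KVM's real bit layout.

    /* Mask-and-compare idiom behind the spte.h predicates above.
     * EXAMPLE_* constants are hypothetical, not KVM's actual layout. */
    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_MMIO_MASK   0x7ULL  /* hypothetical bits identifying MMIO */
    #define EXAMPLE_MMIO_VALUE  0x6ULL  /* hypothetical sentinel pattern */

    static inline bool example_is_mmio_spte(uint64_t spte)
    {
            /* True when the bits under the mask match the sentinel. */
            return (spte & EXAMPLE_MMIO_MASK) == EXAMPLE_MMIO_VALUE;
    }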
mmutrace.h

  213          TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
  214          TP_ARGS(sptep, gfn, spte),
  226          __entry->access = spte & ACC_ALL;
  227          __entry->gen = get_mmio_spte_generation(spte);

  313          TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
  314          TP_ARGS(spte, kvm_gen, spte_gen),
  319          __field(u64, spte)
  325          __entry->spte = spte;
  328          TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,

  341          __field(u64, spte)
  [all …]
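Both hits are standard Linux tracepoint definitions: every TRACE_EVENT pairs a TP_PROTO/TP_ARGS prototype with a TP_STRUCT__entry field layout, a TP_fast_assign copy step, and a TP_printk format string. A schematic of that shape, with an invented event name; it only compiles inside a kernel trace header, not standalone.

    /* Schematic TRACE_EVENT shape; event and field names are illustrative. */
    TRACE_EVENT(example_spte_event,
            TP_PROTO(u64 spte, unsigned int gen),
            TP_ARGS(spte, gen),
            TP_STRUCT__entry(
                    __field(u64, spte)
                    __field(unsigned int, gen)
            ),
            TP_fast_assign(
                    __entry->spte = spte;
                    __entry->gen = gen;
            ),
            TP_printk("spte %llx gen %x", __entry->spte, __entry->gen)
    );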
mmu.c

  174  #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)    \
  177               ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });    \
  178               __shadow_walk_next(&(_walker), spte))

  183  static void mmu_spte_set(u64 *sptep, u64 spte);

in mark_mmio_spte():
  294          u64 spte = make_mmio_spte(vcpu, gfn, access);
  296          trace_mark_mmio_spte(sptep, gfn, spte);
  297          mmu_spte_set(sptep, spte);

  300  static gfn_t get_mmio_spte_gfn(u64 spte)
  302          u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
  304          gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
  [all …]
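get_mmio_spte_gfn() reassembles a guest physical address whose upper bits were shifted past a run of reserved spte bits when the MMIO spte was created (the inverse of make_mmio_spte()). A self-contained sketch of that split-and-rejoin; RSVD_LEN and LOW_MASK are invented positions, not KVM's real layout.

    /* Split a GPA around a reserved-bit region and rejoin it, in the
     * style of make_mmio_spte()/get_mmio_spte_gfn(). Field positions
     * here are hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    #define RSVD_LEN 5                      /* hypothetical reserved-bit run */
    #define LOW_MASK ((1ULL << 40) - 1)     /* hypothetical low-GPA region */

    static uint64_t pack(uint64_t gpa)
    {
            /* Bits above LOW_MASK are shifted up past the reserved run. */
            return (gpa & LOW_MASK) | ((gpa & ~LOW_MASK) << RSVD_LEN);
    }

    static uint64_t unpack(uint64_t spte)
    {
            uint64_t gpa = spte & LOW_MASK;
            gpa |= (spte >> RSVD_LEN) & ~LOW_MASK;
            return gpa;
    }

    int main(void)
    {
            uint64_t gpa = 0x123456789abcULL;
            printf("round-trip ok: %d\n", unpack(pack(gpa)) == gpa);
            return 0;
    }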
tdp_mmu.c

in tdp_mmu_link_sp():
 1201          u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
 1205          ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
 1209          tdp_mmu_iter_set_spte(kvm, iter, spte);

in kvm_tdp_mmu_fast_pf_get_last_sptep():
 1927                                           u64 *spte)
 1935          *spte = iter.old_spte;
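tdp_mmu_set_spte_atomic() installs the new spte with a compare-and-exchange so that a concurrent vCPU racing on the same entry is detected rather than silently overwritten; on failure the caller restarts the walk. A userspace C11 analogue of that pattern, with illustrative names.

    /* Lockless SPTE update in the style of tdp_mmu_set_spte_atomic():
     * cmpxchg against the value the walker last read; a false return
     * means another thread changed the entry and the walk must restart. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool try_set_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
                             uint64_t new_spte)
    {
            return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
    }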
/linux/Documentation/virt/kvm/x86/
mmu.rst

   55  spte          shadow pte (referring to pfns)
  125  A nonleaf spte allows the hardware mmu to reach the leaf pages and
  128  A leaf spte corresponds to either one or two translations encoded into
  251  parent_ptes bit 0 is zero, only one spte points at this page and
  252  parent_ptes points at this single spte, otherwise, there exists multiple
  272  Only present on 32-bit hosts, where a 64-bit spte cannot be written
  341  - check for valid generation number in the spte (see "Fast invalidation of
  360  - walk the shadow page table to find the spte for the translation,
  363  - If this is an mmio request, cache the mmio info to the spte and set some
  364    reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
  [all …]
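The "valid generation number" check that mmu.rst line 341 refers to works by stamping a few bits of the current memslot generation into each MMIO spte; a stale spte is detected when those bits no longer match. A toy illustration with an invented bit layout (KVM actually splits the generation across two non-contiguous spte fields):

    /* Toy generation-validity check for a cached MMIO spte.
     * GEN_SHIFT and GEN_MASK are invented, not KVM's real layout. */
    #include <stdbool.h>
    #include <stdint.h>

    #define GEN_SHIFT 54
    #define GEN_MASK  0x3ULL

    static bool mmio_spte_gen_valid(uint64_t spte, uint64_t cur_gen)
    {
            return ((spte >> GEN_SHIFT) & GEN_MASK) == (cur_gen & GEN_MASK);
    }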
/linux/mm/
hugetlb.c

in huge_pmd_share():
 7238          pte_t *spte = NULL;
 7248                  spte = hugetlb_walk(svma, saddr,
 7250                  if (spte) {
 7251                          ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
 7257          if (!spte)
 7263                          (pmd_t *)((unsigned long)spte & PAGE_MASK));
 7266                  ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
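huge_pmd_share() finds an existing pte pointer inside another VMA's PMD page-table page, takes a reference on that page, and installs the page itself, obtained by rounding the pointer down with PAGE_MASK, as its own pmd. A sketch of just the pointer rounding, with a hypothetical address:

    /* Rounding a pte pointer down to its containing page-table page,
     * as huge_pmd_share() does before installing it as a pmd. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long pte_addr = 0x7f1234567abcUL;  /* hypothetical */
            unsigned long pmd_page = pte_addr & PAGE_MASK;
            printf("%#lx -> %#lx\n", pte_addr, pmd_page);
            return 0;
    }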
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c

in nvkm_vmm_ref_hwpt():
  448          bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
  451          if (spte != next)
  455          if (!spte) {