/linux/arch/x86/kvm/mmu/
mmutrace.h
    212:     TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
    213:     TP_ARGS(sptep, gfn, spte),
    216:         __field(void *, sptep)
    223:         __entry->sptep = sptep;
    229:     TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
    257:              u64 *sptep, u64 old_spte, int ret),
    258:     TP_ARGS(vcpu, fault, sptep, old_spte, ret),
    264:         __field(u64 *, sptep)
    274:         __entry->sptep = sptep;
    276:         __entry->new_spte = *sptep;
    [all …]
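These hits trace the usual three-part TRACE_EVENT shape: __field() declares what gets recorded, the __entry->... assignments (TP_fast_assign) fill it in, and TP_printk renders it. Below is a user-space caricature of the trace_mark_mmio_spte event; every name here is invented for the demo, and the real machinery is the TRACE_EVENT macro in mmutrace.h.

    /* Caricature of a tracepoint: struct = TP_STRUCT__entry,
     * assignments = TP_fast_assign, printf = TP_printk. */
    #include <stdint.h>
    #include <stdio.h>

    struct mmio_spte_event {
        void    *sptep;   /* __field(void *, sptep) */
        uint64_t gfn;
        uint64_t spte;
    };

    static void demo_trace_mark_mmio_spte(uint64_t *sptep, uint64_t gfn,
                                          uint64_t spte)
    {
        struct mmio_spte_event e = { .sptep = sptep, .gfn = gfn, .spte = spte };

        printf("sptep:%p gfn %llx spte %llx\n", e.sptep,
               (unsigned long long)e.gfn, (unsigned long long)e.spte);
    }

    int main(void)
    {
        uint64_t slot = 0;

        demo_trace_mark_mmio_spte(&slot, 0xfee00, 0x7);
        return 0;
    }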
mmu.c
    158:     u64 *sptep;                                        (struct member)
    177:     ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
    184: static void mmu_spte_set(u64 *sptep, u64 spte);
    284: static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
    286:         struct kvm_mmu_page *sp = sptep_to_sp(sptep);
    287:         gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
    292: static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
    297:         trace_mark_mmio_spte(sptep, gfn, spte);
    298:         mmu_spte_set(sptep, spte);
    337: static void __set_spte(u64 *sptep, u64 spte)
    [all …]
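The mark_mmio_spte() hits show KVM caching an emulated MMIO gfn and its access bits inside a not-present SPTE, so a later fault on the same gfn can skip the walk. A minimal user-space sketch of that encoding idea follows; the bit layout is invented for the demo, and the real encoding (shadow_mmio_value, MMIO generation bits) lives in spte.c/spte.h.

    /* Toy MMIO-SPTE encoding: pack a gfn and access bits into an
     * otherwise non-present 64-bit entry, tagged by a marker bit.
     * DEMO_* constants are invented, not the kernel's layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MMIO_FLAG   (1ULL << 63)   /* invented "this is MMIO" marker */
    #define DEMO_ACCESS_MASK 0x7ULL         /* invented low bits for access */

    static uint64_t demo_make_mmio_spte(uint64_t gfn, unsigned access)
    {
        return DEMO_MMIO_FLAG | (gfn << 3) | (access & DEMO_ACCESS_MASK);
    }

    static int demo_is_mmio_spte(uint64_t spte)
    {
        return !!(spte & DEMO_MMIO_FLAG);
    }

    int main(void)
    {
        uint64_t spte = demo_make_mmio_spte(0xfee00ULL, 0x3);

        if (demo_is_mmio_spte(spte))
            printf("gfn=%llx access=%llx\n",
                   (unsigned long long)((spte & ~DEMO_MMIO_FLAG) >> 3),
                   (unsigned long long)(spte & DEMO_ACCESS_MASK));
        return 0;
    }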
tdp_mmu.c
    197: static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
    206:         sp->ptep = sptep;
    218:         parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));    (in tdp_mmu_init_child_sp)
    223:         tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);  (in tdp_mmu_init_child_sp)
    354:         tdp_ptep_t sptep = pt + i;                                (local, in handle_removed_pt)
    368:         old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
    383:         old_spte = kvm_tdp_mmu_read_spte(sptep);
    415:         old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
    536:         u64 *sptep = rcu_dereference(iter->sptep);                (local, in __tdp_mmu_set_spte_atomic)
    553:         if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
    [all …]
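The try_cmpxchg64() hit at line 553 is the heart of the TDP MMU's lock-free SPTE update: the entry is replaced only if it still holds the value the iterator last read, so a concurrent writer forces a retry instead of a lost update. A minimal user-space analogue using C11 atomics; the function name and the FROZEN marker value are invented for the demo.

    /* Replace a 64-bit entry only if it still equals *old_spte.
     * On failure, *old_spte is refreshed with the current value,
     * mirroring how the kernel re-reads iter->old_spte before retrying. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FROZEN_SPTE ((uint64_t)1)   /* placeholder "in transition" marker */

    static bool set_spte_atomic(_Atomic uint64_t *sptep,
                                uint64_t *old_spte, uint64_t new_spte)
    {
        return atomic_compare_exchange_strong(sptep, old_spte, new_spte);
    }

    int main(void)
    {
        _Atomic uint64_t spte = 0;
        uint64_t old = 0;

        if (set_spte_atomic(&spte, &old, FROZEN_SPTE))
            printf("froze entry, old value was %llu\n",
                   (unsigned long long)old);

        old = 0;   /* stale expectation: the entry now holds FROZEN_SPTE */
        if (!set_spte_atomic(&spte, &old, 42))
            printf("lost the race, entry currently holds %llu\n",
                   (unsigned long long)old);
        return 0;
    }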
paging_tmpl.h    (all hits inside FNAME() template functions)
    585:                  u64 *sptep)
    592:         sp = sptep_to_sp(sptep);
    605:         return __direct_pte_prefetch(vcpu, sp, sptep);
    607:         i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
    611:         if (spte == sptep)
    668:         clear_sp_write_flooding_count(it.sptep);
    674:         sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
    707:         link_shadow_page(vcpu, it.sptep, sp);
    729:         disallowed_hugepage_adjust(fault, *it.sptep, it.level);
    735:         validate_direct_spte(vcpu, it.sptep, direct_access);
    [all …]
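Line 607 rounds the faulting entry's index down to a PTE_PREFETCH_NUM-aligned block, so the prefetch loop covers the fault plus its neighbours in one pass. A small standalone sketch of that alignment arithmetic, assuming only that PTE_PREFETCH_NUM is a power of two:

    /* Round an entry index down to the start of its prefetch block,
     * then walk the whole block. */
    #include <stdio.h>

    #define PTE_PREFETCH_NUM 8

    int main(void)
    {
        int fault_index = 19;  /* hypothetical spte_index() of the fault */
        int start = fault_index & ~(PTE_PREFETCH_NUM - 1);

        /* The block [16, 24) covers the fault at 19 plus its neighbours. */
        for (int i = start; i < start + PTE_PREFETCH_NUM; i++)
            printf("would prefetch slot %d%s\n", i,
                   i == fault_index ? "  <- faulting entry" : "");
        return 0;
    }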
tdp_iter.c
    14:          iter->sptep = iter->pt_path[iter->level - 1] +          (in tdp_iter_refresh_sptep)
    16:          iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
    89:          iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);    (in try_step_down)
    122:         iter->sptep++;                                          (in try_step_side)
    123:         iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
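The try_step_side() hits show the iterator moving laterally within one page-table page: bump sptep to the next 8-byte slot and re-snapshot the entry into old_spte. A toy analogue of that step; the struct below is a stand-in, not the kernel's struct tdp_iter.

    /* Advance to the next entry in the current table page and refresh
     * the cached snapshot, as try_step_side() does. */
    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES_PER_PAGE 512

    struct demo_iter {
        uint64_t *sptep;     /* pointer to the current entry */
        uint64_t old_spte;   /* snapshot taken when we arrived here */
        int idx;
    };

    static int demo_try_step_side(struct demo_iter *it)
    {
        if (it->idx + 1 >= ENTRIES_PER_PAGE)
            return 0;                  /* end of this table page */
        it->idx++;
        it->sptep++;                   /* next 8-byte slot in the same page */
        it->old_spte = *it->sptep;     /* kernel: kvm_tdp_mmu_read_spte() */
        return 1;
    }

    int main(void)
    {
        static uint64_t page[ENTRIES_PER_PAGE] = { [0] = 10, [1] = 11, [2] = 12 };
        struct demo_iter it = { .sptep = &page[0], .old_spte = page[0], .idx = 0 };

        while (it.idx < 2 && demo_try_step_side(&it)) {
            printf("idx=%d old_spte=%llu\n", it.idx,
                   (unsigned long long)it.old_spte);
        }
        return 0;
    }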
spte.h
    226: static inline int spte_index(u64 *sptep)
    228:         return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
    253: static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
    255:         return to_shadow_page(__pa(sptep));
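spte_index() recovers an entry's slot number from the pointer alone: dividing the address by the entry size yields a global entry number, and masking keeps the position within one page. A standalone demo of the same arithmetic, assuming SPTE_ENT_PER_PAGE = 512 (4 KiB pages, 8-byte entries, as on x86-64):

    /* A page-aligned array of 512 eight-byte entries makes the index
     * recoverable from an entry's address, exactly as in spte_index(). */
    #include <stdint.h>
    #include <stdio.h>

    #define SPTE_ENT_PER_PAGE 512

    static int spte_index_demo(uint64_t *sptep)
    {
        return ((uintptr_t)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
    }

    int main(void)
    {
        /* Page alignment makes base/8 a multiple of 512, so the mask
         * leaves the in-page slot number. */
        static _Alignas(4096) uint64_t page[SPTE_ENT_PER_PAGE];

        printf("index of &page[0]:   %d\n", spte_index_demo(&page[0]));   /* 0 */
        printf("index of &page[137]: %d\n", spte_index_demo(&page[137])); /* 137 */
        return 0;
    }

sptep_to_sp() is the companion trick: the SPTE's physical page uniquely identifies the struct kvm_mmu_page that owns it, so pointer arithmetic replaces any lookup table.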
/linux/arch/s390/mm/
pgtable.c
    679:                      pte_t *sptep, pte_t *tptep, pte_t pte)    (in ptep_shadow_pte, argument)
    687:         spgste = pgste_get_lock(sptep);
    688:         spte = *sptep;
    701:         pgste_set_unlock(sptep, spgste);
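ptep_shadow_pte() follows a lock-source / snapshot / install-shadow / unlock shape: the pgste lock pins the source PTE while its value is propagated into the shadow table. A user-space sketch of that shape, with a pthread mutex standing in for the per-entry pgste lock and plain uint64_t values standing in for pte_t:

    /* Lock the source entry, take a stable snapshot, install the derived
     * value in the shadow entry, unlock. Compile with -lpthread. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t src_lock = PTHREAD_MUTEX_INITIALIZER;

    static void demo_shadow_pte(uint64_t *sptep, uint64_t *tptep)
    {
        pthread_mutex_lock(&src_lock);    /* kernel: pgste_get_lock(sptep) */
        uint64_t spte = *sptep;           /* stable snapshot of the source */
        *tptep = spte;                    /* kernel derives the shadow value */
        pthread_mutex_unlock(&src_lock);  /* kernel: pgste_set_unlock() */
    }

    int main(void)
    {
        uint64_t src = 0x1000, dst = 0;

        demo_shadow_pte(&src, &dst);
        printf("shadow entry now %llx\n", (unsigned long long)dst);
        return 0;
    }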
gmap.c
    2143:         pte_t *sptep, *tptep;                                  (local, in gmap_shadow_page)
    2167:         sptep = gmap_pte_op_walk(parent, paddr, &ptl);
    2168:         if (sptep) {
    2174:                 gmap_pte_op_end(sptep, ptl);
    2178:         rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
    2185:         gmap_pte_op_end(sptep, ptl);
/linux/arch/s390/include/asm/
pgtable.h
    1340:                      pte_t *sptep, pte_t *tptep, pte_t pte);