/linux/virt/kvm/

pfncache.c
  99:    static void *gpc_map(kvm_pfn_t pfn)    [in gpc_map()]
  111:   static void gpc_unmap(kvm_pfn_t pfn, void *khva)    [in gpc_unmap()]
  155:   static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)    [in hva_to_pfn_retry()]
  159:   kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;    [in hva_to_pfn_retry()]
  261:   kvm_pfn_t old_pfn;    [in __kvm_gpc_refresh()]
  450:   kvm_pfn_t old_pfn;    [in kvm_gpc_deactivate()]
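The gpc_map()/gpc_unmap() pair above is where the pfn cache turns a resolved kvm_pfn_t into a kernel mapping. A minimal sketch of that idea, not the exact pfncache.c body (the _sketch name is mine, and the real code likely guards the memremap() fallback behind CONFIG_HAS_IOMEM):

#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kvm_host.h>

/* Sketch only: map a host pfn so KVM can dereference the backing memory. */
static void *gpc_map_sketch(kvm_pfn_t pfn)
{
	/* Ordinary struct-page-backed memory can simply be kmap()'d. */
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

	/* Otherwise treat it as device/remapped memory and memremap() it. */
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
}

The matching teardown would kunmap() in the first case and memunmap() in the second, which is the job of the gpc_unmap() hit at line 111.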
guest_memfd.c
  23:    static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)    [in folio_file_pfn()]
  32:    kvm_pfn_t pfn = folio_file_pfn(folio, index);    [in __kvm_gmem_prepare_folio()]
  369:   kvm_pfn_t pfn = page_to_pfn(page);    [in kvm_gmem_free_folio()]
  559:   pgoff_t index, kvm_pfn_t *pfn,    [in __kvm_gmem_get_pfn()]
  596:   gfn_t gfn, kvm_pfn_t *pfn, struct page **page,    [in kvm_gmem_get_pfn()]
  661:   kvm_pfn_t pfn;    [in kvm_gmem_populate()]
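folio_file_pfn() above maps a file index inside a guest_memfd folio to the host pfn of the page backing it. A hedged sketch of that conversion (the _sketch name is mine, and it assumes power-of-two folio sizes so the offset within the folio can be taken with a mask):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kvm_host.h>

/* Sketch: pfn of the page backing @index within @folio. */
static inline kvm_pfn_t folio_file_pfn_sketch(struct folio *folio, pgoff_t index)
{
	/* First pfn of the folio plus the page offset of @index inside it. */
	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}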
kvm_mm.h
  56:    kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);    [in kvm_gmem_bind()]
kvm_main.c
  2824:  static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,    [in kvm_resolve_pfn()]
  2827:  kvm_pfn_t pfn;    [in kvm_resolve_pfn()]
  2848:  static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)    [in hva_to_pfn_fast()]
  2880:  static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)    [in hva_to_pfn_slow()]
  2937:  struct kvm_follow_pfn *kfp, kvm_pfn_t *p_pfn)    [in hva_to_pfn_remapped()]
  2982:  kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp)    [in hva_to_pfn()]
  2985:  kvm_pfn_t pfn;    [in hva_to_pfn()]
  3027:  static kvm_pfn_t kvm_follow_pfn(struct kvm_follow_pfn *kfp)    [in kvm_follow_pfn()]
  3046:  kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,    [in __kvm_faultin_pfn()]
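The hva_to_pfn_fast()/hva_to_pfn_slow() pair above follows the usual two-stage GUP pattern: try the lockless get_user_page_fast_only() first, and only fall back to get_user_pages_unlocked(), which may fault the page in, when that fails. A simplified sketch of the pattern (it ignores the kvm_follow_pfn flags, async faults, write-fault upgrades and the VM_PFNMAP path handled by hva_to_pfn_remapped(); the function name is illustrative):

#include <linux/mm.h>
#include <linux/kvm_host.h>

/* Sketch: resolve a host virtual address to the pfn of a pinned page. */
static kvm_pfn_t hva_to_pfn_sketch(unsigned long hva, bool write)
{
	unsigned int gup_flags = write ? FOLL_WRITE : 0;
	struct page *page;
	long npages;

	/* Fast path: no mmap_lock, only succeeds if the PTE is already present. */
	if (get_user_page_fast_only(hva, gup_flags, &page))
		return page_to_pfn(page);

	/* Slow path: may fault the page in; takes mmap_lock internally. */
	npages = get_user_pages_unlocked(hva, 1, &page, gup_flags);
	if (npages != 1)
		return KVM_PFN_ERR_FAULT;

	return page_to_pfn(page);
}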
/linux/include/linux/

kvm_types.h
  50:    typedef hfn_t kvm_pfn_t;    [typedef]
  70:    kvm_pfn_t pfn;
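The line-50 hit is the definition itself: a kvm_pfn_t is a host frame number (hfn_t). For context, the address/frame typedefs in kvm_types.h follow this pattern (recalled from the header, so treat the exact widths as illustrative rather than authoritative):

typedef unsigned long  gva_t;      /* guest virtual address  */
typedef u64            gpa_t;      /* guest physical address */
typedef u64            gfn_t;      /* guest frame number     */
typedef unsigned long  hva_t;      /* host virtual address   */
typedef u64            hpa_t;      /* host physical address  */
typedef u64            hfn_t;      /* host frame number      */
typedef hfn_t          kvm_pfn_t;  /* the line-50 hit above  */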
kvm_host.h
  106:   static inline bool is_error_pfn(kvm_pfn_t pfn)    [in is_error_pfn()]
  115:   static inline bool is_sigpending_pfn(kvm_pfn_t pfn)    [in is_sigpending_pfn()]
  125:   static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)    [in is_error_noslot_pfn()]
  131:   static inline bool is_noslot_pfn(kvm_pfn_t pfn)    [in is_noslot_pfn()]
  295:   kvm_pfn_t pfn;
  296:   kvm_pfn_t gfn;
  1288:  kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
  1292:  static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,    [in kvm_faultin_pfn()]
  1883:  static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)    [in pfn_to_hpa()]
  2520:  gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
  [all …]
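The predicates above work because error and "no slot" results are encoded in high bits that a real host pfn can never have set. A hedged sketch of how the four checks and pfn_to_hpa() relate to those sentinel constants (the exact bit layout of KVM_PFN_ERR_MASK, KVM_PFN_ERR_NOSLOT_MASK, KVM_PFN_NOSLOT and KVM_PFN_ERR_SIGPENDING lives in kvm_host.h; the _sketch names are mine):

/* Sketch: the gfn has a memslot but could not be translated to a host pfn. */
static inline bool is_error_pfn_sketch(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/* Sketch: translation was interrupted by a pending signal. */
static inline bool is_sigpending_pfn_sketch(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_SIGPENDING;
}

/* Sketch: either a hard error or the gfn is not covered by any memslot. */
static inline bool is_error_noslot_pfn_sketch(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* Sketch: specifically "no memslot", which is not a hard error. */
static inline bool is_noslot_pfn_sketch(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/* Sketch: a frame number becomes a host physical address by shifting. */
static inline hpa_t pfn_to_hpa_sketch(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}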
/linux/arch/x86/kvm/svm/

svm.h
  867:   int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
  868:   void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
  869:   int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
  893:   static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)    [in sev_gmem_prepare()]
  897:   static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}    [in sev_gmem_invalidate()]
  898:   static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)    [in sev_private_max_mapping_level()]
sev.c
  2221:  static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,    [in sev_gmem_post_populate()]
  3623:  static int snp_rmptable_psmash(kvm_pfn_t pfn)    [in snp_rmptable_psmash()]
  3888:  kvm_pfn_t pfn;    [in sev_snp_init_protected_guest_state()]
  4699:  kvm_pfn_t pfn;    [in sev_handle_rmp_fault()]
  4785:  static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)    [in is_pfn_range_shared()]
  4787:  kvm_pfn_t pfn = start;    [in is_pfn_range_shared()]
  4820:  static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)    [in is_large_rmp_possible()]
  4822:  kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);    [in is_large_rmp_possible()]
  4836:  int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)    [in sev_gmem_prepare()]
  4839:  kvm_pfn_t pfn_aligned;    [in sev_gmem_prepare()]
  [all …]
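Several of these sev.c hits (is_large_rmp_possible(), sev_gmem_prepare()) deal with RMP entries at 2 MiB granularity, which is why the pfn is aligned down to a PTRS_PER_PMD multiple before the range is examined. The alignment step in isolation, with an illustrative helper name, is just:

#include <linux/align.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>

/* Sketch: base pfn of the 2 MiB (512 * 4 KiB) region containing @pfn. */
static kvm_pfn_t pfn_align_2m_sketch(kvm_pfn_t pfn)
{
	return ALIGN_DOWN(pfn, PTRS_PER_PMD);
}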
/linux/arch/x86/kvm/mmu/

spte.c
  107:   static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)    [in __kvm_is_mmio_pfn()]
  128:   static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)    [in kvm_is_mmio_pfn()]
  188:   unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,    [in make_spte()]
spte.h
  353:   static inline kvm_pfn_t spte_to_pfn(u64 pte)    [in spte_to_pfn()]
  535:   unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
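spte_to_pfn() above recovers the host pfn that make_spte() packed into a shadow/TDP PTE. A hedged one-liner sketch (recent kernels call the physical-address mask SPTE_BASE_ADDR_MASK, older trees used PT64_BASE_ADDR_MASK, so treat the mask name as an assumption):

/* Sketch: mask off permission/software bits, then shift to get the pfn. */
static inline kvm_pfn_t spte_to_pfn_sketch(u64 spte)
{
	return (spte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT;
}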
mmu_internal.h
  291:   kvm_pfn_t pfn;
tdp_mmu.c
  365:   kvm_pfn_t old_pfn = spte_to_pfn(old_spte);    [in remove_external_spte()]
  522:   kvm_pfn_t new_pfn = spte_to_pfn(new_spte);    [in set_external_spte_present()]
mmu.c
  3028:  kvm_pfn_t pfn, struct kvm_page_fault *fault)    [in mmu_set_spte()]
  3324:  kvm_pfn_t mask;    [in kvm_mmu_hugepage_adjust()]
  4523:  static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,    [in kvm_max_private_mapping_level()]
/linux/arch/powerpc/kvm/

e500_mmu_host.c
  167:   kvm_pfn_t pfn;    [in kvmppc_map_magic()]
  169:   pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;    [in kvmppc_map_magic()]
  250:   kvm_pfn_t pfn, unsigned int wimg,    [in kvmppc_e500_ref_setup()]
  310:   kvm_pfn_t pfn = ref->pfn;    [in kvmppc_e500_setup_stlbe()]
e500.h
  43:    kvm_pfn_t pfn; /* valid only for TLB0, except briefly */
trace_pr.h
  32:    TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
/linux/arch/x86/include/asm/

kvm_host.h
  1830:  kvm_pfn_t pfn_for_gfn);
  1838:  kvm_pfn_t pfn_for_gfn);
  1923:  int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
  1924:  void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
  1925:  int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
/linux/arch/x86/kvm/vmx/

x86_ops.h
  156:   int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
tdx.c
  1629:  enum pg_level level, kvm_pfn_t pfn)    [in tdx_mem_page_record_premap_cnt()]
  1642:  enum pg_level level, kvm_pfn_t pfn)    [in tdx_sept_set_private_spte()]
  1882:  enum pg_level level, kvm_pfn_t pfn)    [in tdx_sept_remove_private_spte()]
  3151:  static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,    [in tdx_gmem_post_populate()]
  3321:  int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)    [in tdx_gmem_private_max_mapping_level()]
main.c
  834:   static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)    [in vt_gmem_private_max_mapping_level()]
/linux/arch/arm64/kvm/

mmu.c
  1398:  unsigned long hva, kvm_pfn_t *pfnp,    [in transparent_hugepage_adjust()]
  1401:  kvm_pfn_t pfn = *pfnp;    [in transparent_hugepage_adjust()]
  1468:  static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,    [in sanitise_mte_tags()]
  1529:  kvm_pfn_t pfn;    [in user_mem_abort()]
/linux/arch/arm64/include/asm/

kvm_pgtable.h
  149:   static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)    [in kvm_pte_to_pfn()]
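kvm_pte_to_pfn() is the arm64 stage-2 counterpart of spte_to_pfn(): extract the output address from the page-table entry and express it as a frame number. A heavily hedged sketch, assuming a neighbouring kvm_pte_to_phys() helper in the same header:

/* Sketch: output physical address of the PTE, expressed as a pfn. */
static inline kvm_pfn_t kvm_pte_to_pfn_sketch(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}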
/linux/arch/riscv/kvm/

mmu.c
  331:   kvm_pfn_t hfn;    [in kvm_riscv_mmu_map()]
/linux/arch/loongarch/kvm/

mmu.c
  777:   kvm_pfn_t pfn;    [in kvm_map_page()]
/linux/arch/powerpc/include/asm/

kvm_ppc.h
  930:   static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)    [in kvmppc_mmu_flush_icache()]