| /linux/arch/x86/kvm/mmu/ |
| H A D | mmutrace.h | 13 __field(__u64, gfn) \ 20 __entry->gfn = sp->gfn; \ 37 __entry->gfn, role.level, \ 216 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte), 217 TP_ARGS(sptep, gfn, spte), 221 __field(gfn_t, gfn) 228 __entry->gfn = gfn; 234 __entry->gfn, __entry->access, __entry->gen) 239 TP_PROTO(u64 addr, gfn_t gfn, unsigned access), 240 TP_ARGS(addr, gfn, access), [all …]
|
| H A D | tdp_mmu.c | 225 gfn_t gfn, union kvm_mmu_page_role role) in tdp_mmu_init_sp() argument 232 sp->gfn = gfn; in tdp_mmu_init_sp() 250 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp() 323 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, 362 static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte, in remove_external_spte() argument 378 kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_spte); in remove_external_spte() 402 gfn_t base_gfn = sp->gfn; in handle_removed_pt() 411 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt() local 474 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt() 479 remove_external_spte(kvm, gfn, old_spte, level); in handle_removed_pt() [all …]
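The handle_removed_pt() hits above show how each child mapping's gfn is derived from the parent page's base gfn: entry i at a given level starts at base_gfn + i * KVM_PAGES_PER_HPAGE(level). A minimal userspace sketch of that arithmetic, assuming x86-64's 9 index bits per level and 4 KiB pages (pages_per_hpage() is a local stand-in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for KVM_PAGES_PER_HPAGE(): 4 KiB pages covered by one
 * entry at 'level' (level 1 = 4 KiB, 2 = 2 MiB, 3 = 1 GiB). */
static uint64_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);
}

int main(void)
{
	uint64_t base_gfn = 0x40000;	/* gfn at the start of the table's range */
	int level = 2;			/* each entry maps 2 MiB (512 gfns) */

	for (int i = 0; i < 4; i++)	/* first 4 of the 512 entries */
		printf("entry %d -> gfn 0x%llx\n", i,
		       (unsigned long long)(base_gfn + i * pages_per_hpage(level)));
	return 0;
}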
|
| H A D | page_track.c | 75 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_write_track() argument 80 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track() 91 gfn_t gfn) in __kvm_write_track_add_gfn() argument 101 update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn() 107 kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn() 109 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn() 114 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn() argument 124 update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn() 130 kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn() 137 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gfn_is_write_tracked() argument [all …]
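The update_gfn_write_track() hits show the write-tracking scheme: a per-gfn counter in the memslot, indexed by gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K), incremented on add and decremented on remove, with write protection applied on the 0 -> 1 transition and lifted on 1 -> 0. A hedged standalone model of that counting (the array size and messages are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define NPAGES 16

static short write_track[NPAGES];	/* per-gfn tracker refcount */

/* Model of update_gfn_write_track(): index is gfn - base_gfn at 4K. */
static void update_track(unsigned long gfn, unsigned long base_gfn, short delta)
{
	unsigned long index = gfn - base_gfn;

	assert(write_track[index] + delta >= 0);
	write_track[index] += delta;
	if (delta > 0 && write_track[index] == delta)
		printf("gfn 0x%lx: write-protect\n", gfn);	/* first tracker */
	else if (write_track[index] == 0)
		printf("gfn 0x%lx: allow writes again\n", gfn);	/* last tracker gone */
}

int main(void)
{
	update_track(0x105, 0x100, +1);	/* protects */
	update_track(0x105, 0x100, +1);	/* already protected: no-op */
	update_track(0x105, 0x100, -1);
	update_track(0x105, 0x100, -1);	/* unprotects */
	return 0;
}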
|
| H A D | mmu_internal.h | 73 gfn_t gfn; member 197 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level() argument 199 return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level() 203 gfn_t gfn, bool synchronizing, bool prefetch); 205 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); 206 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); 208 struct kvm_memory_slot *slot, u64 gfn, 212 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn() argument 214 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), in kvm_flush_remote_tlbs_gfn() 274 gfn_t gfn; member [all …]
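gfn_round_for_level() relies on the two's-complement identity x & -n == x & ~(n - 1) for power-of-two n to round a gfn down to its huge-page boundary. A standalone check, again assuming 9 index bits per level:

#include <stdint.h>
#include <stdio.h>

static uint64_t pages_per_hpage(int level)	/* KVM_PAGES_PER_HPAGE stand-in */
{
	return 1ULL << ((level - 1) * 9);
}

static uint64_t gfn_round_for_level(uint64_t gfn, int level)
{
	return gfn & -pages_per_hpage(level);	/* same as & ~(n - 1) */
}

int main(void)
{
	uint64_t gfn = 0x12345;

	printf("4K: 0x%llx\n", (unsigned long long)gfn_round_for_level(gfn, 1));
	printf("2M: 0x%llx\n", (unsigned long long)gfn_round_for_level(gfn, 2));
	printf("1G: 0x%llx\n", (unsigned long long)gfn_round_for_level(gfn, 3));
	/* prints 0x12345, 0x12200, 0x0 */
	return 0;
}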
|
| H A D | mmu.c | 287 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep() local 289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep() 292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument 295 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte() 297 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte() 642 return sp->gfn; in kvm_mmu_page_get_gfn() 647 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn() 677 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation() argument 680 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; in kvm_mmu_page_set_translation() 687 sp->gfn, kvm_mmu_page_get_access(sp, index), access); in kvm_mmu_page_set_translation() [all …]
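kvm_mmu_page_set_translation() at line 680 packs a translation as (gfn << PAGE_SHIFT) | access, which is lossless because the access bits fit in the low PAGE_SHIFT bits a page-aligned value never uses. A small model of the pack/unpack (PAGE_SHIFT = 12; the 3-bit access mask is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define ACC_MASK   0x7u	/* illustrative: access bits live below PAGE_SHIFT */

static uint64_t pack(uint64_t gfn, unsigned access)
{
	return (gfn << PAGE_SHIFT) | access;	/* as in kvm_mmu_page_set_translation() */
}

int main(void)
{
	uint64_t e = pack(0xabcd, 0x5);

	printf("gfn 0x%llx access 0x%llx\n",
	       (unsigned long long)(e >> PAGE_SHIFT),
	       (unsigned long long)(e & ACC_MASK));
	return 0;
}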
|
| H A D | paging_tmpl.h | 91 gfn_t gfn; member 322 gfn_t gfn; in FNAME() local 440 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME() 441 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME() 445 gfn += pse36_gfn_delta(pte); in FNAME() 448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME() 452 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME() 536 gfn_t gfn; in FNAME() local 541 gfn = gpte_to_gfn(gpte); in FNAME() 545 return kvm_mmu_prefetch_sptes(vcpu, gfn, spte, 1, pte_access); in FNAME() [all …]
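Lines 440-441 show the gfn computation for a large guest page: take the gfn from the final-level guest PTE, then add the untranslated part of the address, (addr & PT_LVL_OFFSET_MASK(level)) >> PAGE_SHIFT. A standalone model for a 2 MiB guest page (the mask layout assumes 64-bit 4-level paging; the address and PTE gfn are made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t addr = 0x7f1234567000ULL;	/* guest virtual address */
	int level = 2;				/* PTE maps a 2 MiB page */
	uint64_t lvl_offset_mask = (1ULL << (PAGE_SHIFT + (level - 1) * 9)) - 1;
	uint64_t pte_gfn = 0x80000;		/* gfn from the guest PTE, 2M-aligned */

	/* gfn of the 4K frame actually accessed inside the 2M region */
	uint64_t gfn = pte_gfn + ((addr & lvl_offset_mask) >> PAGE_SHIFT);

	printf("gfn 0x%llx\n", (unsigned long long)gfn);	/* 0x80167 */
	return 0;
}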
|
| H A D | page_track.h | 19 gfn_t gfn); 21 struct kvm_memory_slot *slot, gfn_t gfn); 24 const struct kvm_memory_slot *slot, gfn_t gfn);
|
| /linux/arch/powerpc/kvm/ |
| H A D | book3s_hv_uvmem.c | 289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument 295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn() 296 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn() 308 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, in kvmppc_gfn_secure_uvmem_pfn() argument 311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn() 315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument 317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn() 321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument 323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared() 327 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument [all …]
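kvmppc_mark_gfn() scans the VM's list of secure-memory ranges for the one containing the gfn and flags the per-gfn state at index gfn - p->base_pfn. A minimal model of that range lookup (the struct layout and flag value here are illustrative, not the kernel's):

#include <stdio.h>

struct range {
	unsigned long base_pfn;
	unsigned long nr_pfns;
	unsigned int *states;	/* one state word per gfn in the range */
};

/* Model of kvmppc_mark_gfn(): find the range holding gfn, mark it. */
static int mark_gfn(struct range *r, int n, unsigned long gfn, unsigned int flag)
{
	for (int i = 0; i < n; i++) {
		if (gfn >= r[i].base_pfn && gfn < r[i].base_pfn + r[i].nr_pfns) {
			r[i].states[gfn - r[i].base_pfn] = flag;
			return 0;
		}
	}
	return -1;	/* gfn not in any secure range */
}

int main(void)
{
	unsigned int s0[8] = {0}, s1[4] = {0};
	struct range r[] = { {0x100, 8, s0}, {0x800, 4, s1} };

	mark_gfn(r, 2, 0x802, 1);	/* lands in the second range, index 2 */
	printf("state[2] = %u\n", s1[2]);
	return 0;
}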
|
| H A D | e500_mmu_host.c | 324 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument 355 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map() 356 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map() 358 pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page); in kvmppc_e500_shadow_map() 362 __func__, (long)gfn); in kvmppc_e500_shadow_map() 391 __func__, (long)gfn, pfn); in kvmppc_e500_shadow_map() 407 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map() 441 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map() 444 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map() 446 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map() [all …]
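Lines 407 and 441-446 hold the alignment arithmetic for a variable-size e500 TLB mapping: gfn_start = gfn & ~(tsize_pages - 1) aligns the guest frame to the entry size, and slot_start = pfn - (gfn - slot->base_gfn) recovers the host pfn backing the start of the memslot, so both ends of the candidate mapping can be bounds-checked. A standalone model with made-up frame numbers:

#include <stdio.h>

int main(void)
{
	unsigned long tsize_pages = 256;	/* TLB entry covers 1 MiB of 4K pages */
	unsigned long base_gfn = 0x40000;	/* memslot guest start */
	unsigned long gfn = 0x40123;		/* faulting guest frame */
	unsigned long pfn = 0x90123;		/* host frame backing gfn */

	unsigned long gfn_start = gfn & ~(tsize_pages - 1);
	unsigned long slot_start = pfn - (gfn - base_gfn);	/* host pfn of base_gfn */

	printf("gfn_start  0x%lx\n", gfn_start);	/* 0x40100 */
	printf("slot_start 0x%lx\n", slot_start);	/* 0x90000 */
	return 0;
}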
|
| H A D | book3s_hv_rm_mmu.c | 97 unsigned long gfn, unsigned long psize) in kvmppc_update_dirty_map() argument 104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map() 105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map() 113 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local 117 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte() 118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte() 120 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte() 131 unsigned long gfn; in revmap_for_hpte() local 133 gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr)); in revmap_for_hpte() 134 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte() [all …]
|
| /linux/include/linux/ |
| H A D | kvm_host.h | 298 kvm_pfn_t gfn; member 1212 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); 1214 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); 1252 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, 1255 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write); 1256 static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument 1258 return __gfn_to_page(kvm, gfn, true); in gfn_to_page() 1261 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 1262 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 1263 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); [all …]
|
| /linux/drivers/gpu/drm/i915/gvt/ |
| H A D | kvmgt.c | 93 gfn_t gfn; member 101 gfn_t gfn; member 112 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, 129 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument 132 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, in gvt_unpin_guest_page() 137 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument 150 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; in gvt_pin_guest_page() 174 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page() 178 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument 185 ret = gvt_pin_guest_page(vgpu, gfn, size, &page); in gvt_dma_map_page() [all …]
|
| H A D | page_track.h | 45 struct intel_vgpu *vgpu, unsigned long gfn); 48 unsigned long gfn, gvt_page_track_handler_t handler, 51 unsigned long gfn); 53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn); 54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
|
| /linux/arch/arm64/kvm/hyp/include/nvhe/ |
| H A D | mem_protect.h | 42 int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu, 44 int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm); 45 int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot); 46 int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm); 47 int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm); 48 int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
|
| /linux/include/xen/ |
| H A D | xen-ops.h | 65 xen_pfn_t *gfn, int nr, 78 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument 115 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument 121 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array() 129 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array() 175 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument 182 return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
|
| /linux/arch/loongarch/kvm/ |
| H A D | mmu.c | 68 offset = (addr >> PAGE_SHIFT) - ctx->gfn; in kvm_mkclean_pte() 373 ctx.gfn = base_gfn; in kvm_arch_mmu_enable_log_dirty_pt_masked() 556 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page_fast() local 582 slot = gfn_to_memslot(kvm, gfn); in kvm_map_page_fast() 600 mark_page_dirty(kvm, gfn); in kvm_map_page_fast() 666 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level() argument 685 hva = __gfn_to_hva_memslot(slot, gfn); in host_pfn_mapping_level() 728 static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn) in kvm_split_huge() argument 750 return child + (gfn & (PTRS_PER_PTE - 1)); in kvm_split_huge() 779 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_map_page() local [all …]
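kvm_split_huge() returns child + (gfn & (PTRS_PER_PTE - 1)) (line 750): once the huge mapping has been split into a page table of PTRS_PER_PTE entries, the low index bits of the gfn select the slot in the new table. A standalone sketch (PTRS_PER_PTE = 512 assumed):

#include <stdio.h>

#define PTRS_PER_PTE 512

int main(void)
{
	unsigned long pte_table[PTRS_PER_PTE] = {0};	/* the freshly split table */
	unsigned long gfn = 0x40123;

	/* Model of kvm_split_huge()'s return: the entry for gfn */
	unsigned long *ptep = pte_table + (gfn & (PTRS_PER_PTE - 1));

	printf("index %lu\n", (unsigned long)(ptep - pte_table));	/* 0x123 = 291 */
	return 0;
}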
|
| /linux/drivers/xen/ |
| H A D | xlate_mmu.c | 45 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data); 84 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument 89 info->h_gpfns[info->h_iter] = gfn; in setup_hparams() 145 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument 158 data.fgfn = gfn; in xen_xlate_remap_gfn_array() 174 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument 179 xrp.gpfn = gfn; in unmap_gfn() 197 static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument 201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
|
| /linux/virt/kvm/ |
| H A D | kvm_main.c | 315 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument 317 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range() 616 * {gfn(page) | page intersects with [hva_start, hva_end)} = in kvm_handle_hva_range() 1573 * If the memslot gfn is unchanged, rb_replace_node() can be used to in kvm_replace_memslot() 1574 * switch the node in the gfn tree instead of removing the old and in kvm_replace_memslot() 1843 * memslot will be created. Validation of sp->gfn happens in: in kvm_invalidate_memslot() 2559 /* Set @attributes for the gfn range [@start, @end). */ in kvm_vm_set_mem_attributes() 2651 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() 2653 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot() 2657 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() 2743 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) in gfn_to_hva_memslot() 2749 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() [all …]
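The gfn_to_hva() helpers indexed above underpin every guest read/write path: a memslot maps the guest range starting at base_gfn onto the host mapping at userspace_addr, so the hva is userspace_addr + (gfn - base_gfn) * PAGE_SIZE. A hedged userspace model (field names mirror struct kvm_memory_slot; the kernel returns KVM_HVA_ERR_BAD rather than 0 for out-of-slot gfns):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct memslot {
	uint64_t base_gfn;		/* first guest frame of the slot */
	unsigned long npages;
	unsigned long userspace_addr;	/* host VA backing base_gfn */
};

/* Model of __gfn_to_hva_memslot(): guest frame -> host virtual address. */
static unsigned long gfn_to_hva_memslot(const struct memslot *slot, uint64_t gfn)
{
	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return 0;	/* simplified error value */
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

int main(void)
{
	struct memslot slot = { 0x100000, 4096, 0x7f0000000000UL };

	printf("hva %#lx\n", gfn_to_hva_memslot(&slot, 0x100042));
	/* 0x7f0000000000 + 0x42 * 0x1000 = 0x7f0000042000 */
	return 0;
}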
| H A D | dirty_ring.c | 90 static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_invalid() argument 92 smp_store_release(&gfn->flags, 0); in kvm_dirty_gfn_set_invalid() 95 static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_dirtied() argument 97 gfn->flags = KVM_DIRTY_GFN_F_DIRTY; in kvm_dirty_gfn_set_dirtied() 100 static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_harvested() argument 102 return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; in kvm_dirty_gfn_harvested()
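These three helpers are the per-slot handshake of the dirty ring: the kernel publishes a slot with KVM_DIRTY_GFN_F_DIRTY, userspace sets KVM_DIRTY_GFN_F_RESET after harvesting it, and the kernel reclaims the slot, with smp_load_acquire()/smp_store_release() ordering the flag against the slot contents. A standalone model using C11 atomics as an approximate stand-in for the kernel barriers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DIRTY_GFN_F_DIRTY 0x1u
#define DIRTY_GFN_F_RESET 0x2u

struct dirty_gfn {
	_Atomic uint32_t flags;
	uint32_t slot;
	uint64_t offset;
};

int main(void)
{
	struct dirty_gfn g = {0};

	/* Producer (kernel): fill the slot, then mark it dirtied. */
	g.slot = 1;
	g.offset = 0x42;
	atomic_store_explicit(&g.flags, DIRTY_GFN_F_DIRTY, memory_order_release);

	/* Consumer (userspace): harvest, then flag the slot for reset. */
	atomic_fetch_or_explicit(&g.flags, DIRTY_GFN_F_RESET, memory_order_release);

	/* Producer: acquire-load models kvm_dirty_gfn_harvested(). */
	if (atomic_load_explicit(&g.flags, memory_order_acquire) & DIRTY_GFN_F_RESET)
		atomic_store_explicit(&g.flags, 0, memory_order_release); /* set_invalid */

	printf("flags %u\n", (unsigned)atomic_load(&g.flags));	/* 0: slot reusable */
	return 0;
}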
|
| H A D | guest_memfd.c | 57 static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gmem_get_index() argument 59 return gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_get_index() 67 gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; in __kvm_gmem_prepare_folio() local 68 int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio)); in __kvm_gmem_prepare_folio() 71 index, gfn, pfn, rc); in __kvm_gmem_prepare_folio() 91 gfn_t gfn, struct folio *folio) in kvm_gmem_prepare_folio() argument 115 index = kvm_gmem_get_index(slot, gfn); in kvm_gmem_prepare_folio() 795 gfn_t gfn, kvm_pfn_t *pfn, struct page **page, in kvm_gmem_get_pfn() argument 798 pgoff_t index = kvm_gmem_get_index(slot, gfn); in kvm_gmem_get_pfn() 812 r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio); in kvm_gmem_get_pfn() [all …]
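kvm_gmem_get_index() (line 59) and its inverse at line 67 convert between a gfn and a page-cache index in the backing guest_memfd file: index = gfn - slot->base_gfn + slot->gmem.pgoff, so a slot may bind at a nonzero file offset. A standalone round-trip check with made-up values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base_gfn = 0x100000;	/* slot's first guest frame */
	uint64_t pgoff = 0x20;		/* slot bound at file page 0x20 */
	uint64_t gfn = 0x100007;

	/* kvm_gmem_get_index() and its inverse from __kvm_gmem_prepare_folio() */
	uint64_t index = gfn - base_gfn + pgoff;
	uint64_t back = base_gfn + index - pgoff;

	assert(back == gfn);
	printf("gfn 0x%llx <-> file index 0x%llx\n",
	       (unsigned long long)gfn, (unsigned long long)index);
	return 0;
}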
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| H A D | mem_protect.c | 961 int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu, in __pkvm_host_share_guest() argument 966 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_share_guest() 1068 int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm) in __pkvm_host_unshare_guest() argument 1070 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_unshare_guest() 1122 int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot) in __pkvm_host_relax_perms_guest() argument 1125 u64 ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_relax_perms_guest() 1142 int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm) in __pkvm_host_wrprotect_guest() argument 1144 u64 size, ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_wrprotect_guest() 1162 int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm) in __pkvm_host_test_clear_young_guest() argument 1164 u64 size, ipa = hyp_pfn_to_phys(gfn); in __pkvm_host_test_clear_young_guest() [all …]
|
| H A D | hyp-main.c | 251 DECLARE_REG(u64, gfn, host_ctxt, 2); in handle___pkvm_host_share_guest() 268 ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot); in handle___pkvm_host_share_guest() 276 DECLARE_REG(u64, gfn, host_ctxt, 2); in handle___pkvm_host_unshare_guest() 288 ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm); in handle___pkvm_host_unshare_guest() 296 DECLARE_REG(u64, gfn, host_ctxt, 1); in handle___pkvm_host_relax_perms_guest() 308 ret = __pkvm_host_relax_perms_guest(gfn, hyp_vcpu, prot); in handle___pkvm_host_relax_perms_guest() 316 DECLARE_REG(u64, gfn, host_ctxt, 2); in handle___pkvm_host_wrprotect_guest() 328 ret = __pkvm_host_wrprotect_guest(gfn, nr_pages, hyp_vm); in handle___pkvm_host_wrprotect_guest() 337 DECLARE_REG(u64, gfn, host_ctxt, 2); in handle___pkvm_host_test_clear_young_guest() 350 ret = __pkvm_host_test_clear_young_guest(gfn, nr_pages, mkold, hyp_vm); in handle___pkvm_host_test_clear_young_guest() [all …]
|
| /linux/arch/x86/include/asm/ |
| H A D | kvm_page_track.h | 43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages, 52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn); 53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
|
| /linux/arch/arm64/kvm/ |
| H A D | pkvm.c | 290 return m->gfn * PAGE_SIZE; in __pkvm_mapping_start() 295 return (m->gfn + m->nr_pages) * PAGE_SIZE - 1; in __pkvm_mapping_end() 336 ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, in __pkvm_pgtable_stage2_unmap() 366 u64 gfn = addr >> PAGE_SHIFT; in pkvm_pgtable_stage2_map() local 392 ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot); in pkvm_pgtable_stage2_map() 397 mapping->gfn = gfn; in pkvm_pgtable_stage2_map() 421 ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn, in pkvm_pgtable_stage2_wrprotect() 452 young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn, in pkvm_pgtable_stage2_test_clear_young()
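__pkvm_mapping_start()/__pkvm_mapping_end() (lines 290-295) turn a mapping's {gfn, nr_pages} pair into an inclusive IPA byte range, [gfn * PAGE_SIZE, (gfn + nr_pages) * PAGE_SIZE - 1], the form interval lookups want. A standalone model (4 KiB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

struct mapping { uint64_t gfn, nr_pages; };

static uint64_t mapping_start(const struct mapping *m)
{
	return m->gfn * PAGE_SIZE;
}

static uint64_t mapping_end(const struct mapping *m)	/* inclusive last byte */
{
	return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
}

int main(void)
{
	struct mapping m = { 0x80000, 16 };	/* 16 pages at gfn 0x80000 */

	printf("[%#llx, %#llx]\n",
	       (unsigned long long)mapping_start(&m),
	       (unsigned long long)mapping_end(&m));
	/* [0x80000000, 0x8000ffff] */
	return 0;
}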
|
| /linux/arch/x86/kvm/ |
| H A D | mmu.h | 258 int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn); 265 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument 268 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - in gfn_to_index() 321 static inline bool kvm_is_gfn_alias(struct kvm *kvm, gfn_t gfn) in kvm_is_gfn_alias() argument 323 return gfn & kvm_gfn_direct_bits(kvm); in kvm_is_gfn_alias()
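gfn_to_index() (lines 265-268) shifts both the gfn and the slot's base down by KVM_HPAGE_GFN_SHIFT(level) before subtracting, yielding a slot-relative index into per-level metadata arrays. A standalone model (x86's 9-bit-per-level shift assumed):

#include <stdint.h>
#include <stdio.h>

static unsigned hpage_gfn_shift(int level)	/* KVM_HPAGE_GFN_SHIFT stand-in */
{
	return (level - 1) * 9;
}

static uint64_t gfn_to_index(uint64_t gfn, uint64_t base_gfn, int level)
{
	return (gfn >> hpage_gfn_shift(level)) - (base_gfn >> hpage_gfn_shift(level));
}

int main(void)
{
	uint64_t base_gfn = 0x40000, gfn = 0x40345;

	printf("4K index %llu\n", (unsigned long long)gfn_to_index(gfn, base_gfn, 1));
	printf("2M index %llu\n", (unsigned long long)gfn_to_index(gfn, base_gfn, 2));
	/* 837 at 4K; 1 at 2M */
	return 0;
}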
|