/linux/arch/x86/kvm/mmu/ |
H A D | mmutrace.h | 13 __field(__u64, gfn) \ 20 __entry->gfn = sp->gfn; \ 37 __entry->gfn, role.level, \ 213 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte), 214 TP_ARGS(sptep, gfn, spte), 218 __field(gfn_t, gfn) 225 __entry->gfn = gfn; 231 __entry->gfn, __entry->access, __entry->gen) 236 TP_PROTO(u64 addr, gfn_t gfn, unsigned access), 237 TP_ARGS(addr, gfn, access), [all …]
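Note: the mmutrace.h fragments above come from KVM's TRACE_EVENT definitions. A trimmed kernel-context sketch of that pattern, close in shape to the mark_mmio_spte event the fragments belong to but with an illustrative name (it will not build outside a kernel tree):

    TRACE_EVENT(example_mmio_spte,
        TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
        TP_ARGS(sptep, gfn, spte),

        TP_STRUCT__entry(
            __field(void *, sptep)
            __field(gfn_t, gfn)
            __field(u64, spte)
        ),

        TP_fast_assign(
            __entry->sptep = sptep;
            __entry->gfn = gfn;
            __entry->spte = spte;
        ),

        TP_printk("sptep %p gfn %llx spte %llx",
                  __entry->sptep, __entry->gfn, __entry->spte)
    );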
|
H A D | tdp_mmu.c | 210 gfn_t gfn, union kvm_mmu_page_role role) in tdp_mmu_init_sp() argument 217 sp->gfn = gfn; in tdp_mmu_init_sp() 235 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp() 308 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, 343 static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte, in remove_external_spte() argument 362 ret = static_call(kvm_x86_remove_external_spte)(kvm, gfn, level, old_pfn); in remove_external_spte() 387 gfn_t base_gfn = sp->gfn; in handle_removed_pt() 396 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt() local 459 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt() 464 remove_external_spte(kvm, gfn, old_spte, level); in handle_removed_pt() [all …]
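Note: handle_removed_pt() above derives each child gfn as base_gfn + i * KVM_PAGES_PER_HPAGE(level). A standalone userspace model of that arithmetic, assuming the usual x86 geometry of 9 bits (512 entries) per page-table level:

    /* gfn covered by entry i of a page table at a given level */
    #include <stdint.h>
    #include <stdio.h>

    #define SPTE_LEVEL_BITS 9

    /* pages spanned by one entry at `level` (level 1 = 4 KiB leaf) */
    static uint64_t pages_per_hpage(int level)
    {
        return 1ULL << ((level - 1) * SPTE_LEVEL_BITS);
    }

    int main(void)
    {
        uint64_t base_gfn = 0x100000;   /* illustrative */

        for (int i = 0; i < 4; i++)     /* first 4 of 512 entries */
            printf("level 2, entry %d -> gfn %#llx\n", i,
                   (unsigned long long)(base_gfn + i * pages_per_hpage(2)));
        return 0;
    }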
|
H A D | page_track.c | 75 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_write_track() argument 80 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track() 91 gfn_t gfn) in __kvm_write_track_add_gfn() argument 101 update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn() 107 kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn() 109 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn() 114 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn() argument 124 update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn() 130 kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn() 137 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gfn_is_write_tracked() argument [all …]
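Note: update_gfn_write_track() above maintains a per-page count of write trackers; the fragments show the 0 -> 1 transition triggering write protection and large-page disallowing. A minimal userspace model of just the counting (array size and helper name invented here):

    #include <assert.h>
    #include <stdio.h>

    #define SLOT_PAGES 512

    static unsigned short write_track[SLOT_PAGES];

    static void update_write_track(unsigned long index, short count)
    {
        unsigned short val = write_track[index];

        assert(count == 1 || count == -1);
        assert(val != 0 || count > 0);  /* no underflow */
        write_track[index] = val + count;
    }

    int main(void)
    {
        update_write_track(7, 1);   /* first tracker: would write-protect */
        update_write_track(7, 1);   /* second tracker: count only */
        update_write_track(7, -1);
        printf("gfn index 7 trackers: %u\n", write_track[7]);
        return 0;
    }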
|
H A D | mmu.c | 286 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep() local 288 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep() 291 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument 294 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte() 296 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte() 641 return sp->gfn; in kvm_mmu_page_get_gfn() 646 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn() 676 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation() argument 679 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; in kvm_mmu_page_set_translation() 686 sp->gfn, kvm_mmu_page_get_access(sp, index), access); in kvm_mmu_page_set_translation() [all …]
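Note: kvm_mmu_page_set_translation() above packs a gfn and its access bits into a single u64, relying on the low PAGE_SHIFT bits of (gfn << PAGE_SHIFT) being free. A userspace sketch of that packing, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define ACCESS_MASK ((1ULL << PAGE_SHIFT) - 1)

    static uint64_t pack(uint64_t gfn, unsigned access)
    {
        return (gfn << PAGE_SHIFT) | access;    /* as in the fragment */
    }

    int main(void)
    {
        uint64_t e = pack(0xfeed, 0x7);

        printf("gfn %#llx access %#llx\n",
               (unsigned long long)(e >> PAGE_SHIFT),
               (unsigned long long)(e & ACCESS_MASK));
        return 0;
    }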
|
H A D | page_track.h | 19 gfn_t gfn); 21 struct kvm_memory_slot *slot, gfn_t gfn); 24 const struct kvm_memory_slot *slot, gfn_t gfn);
|
/linux/include/linux/ |
H A D | kvm_host.h | 294 kvm_pfn_t gfn; member 1194 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); 1196 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); 1234 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, 1237 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write); 1238 static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument 1240 return __gfn_to_page(kvm, gfn, true); in gfn_to_page() 1243 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 1244 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 1245 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); [all …]
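Note: the gfn_to_hva*() prototypes above all reduce to linear offsetting within a memslot. A standalone userspace model of that resolution; the struct fields are simplified stand-ins for KVM's, and 4 KiB pages are assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct memslot {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;    /* start of the host mapping */
    };

    static uint64_t gfn_to_hva(const struct memslot *slot, uint64_t gfn)
    {
        return slot->userspace_addr +
               ((gfn - slot->base_gfn) << PAGE_SHIFT);
    }

    int main(void)
    {
        struct memslot slot = { 0x100, 0x200, 0x7f0000000000ULL };

        printf("hva %#llx\n", (unsigned long long)gfn_to_hva(&slot, 0x105));
        return 0;
    }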
|
/linux/drivers/gpu/drm/i915/gvt/ |
H A D | kvmgt.c | 92 gfn_t gfn; member 100 gfn_t gfn; member 111 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, 128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument 131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, in gvt_unpin_guest_page() 136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument 149 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; in gvt_pin_guest_page() 173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page() 177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument 184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page); in gvt_dma_map_page() [all …]
|
H A D | page_track.h | 45 struct intel_vgpu *vgpu, unsigned long gfn); 48 unsigned long gfn, gvt_page_track_handler_t handler, 51 unsigned long gfn); 53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn); 54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
|
/linux/include/xen/ |
H A D | xen-ops.h | 66 xen_pfn_t *gfn, int nr, 79 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument 116 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument 122 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array() 130 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array() 176 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument 183 return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
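Note: the xen_remap_domain_gfn_array() fragment above dispatches on whether the domain is auto-translated. A cleaned-up kernel-context restatement of that dispatch (a sketch, not the verbatim upstream inline):

    #include <xen/features.h>
    #include <xen/xen-ops.h>

    static int remap_gfn_array_sketch(struct vm_area_struct *vma,
                                      unsigned long addr, xen_pfn_t *gfn,
                                      int nr, int *err_ptr, pgprot_t prot,
                                      unsigned int domid, struct page **pages)
    {
        /* HVM/PVH and all Arm guests: software translation path */
        if (xen_feature(XENFEAT_auto_translated_physmap))
            return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                             prot, domid, pages);
        /* x86 PV: the foreign gfn is a machine frame, map it directly */
        return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, false);
    }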
|
/linux/drivers/xen/ |
H A D | xlate_mmu.c | 45 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data); 84 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument 89 info->h_gpfns[info->h_iter] = gfn; in setup_hparams() 145 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument 158 data.fgfn = gfn; in xen_xlate_remap_gfn_array() 174 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument 179 xrp.gpfn = gfn; in unmap_gfn() 197 static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument 201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
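Note: xlate_mmu.c drives everything through the xen_gfn_fn_t callback type at line 45; setup_hparams(), unmap_gfn() and setup_balloon_gfn() above are just visitors. A standalone userspace model of the pattern (walker and callback names invented):

    #include <stdio.h>

    typedef void (*gfn_fn_t)(unsigned long gfn, void *data);

    static void for_each_gfn(unsigned long first_gfn, int nr,
                             gfn_fn_t fn, void *data)
    {
        for (int i = 0; i < nr; i++)
            fn(first_gfn + i, data);
    }

    static void collect_gfn(unsigned long gfn, void *data)
    {
        unsigned long *count = data;

        printf("visiting gfn %#lx\n", gfn);
        (*count)++;
    }

    int main(void)
    {
        unsigned long count = 0;

        for_each_gfn(0x1000, 3, collect_gfn, &count);
        printf("%lu gfns visited\n", count);
        return 0;
    }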
|
/linux/virt/kvm/ |
H A D | guest_memfd.c | 33 gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff; in __kvm_gmem_prepare_folio() local 34 int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio)); in __kvm_gmem_prepare_folio() 37 index, gfn, pfn, rc); in __kvm_gmem_prepare_folio() 51 * Process @folio, which contains @gfn, so that the guest can use it. 52 * The folio must be locked and the gfn must be contained in @slot. 57 gfn_t gfn, struct folio *folio) in kvm_gmem_prepare_folio() argument 81 index = gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_prepare_folio() 310 static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gmem_get_index() 312 return gfn - slot->base_gfn + slot->gmem.pgoff; in kvm_gmem_get_index() 607 gfn_t gfn, kvm_pfn_t *pfn, in kvm_gmem_get_pfn() 556 __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared, int *max_order) __kvm_gmem_get_pfn() argument 593 kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, int *max_order) kvm_gmem_get_pfn() argument 650 gfn_t gfn = start_gfn + i; kvm_gmem_populate() local [all …] |
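Note: the guest_memfd fragments above convert both ways between a gfn and a file page index via gmem.pgoff. A userspace sketch showing the two conversions are exact inverses (struct fields flattened for illustration):

    #include <assert.h>
    #include <stdint.h>

    struct slot { uint64_t base_gfn, gmem_pgoff; };

    static uint64_t gfn_to_index(const struct slot *s, uint64_t gfn)
    {
        return gfn - s->base_gfn + s->gmem_pgoff;   /* line 310 above */
    }

    static uint64_t index_to_gfn(const struct slot *s, uint64_t index)
    {
        return s->base_gfn + index - s->gmem_pgoff; /* line 33 above */
    }

    int main(void)
    {
        struct slot s = { .base_gfn = 0x40000, .gmem_pgoff = 0x10 };

        assert(index_to_gfn(&s, gfn_to_index(&s, 0x40123)) == 0x40123);
        return 0;
    }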
H A D | kvm_main.c | 316 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument 318 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range() 2548 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument 2550 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot() 2554 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument 2569 slot = try_get_memslot(vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot() 2578 slot = search_memslots(slots, gfn, false); in kvm_vcpu_gfn_to_memslot() 2587 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument 2589 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn() 2595 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn() argument [all …]
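Note: kvm_vcpu_gfn_to_memslot() above tries the vCPU's last_used_slot before searching, then caches whatever the search returns. A standalone model of that lookup order; a linear scan stands in for KVM's sorted lookup, and the unsigned-subtraction range check mirrors the kernel's trick:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct memslot { uint64_t base_gfn, npages; };
    struct vcpu { struct memslot *last_used_slot; };

    static struct memslot *search_memslots(struct memslot *slots, size_t n,
                                           uint64_t gfn)
    {
        for (size_t i = 0; i < n; i++)  /* wraps below base_gfn, so fails */
            if (gfn - slots[i].base_gfn < slots[i].npages)
                return &slots[i];
        return NULL;
    }

    static struct memslot *vcpu_gfn_to_memslot(struct vcpu *vcpu,
                                               struct memslot *slots,
                                               size_t n, uint64_t gfn)
    {
        struct memslot *slot = vcpu->last_used_slot;

        if (slot && gfn - slot->base_gfn < slot->npages)
            return slot;                    /* fast path: cache hit */
        slot = search_memslots(slots, n, gfn);
        if (slot)
            vcpu->last_used_slot = slot;    /* remember for next time */
        return slot;
    }

    int main(void)
    {
        struct memslot slots[] = { { 0x0, 0x100 }, { 0x1000, 0x200 } };
        struct vcpu vcpu = { NULL };

        printf("slot base %#llx\n", (unsigned long long)
               vcpu_gfn_to_memslot(&vcpu, slots, 2, 0x1042)->base_gfn);
        return 0;
    }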
|
H A D | dirty_ring.c | 92 static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_invalid() argument 94 smp_store_release(&gfn->flags, 0); in kvm_dirty_gfn_set_invalid() 97 static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_dirtied() argument 99 gfn->flags = KVM_DIRTY_GFN_F_DIRTY; in kvm_dirty_gfn_set_dirtied() 102 static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_harvested() argument 104 return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; in kvm_dirty_gfn_harvested()
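Note: the dirty_ring.c helpers above publish an entry and poll for userspace's RESET flag with acquire/release ordering. A userspace model using C11 atomics in place of smp_store_release()/smp_load_acquire(); in the kernel the DIRTY publish is a plain store ordered by an earlier smp_wmb(), which a release store models here:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DIRTY_GFN_F_DIRTY (1u << 0)
    #define DIRTY_GFN_F_RESET (1u << 1)

    struct dirty_gfn {
        _Atomic uint32_t flags;
        uint32_t slot;
        uint64_t offset;
    };

    static void gfn_set_invalid(struct dirty_gfn *g)
    {
        atomic_store_explicit(&g->flags, 0, memory_order_release);
    }

    static void gfn_set_dirtied(struct dirty_gfn *g)
    {
        /* payload (slot/offset) must be written before this point */
        atomic_store_explicit(&g->flags, DIRTY_GFN_F_DIRTY,
                              memory_order_release);
    }

    static bool gfn_harvested(struct dirty_gfn *g)
    {
        return atomic_load_explicit(&g->flags, memory_order_acquire) &
               DIRTY_GFN_F_RESET;
    }

    int main(void)
    {
        struct dirty_gfn e = { 0 };
        int harvested;

        e.slot = 1;
        e.offset = 0x42;            /* payload first, then publish */
        gfn_set_dirtied(&e);

        /* userspace harvests the entry and flags it RESET */
        atomic_store_explicit(&e.flags, DIRTY_GFN_F_RESET,
                              memory_order_release);
        harvested = gfn_harvested(&e);
        gfn_set_invalid(&e);        /* the slot may now be reused */
        return harvested ? 0 : 1;
    }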
|
H A D | pfncache.c | 166 .gfn = gpa_to_gfn(gpc->gpa), in hva_to_pfn_retry() 299 gfn_t gfn = gpa_to_gfn(gpa); in __kvm_gpc_refresh() 303 gpc->memslot = __gfn_to_memslot(slots, gfn); in __kvm_gpc_refresh() 304 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); in __kvm_gpc_refresh() 291 gfn_t gfn = gpa_to_gfn(gpa); __kvm_gpc_refresh() local
|
/linux/arch/powerpc/kvm/ |
H A D | e500_mmu_host.c | 324 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument 355 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map() 356 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map() 358 pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page); in kvmppc_e500_shadow_map() 362 __func__, (long)gfn); in kvmppc_e500_shadow_map() 391 __func__, (long)gfn, pfn); in kvmppc_e500_shadow_map() 407 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map() 441 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map() 444 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map() 446 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map() [all …]
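Note: kvmppc_e500_shadow_map() above rounds the gfn down to the TLB entry size with gfn & ~(tsize_pages - 1), since an entry of tsize_pages pages (a power of two) must start on a tsize_pages-aligned gfn. A standalone model of that alignment:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gfn = 0x12345, tsize_pages = 1ULL << 4; /* 16-page entry */
        uint64_t gfn_start = gfn & ~(tsize_pages - 1);   /* round down */
        uint64_t gfn_end = gfn_start + tsize_pages;

        printf("gfn %#llx falls in [%#llx, %#llx)\n",
               (unsigned long long)gfn, (unsigned long long)gfn_start,
               (unsigned long long)gfn_end);
        return 0;
    }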
|
H A D | book3s_hv_rm_mmu.c | 97 unsigned long gfn, unsigned long psize) in kvmppc_update_dirty_map() argument 104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map() 105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map() 113 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local 117 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte() 118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte() 120 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte() 131 unsigned long gfn; in revmap_for_hpte() local 133 gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr)); in revmap_for_hpte() 134 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte() [all …]
|
/linux/arch/x86/include/asm/ |
H A D | kvm_page_track.h | 43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages, 52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn); 53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
|
H A D | kvm_host.h | 1711 int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn, 1772 u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); 1778 int (*link_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level, 1781 int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level, 1785 int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level, 1789 int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level, 1877 int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); 1914 gfn_t gfn; member 1958 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, in kvm_arch_flush_remote_tlbs_range() argument 1964 return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages); in kvm_arch_flush_remote_tlbs_range() [all …]
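Note: kvm_arch_flush_remote_tlbs_range() above treats flush_remote_tlbs_range as an optional vendor hook, and common code falls back to a full flush on -EOPNOTSUPP. A userspace model of that optional-hook convention (struct and helper names are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct kvm_ops {
        int (*flush_remote_tlbs_range)(uint64_t gfn, uint64_t nr_pages);
    };

    static int flush_range(const struct kvm_ops *ops, uint64_t gfn,
                           uint64_t nr)
    {
        if (!ops->flush_remote_tlbs_range)
            return -EOPNOTSUPP;     /* caller does a full flush instead */
        return ops->flush_remote_tlbs_range(gfn, nr);
    }

    int main(void)
    {
        struct kvm_ops none = { 0 };

        printf("no hook: %d\n", flush_range(&none, 0x1000, 16));
        return 0;
    }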
|
/linux/arch/riscv/kvm/ |
H A D | vcpu_sbi_sta.c | 36 gfn_t gfn; in kvm_riscv_vcpu_record_steal_time() local 46 gfn = shmem >> PAGE_SHIFT; in kvm_riscv_vcpu_record_steal_time() 47 hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); in kvm_riscv_vcpu_record_steal_time() 78 kvm_vcpu_mark_page_dirty(vcpu, gfn); in kvm_riscv_vcpu_record_steal_time()
|
/linux/drivers/xen/xenbus/ |
H A D | xenbus_client.c | 411 unsigned long gfn; in xenbus_setup_ring() local 414 gfn = pfn_to_gfn(vmalloc_to_pfn(addr)); in xenbus_setup_ring() 416 gfn = virt_to_gfn(addr); in xenbus_setup_ring() 420 gfn, 0); in xenbus_setup_ring() 653 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, in xenbus_map_ring_setup_grant_hvm() argument 659 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); in xenbus_map_ring_setup_grant_hvm() 871 static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn, in xenbus_unmap_ring_setup_grant_hvm() argument 878 info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); in xenbus_unmap_ring_setup_grant_hvm()
|
/linux/arch/powerpc/include/asm/ |
H A D | kvm_book3s_uvmem.h | 22 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn); 81 static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) in kvmppc_send_page_to_uv() argument
|
/linux/include/xen/arm/ |
H A D | page.h | 52 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument 54 return gfn; in gfn_to_pfn()
|
/linux/arch/s390/kvm/ |
H A D | gaccess.c | 829 const gfn_t gfn = gpa_to_gfn(gpa); in access_guest_page() local 832 if (!gfn_to_memslot(kvm, gfn)) in access_guest_page() 835 rc = kvm_write_guest_page(kvm, gfn, data, offset, len); in access_guest_page() 837 rc = kvm_read_guest_page(kvm, gfn, data, offset, len); in access_guest_page() 847 gfn_t gfn; in access_guest_page_with_key() local 851 gfn = gpa >> PAGE_SHIFT; in access_guest_page_with_key() 852 slot = gfn_to_memslot(kvm, gfn); in access_guest_page_with_key() 853 hva = gfn_to_hva_memslot_prot(slot, gfn, &writable); in access_guest_page_with_key() 871 mark_page_dirty_in_slot(kvm, slot, gfn); in access_guest_page_with_key() 1024 gfn_t gfn = gpa_to_gfn(gpa); in cmpxchg_guest_abs_with_key() local [all …]
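Note: access_guest_page() and access_guest_page_with_key() above each touch a single guest page, which implies callers split a guest physical range at page boundaries. A standalone model of that split (inferred from the fragments, assuming 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        uint64_t gpa = 0x1ff8, len = 16;    /* crosses a page boundary */

        while (len) {
            uint64_t offset = gpa & (PAGE_SIZE - 1);
            uint64_t seg = PAGE_SIZE - offset < len ?
                           PAGE_SIZE - offset : len;

            printf("gfn %#llx offset %#llx len %llu\n",
                   (unsigned long long)(gpa / PAGE_SIZE),
                   (unsigned long long)offset, (unsigned long long)seg);
            gpa += seg;
            len -= seg;
        }
        return 0;
    }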
|
/linux/arch/x86/kvm/svm/ |
H A D | sev.c | 2266 gfn_t gfn; in sev_gmem_post_populate() local 2271 for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) { in sev_gmem_post_populate() 2279 __func__, gfn, ret, assigned); in sev_gmem_post_populate() 2294 ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K, in sev_gmem_post_populate() 3728 u64 gfn; in snp_begin_psc() local 3759 gfn = entry_start.gfn; in snp_begin_psc() 3763 if (entry_start.cur_page > npages || !IS_ALIGNED(gfn, npages)) { in snp_begin_psc() 3777 gfn += entry_start.cur_page; in snp_begin_psc() 3804 entry.gfn != entry_start.gfn + npages || in snp_begin_psc() 3824 vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn); in snp_begin_psc() [all …]
|
/linux/arch/x86/include/asm/xen/ |
H A D | page.h | 251 static inline unsigned long gfn_to_pfn(unsigned long gfn) in gfn_to_pfn() argument 254 return gfn; in gfn_to_pfn() 256 return mfn_to_pfn(gfn); in gfn_to_pfn()
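Note: the two gfn_to_pfn() definitions in this listing (arm/page.h above and this x86 one) differ only in the PV case: auto-translated guests use the identity map, while x86 PV goes through mfn_to_pfn(). A userspace model with a stub translation table standing in for the machine-to-physical lookup:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long m2p[8] = { 5, 2, 7, 0, 1, 3, 4, 6 };  /* stub */

    static unsigned long gfn_to_pfn(bool auto_translated, unsigned long gfn)
    {
        if (auto_translated)
            return gfn;     /* Arm / x86 HVM-PVH: gfn == pfn */
        return m2p[gfn];    /* x86 PV: mfn_to_pfn() lookup */
    }

    int main(void)
    {
        printf("HVM: %lu, PV: %lu\n", gfn_to_pfn(true, 3),
               gfn_to_pfn(false, 3));
        return 0;
    }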
|