Lines matching refs: gfn
314 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument
316 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range()
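
For context on the range flush above: per line 316, the helper returns early when the architecture can flush by range and otherwise degrades to a full remote flush. A hedged sketch of a caller (the example_* helper is hypothetical, not from the listing; kernel context with linux/kvm_host.h assumed):

static void example_zap_and_flush(struct kvm *kvm, gfn_t start, u64 nr_pages)
{
        /* ... zap the stage-2/TDP mappings covering [start, start + nr_pages) ... */

        /*
         * Flush only the zapped range; kvm_flush_remote_tlbs_range()
         * itself falls back to a full remote flush if the architecture
         * cannot flush by range.
         */
        kvm_flush_remote_tlbs_range(kvm, start, nr_pages);
}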
2626 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2628 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2632 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
2647 slot = try_get_memslot(vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot()
2656 slot = search_memslots(slots, gfn, false); in kvm_vcpu_gfn_to_memslot()
2665 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2667 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2673 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn() argument
2675 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_is_visible_gfn()
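
The two lookup paths above differ in caching: the vCPU variant first retries vcpu->last_used_slot (line 2647) before falling back to search_memslots(). A minimal sketch of a slot-plus-visibility check, assuming kernel context; the example_* name is hypothetical:

static int example_check_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        /* Per-vCPU lookup: tries vcpu->last_used_slot before searching. */
        struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

        if (!slot)
                return -ENOENT;

        /* Visibility additionally filters out slots userspace cannot see. */
        if (!kvm_vcpu_is_visible_gfn(vcpu, gfn))
                return -EFAULT;

        return 0;
}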
2681 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_host_page_size() argument
2688 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); in kvm_host_page_size()
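
kvm_host_page_size() reports how large a host page backs the gfn (it resolves the hva first, per line 2688). One plausible use, sketched under the assumption that arch code wants to gate huge guest mappings on the host backing size; the helper name and PMD_SIZE threshold are illustrative only:

static bool example_can_map_huge(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        /* Only map huge if the host backing is at least PMD-sized (2M on x86). */
        return kvm_host_page_size(vcpu, gfn) >= PMD_SIZE;
}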
2710 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many() argument
2720 *nr_pages = slot->npages - (gfn - slot->base_gfn); in __gfn_to_hva_many()
2722 return __gfn_to_hva_memslot(slot, gfn); in __gfn_to_hva_many()
2725 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in gfn_to_hva_many() argument
2728 return __gfn_to_hva_many(slot, gfn, nr_pages, true); in gfn_to_hva_many()
2732 gfn_t gfn) in gfn_to_hva_memslot() argument
2734 return gfn_to_hva_many(slot, gfn, NULL); in gfn_to_hva_memslot()
2738 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
2740 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2744 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_hva() argument
2746 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); in kvm_vcpu_gfn_to_hva()
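
Every hva returned by this family must be validated with kvm_is_error_hva() before use; a failed lookup does not return 0 or NULL. A short sketch (example_* is hypothetical):

static int example_resolve_hva(struct kvm *kvm, gfn_t gfn, unsigned long *hva_out)
{
        unsigned long hva = gfn_to_hva(kvm, gfn);

        if (kvm_is_error_hva(hva))
                return -EFAULT;

        *hva_out = hva;
        return 0;
}

Raw hva access still needs proper uaccess and can race with memslot updates, which is why the kvm_read_guest()/kvm_write_guest() wrappers further down are usually preferable.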
2759 gfn_t gfn, bool *writable) in gfn_to_hva_memslot_prot() argument
2761 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); in gfn_to_hva_memslot_prot()
2769 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
2771 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
2773 return gfn_to_hva_memslot_prot(slot, gfn, writable); in gfn_to_hva_prot()
2776 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) in kvm_vcpu_gfn_to_hva_prot() argument
2778 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_gfn_to_hva_prot()
2780 return gfn_to_hva_memslot_prot(slot, gfn, writable); in kvm_vcpu_gfn_to_hva_prot()
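
The _prot variants additionally report whether the slot is writable. The flag is only meaningful when the lookup itself succeeded; a hedged sketch:

static bool example_gfn_is_writable(struct kvm *kvm, gfn_t gfn)
{
        bool writable = false;
        unsigned long hva = gfn_to_hva_prot(kvm, gfn, &writable);

        return !kvm_is_error_hva(hva) && writable;
}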
3029 kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL, in kvm_follow_pfn()
3046 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_faultin_pfn() argument
3052 .gfn = gfn, in __kvm_faultin_pfn()
3068 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, in kvm_prefetch_pages() argument
3074 addr = gfn_to_hva_many(slot, gfn, &entry); in kvm_prefetch_pages()
3092 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write) in __gfn_to_page() argument
3096 .slot = gfn_to_memslot(kvm, gfn), in __gfn_to_page()
3097 .gfn = gfn, in __gfn_to_page()
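
__gfn_to_page() hands back a referenced struct page (or NULL), so every success must be paired with a release. A sketch assuming the long-standing kvm_release_page_dirty()/kvm_release_page_clean() helpers; the example_* name is hypothetical:

static void example_with_page(struct kvm *kvm, gfn_t gfn)
{
        /* Takes a reference on the backing page; write intent requested. */
        struct page *page = __gfn_to_page(kvm, gfn, true);

        if (!page)
                return;

        /* ... access the page contents, e.g. via kmap_local_page() ... */

        /* Drop the reference and mark the page dirty in one step. */
        kvm_release_page_dirty(page);
}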
3107 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, in __kvm_vcpu_map() argument
3111 .slot = gfn_to_memslot(vcpu->kvm, gfn), in __kvm_vcpu_map()
3112 .gfn = gfn, in __kvm_vcpu_map()
3121 map->gfn = gfn; in __kvm_vcpu_map()
3154 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); in kvm_vcpu_unmap()
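
__kvm_vcpu_map() records map->gfn precisely so that kvm_vcpu_unmap() can mark the page dirty on teardown (line 3154). A sketch assuming the kvm_vcpu_map()/kvm_vcpu_unmap() wrappers from include/linux/kvm_host.h in trees that carry __kvm_vcpu_map(); the helper name is illustrative:

static int example_poke_byte(struct kvm_vcpu *vcpu, gpa_t gpa, u8 val)
{
        struct kvm_host_map map;

        if (kvm_vcpu_map(vcpu, gpa, &map))
                return -EFAULT;

        /* map.hva points at the start of the page containing gpa. */
        *((u8 *)map.hva + offset_in_page(gpa)) = val;

        /* Unmapping marks map->gfn dirty for a writable mapping. */
        kvm_vcpu_unmap(vcpu, &map);
        return 0;
}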
3178 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_read_guest_page() argument
3187 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_page()
3196 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
3199 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
3201 return __kvm_read_guest_page(slot, gfn, data, offset, len); in kvm_read_guest_page()
3205 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, in kvm_vcpu_read_guest_page() argument
3208 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_page()
3210 return __kvm_read_guest_page(slot, gfn, data, offset, len); in kvm_vcpu_read_guest_page()
3216 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_read_guest() local
3222 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
3228 ++gfn; in kvm_read_guest()
3236 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_read_guest() local
3242 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_read_guest()
3248 ++gfn; in kvm_vcpu_read_guest()
3254 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_read_guest_atomic() argument
3263 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_atomic()
3277 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_read_guest_atomic() local
3278 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_atomic()
3281 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); in kvm_vcpu_read_guest_atomic()
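
The gpa-based readers above split the access at page boundaries: gfn = gpa >> PAGE_SHIFT, then one per-page segment at a time, incrementing gfn until len is consumed. Callers may therefore read across pages freely; a minimal sketch (buffer size and helper name are illustrative):

static int example_read_blob(struct kvm *kvm, gpa_t gpa)
{
        u8 buf[64];

        /* May straddle a page boundary; the wrapper segments it per gfn. */
        if (kvm_read_guest(kvm, gpa, buf, sizeof(buf)))
                return -EFAULT;

        /* ... parse buf ... */
        return 0;
}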
3287 struct kvm_memory_slot *memslot, gfn_t gfn, in __kvm_write_guest_page() argument
3296 addr = gfn_to_hva_memslot(memslot, gfn); in __kvm_write_guest_page()
3302 mark_page_dirty_in_slot(kvm, memslot, gfn); in __kvm_write_guest_page()
3306 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
3309 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
3311 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); in kvm_write_guest_page()
3315 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_vcpu_write_guest_page() argument
3318 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_guest_page()
3320 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
3327 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_write_guest() local
3333 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
3339 ++gfn; in kvm_write_guest()
3348 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_write_guest() local
3354 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_write_guest()
3360 ++gfn; in kvm_vcpu_write_guest()
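
The write side mirrors the read side, with one addition: each successful page write calls mark_page_dirty_in_slot() (line 3302), so dirty logging is handled for the caller. Sketch:

static int example_publish_status(struct kvm *kvm, gpa_t gpa, u32 status)
{
        /* Dirty tracking for the touched gfns happens inside the wrapper. */
        return kvm_write_guest(kvm, gpa, &status, sizeof(status));
}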
3493 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_clear_guest() local
3499 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); in kvm_clear_guest()
3504 ++gfn; in kvm_clear_guest()
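
kvm_clear_guest() reuses kvm_write_guest_page() with a zero page (line 3499), again one page-sized segment per gfn; a one-line sketch of zeroing a guest region:

static int example_zero_region(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        return kvm_clear_guest(kvm, gpa, len);
}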
3512 gfn_t gfn) in mark_page_dirty_in_slot() argument
3524 unsigned long rel_gfn = gfn - memslot->base_gfn; in mark_page_dirty_in_slot()
3535 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
3539 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
3540 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
3544 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_mark_page_dirty() argument
3548 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_mark_page_dirty()
3549 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
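
Manual dirty marking is only needed when guest memory is modified outside the kvm_write_guest*() wrappers, e.g. through a raw hva or a long-lived mapping. A hedged sketch; the uaccess details are deliberately elided:

static void example_mark_after_raw_write(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned long hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

        if (kvm_is_error_hva(hva))
                return;

        /* ... write guest memory through hva with proper uaccess ... */

        /* Inform dirty logging / the dirty ring about the write. */
        kvm_vcpu_mark_page_dirty(vcpu, gfn);
}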