Lines matching refs: gfn
316 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument
318 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range()
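A minimal sketch of how a caller might use this range flush, assuming only the two-line body above: kvm_flush_remote_tlbs_range() tries the architecture's range hook and falls back to a full remote flush when that hook fails. The helper below is hypothetical, not from the source.

	/*
	 * Hypothetical caller: flush the TLB entries covering one whole
	 * memslot after its mappings change. base_gfn and npages are the
	 * standard struct kvm_memory_slot fields.
	 */
	static void flush_memslot_tlbs(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
	{
		kvm_flush_remote_tlbs_range(kvm, slot->base_gfn, slot->npages);
	}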
2548 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2550 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2554 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
2569 slot = try_get_memslot(vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot()
2578 slot = search_memslots(slots, gfn, false); in kvm_vcpu_gfn_to_memslot()
2587 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2589 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2595 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn() argument
2597 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_is_visible_gfn()
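To illustrate the lookup family, a hedged sketch of a hypothetical helper: the per-vCPU variant probes vcpu->last_used_slot (try_get_memslot above) before falling back to search_memslots(), so it is the cheaper entry point on vCPU paths, while gfn_to_memslot() is the plain VM-wide walk. The visibility helpers layer a validity check on top of either lookup.

	/*
	 * Hypothetical helper, not from the source: resolve a gfn to its
	 * memslot, preferring the vCPU's cached slot when a vCPU context
	 * is available.
	 */
	static struct kvm_memory_slot *lookup_slot(struct kvm *kvm,
						   struct kvm_vcpu *vcpu,
						   gfn_t gfn)
	{
		if (vcpu)
			return kvm_vcpu_gfn_to_memslot(vcpu, gfn); /* tries last_used_slot */
		return gfn_to_memslot(kvm, gfn);                   /* plain memslot walk */
	}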
2603 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_host_page_size() argument
2610 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); in kvm_host_page_size()
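kvm_host_page_size() first resolves the gfn to a host virtual address (via kvm_vcpu_gfn_to_hva_prot() above) and then asks the host MM how large the backing page is. A hypothetical use, assuming PMD_SIZE as the huge-page threshold:

	/* Sketch only: decide whether the host backing could support a
	 * huge guest mapping at this gfn. */
	static bool gfn_backed_by_hugepage(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		return kvm_host_page_size(vcpu, gfn) >= PMD_SIZE;
	}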
2632 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many() argument
2642 *nr_pages = slot->npages - (gfn - slot->base_gfn); in __gfn_to_hva_many()
2644 return __gfn_to_hva_memslot(slot, gfn); in __gfn_to_hva_many()
2647 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in gfn_to_hva_many() argument
2650 return __gfn_to_hva_many(slot, gfn, nr_pages, true); in gfn_to_hva_many()
2654 gfn_t gfn) in gfn_to_hva_memslot() argument
2656 return gfn_to_hva_many(slot, gfn, NULL); in gfn_to_hva_memslot()
2660 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
2662 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2666 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_hva() argument
2668 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); in kvm_vcpu_gfn_to_hva()
2681 gfn_t gfn, bool *writable) in gfn_to_hva_memslot_prot() argument
2683 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); in gfn_to_hva_memslot_prot()
2691 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
2693 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
2695 return gfn_to_hva_memslot_prot(slot, gfn, writable); in gfn_to_hva_prot()
2698 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) in kvm_vcpu_gfn_to_hva_prot() argument
2700 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_gfn_to_hva_prot()
2702 return gfn_to_hva_memslot_prot(slot, gfn, writable); in kvm_vcpu_gfn_to_hva_prot()
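The whole gfn-to-hva family funnels into __gfn_to_hva_many(), which also reports how many pages remain in the slot past the gfn (slot->npages - (gfn - slot->base_gfn) above). A hedged sketch of typical consumption; kvm_is_error_hva() is the canonical validity check on the returned address:

	/* Hypothetical caller: translate a guest frame to a userspace
	 * pointer, returning NULL when the gfn has no usable slot. */
	static void __user *gfn_to_user_ptr(struct kvm *kvm, gfn_t gfn)
	{
		unsigned long hva = gfn_to_hva(kvm, gfn);

		if (kvm_is_error_hva(hva))
			return NULL;

		return (void __user *)hva;
	}

The _prot variants additionally report through their bool *writable out-parameter whether the slot permits writes, which fault handlers use to decide between read-only and writable mappings.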
2951 kfp->hva = __gfn_to_hva_many(kfp->slot, kfp->gfn, NULL, in kvm_follow_pfn()
2968 kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_faultin_pfn() argument
2974 .gfn = gfn, in __kvm_faultin_pfn()
2990 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, in kvm_prefetch_pages() argument
2996 addr = gfn_to_hva_many(slot, gfn, &entry); in kvm_prefetch_pages()
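A sketch of the fault-in path under stated assumptions: only the first two parameters of __kvm_faultin_pfn() are visible above, so the trailing FOLL_* flags word and the writable/refcounted-page out-parameters below are taken from recent kernels and should be treated as assumptions. Internally, the function builds a kvm_follow_pfn request (kfp above) whose hva comes from __gfn_to_hva_many().

	/* Sketch under assumptions: resolve one guest frame to a host pfn,
	 * faulting the backing page in if necessary. */
	static kvm_pfn_t fault_in_one_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
					  bool write)
	{
		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
		struct page *refcounted_page = NULL;  /* assumed out-param */
		bool writable = false;                /* assumed out-param */

		if (!slot)
			return KVM_PFN_NOSLOT;

		return __kvm_faultin_pfn(slot, gfn, write ? FOLL_WRITE : 0,
					 &writable, &refcounted_page);
	}

kvm_prefetch_pages() batches the same translation: gfn_to_hva_many() returns the hva plus the remaining page count ("entry" above), bounding how many pages can be pinned in one call.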
3014 struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write) in __gfn_to_page() argument
3018 .slot = gfn_to_memslot(kvm, gfn), in __gfn_to_page()
3019 .gfn = gfn, in __gfn_to_page()
3029 int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, in __kvm_vcpu_map() argument
3033 .slot = gfn_to_memslot(vcpu->kvm, gfn), in __kvm_vcpu_map()
3034 .gfn = gfn, in __kvm_vcpu_map()
3043 map->gfn = gfn; in __kvm_vcpu_map()
3076 kvm_vcpu_mark_page_dirty(vcpu, map->gfn); in kvm_vcpu_unmap()
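A hedged illustration of the map/unmap pair: the bool writable parameter of __kvm_vcpu_map() and the (vcpu, map) unmap signature are inferred from the call sites above, not fully visible in the listing. Note from line 3076 that kvm_vcpu_unmap() marks map->gfn dirty on teardown, so the caller does not need to do so itself.

	/* Hypothetical caller: map a guest page into the kernel, write one
	 * byte through the mapping, then tear it down. */
	static int poke_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, u8 val)
	{
		struct kvm_host_map map;
		int r;

		r = __kvm_vcpu_map(vcpu, gfn, &map, true);
		if (r)
			return r;

		*(u8 *)map.hva = val;        /* map.hva is the kernel-side mapping */
		kvm_vcpu_unmap(vcpu, &map);  /* also marks map->gfn dirty */
		return 0;
	}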
3100 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_read_guest_page() argument
3109 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_page()
3118 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
3121 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
3123 return __kvm_read_guest_page(slot, gfn, data, offset, len); in kvm_read_guest_page()
3127 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, in kvm_vcpu_read_guest_page() argument
3130 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_page()
3132 return __kvm_read_guest_page(slot, gfn, data, offset, len); in kvm_vcpu_read_guest_page()
3138 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_read_guest() local
3144 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
3150 ++gfn; in kvm_read_guest()
3158 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_read_guest() local
3164 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_read_guest()
3170 ++gfn; in kvm_vcpu_read_guest()
3176 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, in __kvm_read_guest_atomic() argument
3185 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_atomic()
3199 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_read_guest_atomic() local
3200 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_atomic()
3203 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); in kvm_vcpu_read_guest_atomic()
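The read helpers segment a gpa-addressed copy into per-page chunks: kvm_read_guest() computes the starting gfn (gpa >> PAGE_SHIFT above), reads up to a page boundary, then advances with ++gfn until len is exhausted. A minimal sketch of a caller; the segmentation itself lives inside kvm_read_guest():

	/* Sketch: read a u64 from guest physical memory; the value may
	 * straddle a page boundary and kvm_read_guest() handles that. */
	static int read_guest_u64(struct kvm *kvm, gpa_t gpa, u64 *val)
	{
		return kvm_read_guest(kvm, gpa, val, sizeof(*val));
	}

The _atomic variant uses the same slot/hva translation but copies with page faults disabled, for callers that cannot sleep.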
3209 struct kvm_memory_slot *memslot, gfn_t gfn, in __kvm_write_guest_page() argument
3218 addr = gfn_to_hva_memslot(memslot, gfn); in __kvm_write_guest_page()
3224 mark_page_dirty_in_slot(kvm, memslot, gfn); in __kvm_write_guest_page()
3228 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
3231 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
3233 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); in kvm_write_guest_page()
3237 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_vcpu_write_guest_page() argument
3240 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_guest_page()
3242 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
3249 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_write_guest() local
3255 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
3261 ++gfn; in kvm_write_guest()
3270 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_vcpu_write_guest() local
3276 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_write_guest()
3282 ++gfn; in kvm_vcpu_write_guest()
3415 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_clear_guest() local
3421 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg); in kvm_clear_guest()
3426 ++gfn; in kvm_clear_guest()
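The write path mirrors the read path gfn-by-gfn, with one addition visible at line 3224: __kvm_write_guest_page() calls mark_page_dirty_in_slot() after a successful copy, so every byte written through these helpers is captured by dirty logging. kvm_clear_guest() reuses kvm_write_guest_page() with a zero page as the source. A minimal caller sketch:

	/* Sketch: write a u64 into guest physical memory; dirty tracking
	 * is handled inside the helper. */
	static int write_guest_u64(struct kvm *kvm, gpa_t gpa, u64 val)
	{
		return kvm_write_guest(kvm, gpa, &val, sizeof(val));
	}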
3434 gfn_t gfn) in mark_page_dirty_in_slot() argument
3446 unsigned long rel_gfn = gfn - memslot->base_gfn; in mark_page_dirty_in_slot()
3457 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
3461 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
3462 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
3466 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_mark_page_dirty() argument
3470 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_mark_page_dirty()
3471 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
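Finally, the dirty-tracking entry points: mark_page_dirty_in_slot() converts the gfn to a slot-relative index (rel_gfn = gfn - memslot->base_gfn above) before setting the corresponding dirty-bitmap bit or pushing a dirty-ring entry. A hedged sketch of the typical touch point for code that writes guest memory through a pre-established mapping rather than the kvm_write_guest() helpers:

	/* Hypothetical caller: after writing guest memory directly (e.g.
	 * through a kvm_host_map), record the page so userspace sees it
	 * in the dirty log. */
	static void note_guest_write(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

The vCPU variant is preferred on vCPU paths because kvm_vcpu_gfn_to_memslot() can hit the last_used_slot cache; mark_page_dirty() performs the full lookup.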