Lines matching refs:gfn (references to gfn in KVM's x86 MMU shadow-paging code)

286 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));  in kvm_flush_remote_tlbs_sptep()  local
288 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
291 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
294 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte()
296 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
641 return sp->gfn; in kvm_mmu_page_get_gfn()
646 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
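
The two hits above are the arithmetic paths of kvm_mmu_page_get_gfn() (these excerpts appear to come from arch/x86/kvm/mmu/mmu.c in recent kernels): line 641 returns the page's own gfn, while line 646 derives the gfn translated by a given SPTE index on a direct shadow page. Below is a minimal stand-alone sketch of the line-646 calculation, assuming SPTE_LEVEL_BITS == 9 (512 entries per page table on x86-64); the helper name is local to the sketch, not a kernel symbol.

	typedef unsigned long long gfn_t;

	#define SPTE_LEVEL_BITS 9	/* 512 entries per page table */

	/* gfn translated by the SPTE at 'index' in a direct shadow page. */
	static gfn_t direct_index_to_gfn(gfn_t base_gfn, int level, int index)
	{
		/* Each SPTE at 'level' spans 512^(level - 1) 4KiB pages. */
		return base_gfn + ((gfn_t)index << ((level - 1) * SPTE_LEVEL_BITS));
	}

For example, index 3 of a level-2 (2MiB) page maps base_gfn + 3 * 512, and at level 1 the shift is zero so the gfn is simply base_gfn + index.
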
676 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation() argument
679 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access; in kvm_mmu_page_set_translation()
686 sp->gfn, kvm_mmu_page_get_access(sp, index), access); in kvm_mmu_page_set_translation()
688 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index), in kvm_mmu_page_set_translation()
691 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn); in kvm_mmu_page_set_translation()
697 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); in kvm_mmu_page_set_access() local
699 kvm_mmu_page_set_translation(sp, index, gfn, access); in kvm_mmu_page_set_access()
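
Line 679 shows how kvm_mmu_page_set_translation() packs both pieces of the cached translation into a single u64: the gfn in the upper bits and the small ACC_* access mask in the low PAGE_SHIFT bits, which is why the sanity checks at lines 686-691 can recover either field independently. A hedged sketch of that layout follows, using local names and a generic low-bits mask in place of the kernel's ACC_* constants.

	#include <stdint.h>

	#define SK_PAGE_SHIFT 12	/* x86 PAGE_SHIFT */

	/* Pack gfn and access into one entry; access must fit below PAGE_SHIFT. */
	static uint64_t pack_translation(uint64_t gfn, unsigned int access)
	{
		return (gfn << SK_PAGE_SHIFT) | access;
	}

	static uint64_t unpack_gfn(uint64_t entry)
	{
		return entry >> SK_PAGE_SHIFT;
	}

	static unsigned int unpack_access(uint64_t entry)
	{
		return entry & ((1u << SK_PAGE_SHIFT) - 1);
	}
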
706 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
711 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
724 gfn_t gfn, int count) in update_gfn_disallow_lpage_count() argument
730 linfo = lpage_info_slot(gfn, slot, i); in update_gfn_disallow_lpage_count()
738 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_disallow_lpage() argument
740 update_gfn_disallow_lpage_count(slot, gfn, 1); in kvm_mmu_gfn_disallow_lpage()
743 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_allow_lpage() argument
745 update_gfn_disallow_lpage_count(slot, gfn, -1); in kvm_mmu_gfn_allow_lpage()
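
lpage_info_slot() (line 711) locates the per-level large-page bookkeeping for a gfn by converting it into a slot-relative index at the requested level via gfn_to_index(); kvm_mmu_gfn_disallow_lpage() and kvm_mmu_gfn_allow_lpage() then bump or drop the disallow count at every level. A sketch of that indexing, assuming 9 index bits per paging level (x86-64) and the 4K level numbered 1; the helper name is local to the sketch rather than the kernel's:

	typedef unsigned long long gfn_t;

	/* Slot-relative index of the huge-page-sized region containing gfn. */
	static gfn_t sketch_gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
	{
		unsigned int shift = (level - 1) * 9;	/* 0 at the 4K level */

		return (gfn >> shift) - (base_gfn >> shift);
	}

The same indexing appears again at line 1022, where gfn_to_rmap() uses it to pick the rmap bucket for a gfn at a given level.
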
752 gfn_t gfn; in account_shadowed() local
764 gfn = sp->gfn; in account_shadowed()
766 slot = __gfn_to_memslot(slots, gfn); in account_shadowed()
770 return __kvm_write_track_add_gfn(kvm, slot, gfn); in account_shadowed()
772 kvm_mmu_gfn_disallow_lpage(slot, gfn); in account_shadowed()
774 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in account_shadowed()
775 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K); in account_shadowed()
809 gfn_t gfn; in unaccount_shadowed() local
812 gfn = sp->gfn; in unaccount_shadowed()
814 slot = __gfn_to_memslot(slots, gfn); in unaccount_shadowed()
816 return __kvm_write_track_remove_gfn(kvm, slot, gfn); in unaccount_shadowed()
818 kvm_mmu_gfn_allow_lpage(slot, gfn); in unaccount_shadowed()
838 gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
843 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in gfn_to_memslot_dirty_bitmap()
1017 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, in gfn_to_rmap() argument
1022 idx = gfn_to_index(gfn, slot->base_gfn, level); in gfn_to_rmap()
1031 gfn_t gfn; in rmap_remove() local
1035 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte)); in rmap_remove()
1044 slot = __gfn_to_memslot(slots, gfn); in rmap_remove()
1045 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in rmap_remove()
1319 struct kvm_memory_slot *slot, u64 gfn, in kvm_mmu_slot_gfn_write_protect() argument
1328 rmap_head = gfn_to_rmap(gfn, i, slot); in kvm_mmu_slot_gfn_write_protect()
1335 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level); in kvm_mmu_slot_gfn_write_protect()
1340 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) in kvm_vcpu_write_protect_gfn() argument
1344 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_protect_gfn()
1345 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); in kvm_vcpu_write_protect_gfn()
1363 gfn_t gfn; member
1375 iterator->gfn = iterator->start_gfn; in rmap_walk_init_level()
1376 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot); in rmap_walk_init_level()
1402 iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level); in slot_rmap_walk_next()
1451 iterator.gfn - start_gfn + 1); in __walk_slot_rmaps()
1525 u64 *spte, gfn_t gfn, unsigned int access) in __rmap_add() argument
1532 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access); in __rmap_add()
1535 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); in __rmap_add()
1542 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in __rmap_add()
1547 u64 *spte, gfn_t gfn, unsigned int access) in rmap_add() argument
1551 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access); in rmap_add()
1654 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn() argument
1656 return hash_64(gfn, KVM_MMU_HASH_SHIFT); in kvm_page_table_hashfn()
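
kvm_page_table_hashfn() (line 1656) buckets shadow pages by gfn; __kvm_mmu_get_shadow_page() (lines 2188-2193) uses the result to index kvm->arch.mmu_page_hash and scans that bucket, comparing sp->gfn and role (line 2069), before falling back to allocation. A sketch of the hashing step, with a Fibonacci-hashing multiplier standing in for the kernel's hash_64() and a 12-bit bucket count; treat both constants as assumptions of this sketch.

	#include <stdint.h>

	#define SK_MMU_HASH_SHIFT 12	/* assumed log2 of the bucket count */

	static unsigned int sketch_page_table_hashfn(uint64_t gfn)
	{
		/* Multiplicative hash: multiply, keep the top SHIFT bits. */
		return (unsigned int)((gfn * 0x61C8864680B583EBULL) >>
				      (64 - SK_MMU_HASH_SHIFT));
	}
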
1821 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2013 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn); in mmu_sync_children()
2059 gfn_t gfn, in kvm_mmu_find_shadow_page() argument
2069 if (sp->gfn != gfn) { in kvm_mmu_find_shadow_page()
2144 gfn_t gfn, in kvm_mmu_alloc_shadow_page() argument
2168 sp->gfn = gfn; in kvm_mmu_alloc_shadow_page()
2181 gfn_t gfn, in __kvm_mmu_get_shadow_page() argument
2188 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; in __kvm_mmu_get_shadow_page()
2190 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2193 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role); in __kvm_mmu_get_shadow_page()
2201 gfn_t gfn, in kvm_mmu_get_shadow_page() argument
2210 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role); in kvm_mmu_get_shadow_page()
2260 u64 *sptep, gfn_t gfn, in kvm_mmu_get_child_sp() argument
2269 return kvm_mmu_get_shadow_page(vcpu, gfn, role); in kvm_mmu_get_child_sp()
2725 gfn_t gfn, bool synchronizing, bool prefetch) in mmu_try_to_unsync_pages() argument
2735 if (kvm_gfn_is_write_tracked(kvm, slot, gfn)) in mmu_try_to_unsync_pages()
2744 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) { in mmu_try_to_unsync_pages()
2826 u64 *sptep, unsigned int pte_access, gfn_t gfn, in mmu_set_spte() argument
2844 mark_mmio_spte(vcpu, sptep, gfn, pte_access); in mmu_set_spte()
2870 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch, in mmu_set_spte()
2877 trace_kvm_mmu_set_spte(level, gfn, sptep); in mmu_set_spte()
2884 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level); in mmu_set_spte()
2888 rmap_add(vcpu, slot, sptep, gfn, pte_access); in mmu_set_spte()
2897 static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep, in kvm_mmu_prefetch_sptes() argument
2907 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK); in kvm_mmu_prefetch_sptes()
2911 nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages); in kvm_mmu_prefetch_sptes()
2915 for (i = 0; i < nr_pages; i++, gfn++, sptep++) { in kvm_mmu_prefetch_sptes()
2916 mmu_set_spte(vcpu, slot, sptep, access, gfn, in kvm_mmu_prefetch_sptes()
2938 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start)); in direct_pte_prefetch_many() local
2941 return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access); in direct_pte_prefetch_many()
3022 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level() argument
3041 hva = __gfn_to_hva_memslot(slot, gfn); in host_pfn_mapping_level()
3088 gfn_t gfn, int max_level, bool is_private) in __kvm_mmu_max_mapping_level() argument
3095 linfo = lpage_info_slot(gfn, slot, max_level); in __kvm_mmu_max_mapping_level()
3106 host_level = host_pfn_mapping_level(kvm, gfn, slot); in __kvm_mmu_max_mapping_level()
3111 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_max_mapping_level() argument
3114 kvm_mem_is_private(kvm, gfn); in kvm_mmu_max_mapping_level()
3116 return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private); in kvm_mmu_max_mapping_level()
3140 fault->gfn, fault->max_level, in kvm_mmu_hugepage_adjust()
3151 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask)); in kvm_mmu_hugepage_adjust()
3171 fault->pfn |= fault->gfn & page_mask; in disallowed_hugepage_adjust()
3181 gfn_t base_gfn = fault->gfn; in direct_map()
3194 base_gfn = gfn_round_for_level(fault->gfn, it.level); in direct_map()
3220 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_send_hwpoison_signal() argument
3222 unsigned long hva = gfn_to_hva_memslot(slot, gfn); in kvm_send_hwpoison_signal()
3243 kvm_send_hwpoison_signal(fault->slot, fault->gfn); in kvm_handle_error_pfn()
3261 vcpu_cache_mmio_info(vcpu, gva, fault->gfn, in kvm_handle_noslot_fault()
3283 if (unlikely(fault->gfn > kvm_mmu_max_gfn())) in kvm_handle_noslot_fault()
3317 fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) in page_fault_can_be_fast()
3369 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn); in fast_pf_fix_direct_spte()
3417 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte); in fast_page_fault()
3640 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, in mmu_alloc_root() argument
3652 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role); in mmu_alloc_root()
4170 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault() local
4179 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault()
4180 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault()
4204 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn)) in page_fault_handle_page_track()
4238 arch.gfn = fault->gfn; in kvm_arch_setup_async_pf()
4244 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch); in kvm_arch_setup_async_pf()
4331 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn, in kvm_mmu_faultin_pfn_private()
4354 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, in __kvm_mmu_faultin_pfn()
4367 trace_kvm_try_async_get_page(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
4368 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) { in __kvm_mmu_faultin_pfn()
4369 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn); in __kvm_mmu_faultin_pfn()
4384 fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll, in __kvm_mmu_faultin_pfn()
4397 if (KVM_BUG_ON(kvm_is_gfn_alias(kvm, fault->gfn), kvm)) in kvm_mmu_faultin_pfn()
4413 if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) { in kvm_mmu_faultin_pfn()
4475 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) in kvm_mmu_faultin_pfn()
4495 if (mmu_invalidate_retry_gfn_unsafe(kvm, fault->mmu_seq, fault->gfn)) { in kvm_mmu_faultin_pfn()
4533 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn); in is_page_fault_stale()
4917 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
4921 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
4926 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
5907 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_track_write() local
5932 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) { in kvm_mmu_track_write()
6681 gfn_t gfn; in shadow_mmu_get_sp_for_split() local
6683 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); in shadow_mmu_get_sp_for_split()
6699 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role); in shadow_mmu_get_sp_for_split()
6712 gfn_t gfn; in shadow_mmu_split_huge_page() local
6719 gfn = kvm_mmu_page_get_gfn(sp, index); in shadow_mmu_split_huge_page()
6742 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access); in shadow_mmu_split_huge_page()
6754 gfn_t gfn; in shadow_mmu_try_split_huge_page() local
6758 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep)); in shadow_mmu_try_split_huge_page()
6783 trace_kvm_mmu_split_huge_page(gfn, spte, level, r); in shadow_mmu_try_split_huge_page()
6923 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) { in kvm_mmu_zap_collapsible_spte()
7044 gfn_t gfn = slot->base_gfn + i; in kvm_mmu_zap_memslot_pages_and_flush() local
7046 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) in kvm_mmu_zap_memslot_pages_and_flush()
7404 slot = __gfn_to_memslot(slots, sp->gfn); in kvm_recover_nx_huge_pages()
7525 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, in hugepage_test_mixed() argument
7528 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG; in hugepage_test_mixed()
7531 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, in hugepage_clear_mixed() argument
7534 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG; in hugepage_clear_mixed()
7537 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, in hugepage_set_mixed() argument
7540 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG; in hugepage_set_mixed()
7544 gfn_t gfn, int level, unsigned long attrs) in hugepage_has_attrs() argument
7546 const unsigned long start = gfn; in hugepage_has_attrs()
7552 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) { in hugepage_has_attrs()
7553 if (hugepage_test_mixed(slot, gfn, level - 1) || in hugepage_has_attrs()
7554 attrs != kvm_get_memory_attributes(kvm, gfn)) in hugepage_has_attrs()
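
hugepage_has_attrs() (lines 7546-7554) decides whether a would-be huge page at 'level' is uniform: it walks the gfn range in steps of the next-lower level's page count and fails if any chunk is already marked mixed or reports different memory attributes. The sketch below models only the loop visible in the listing (the kernel function takes kvm/slot pointers and may have paths not shown here); the callback parameters are stand-ins for the lpage_info and attribute lookups.

	#include <stdbool.h>

	typedef unsigned long long gfn_t;

	#define SK_PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

	static bool sketch_hugepage_has_attrs(gfn_t gfn, int level, unsigned long attrs,
					      bool (*is_mixed)(gfn_t gfn, int level),
					      unsigned long (*get_attrs)(gfn_t gfn))
	{
		const gfn_t start = gfn;
		const gfn_t end = start + SK_PAGES_PER_HPAGE(level);

		for (gfn = start; gfn < end; gfn += SK_PAGES_PER_HPAGE(level - 1)) {
			/* Any mixed or differently-attributed chunk disqualifies it. */
			if (is_mixed(gfn, level - 1) || attrs != get_attrs(gfn))
				return false;
		}
		return true;
	}

kvm_arch_post_set_memory_attributes() (lines 7585 onward) uses this check for the partially covered head and tail blocks of the range, while fully covered, aligned blocks are cleared unconditionally in the loop at lines 7608-7609.
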
7585 gfn_t gfn = gfn_round_for_level(range->start, level); in kvm_arch_post_set_memory_attributes() local
7588 if (gfn != range->start || gfn + nr_pages > range->end) { in kvm_arch_post_set_memory_attributes()
7594 if (gfn >= slot->base_gfn && in kvm_arch_post_set_memory_attributes()
7595 gfn + nr_pages <= slot->base_gfn + slot->npages) { in kvm_arch_post_set_memory_attributes()
7596 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7597 hugepage_clear_mixed(slot, gfn, level); in kvm_arch_post_set_memory_attributes()
7599 hugepage_set_mixed(slot, gfn, level); in kvm_arch_post_set_memory_attributes()
7601 gfn += nr_pages; in kvm_arch_post_set_memory_attributes()
7608 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages) in kvm_arch_post_set_memory_attributes()
7609 hugepage_clear_mixed(slot, gfn, level); in kvm_arch_post_set_memory_attributes()
7616 if (gfn < range->end && in kvm_arch_post_set_memory_attributes()
7617 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) { in kvm_arch_post_set_memory_attributes()
7618 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_arch_post_set_memory_attributes()
7619 hugepage_clear_mixed(slot, gfn, level); in kvm_arch_post_set_memory_attributes()
7621 hugepage_set_mixed(slot, gfn, level); in kvm_arch_post_set_memory_attributes()
7644 gfn_t gfn; in kvm_mmu_init_memslot_memory_attributes() local
7653 for (gfn = start; gfn < end; gfn += nr_pages) { in kvm_mmu_init_memslot_memory_attributes()
7654 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn); in kvm_mmu_init_memslot_memory_attributes()
7656 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs)) in kvm_mmu_init_memslot_memory_attributes()
7657 hugepage_clear_mixed(slot, gfn, level); in kvm_mmu_init_memslot_memory_attributes()
7659 hugepage_set_mixed(slot, gfn, level); in kvm_mmu_init_memslot_memory_attributes()