| /linux/arch/loongarch/kvm/ |
| mmu.c |
    366   gfn_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked() local
    367   gfn_t start = base_gfn + __ffs(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    368   gfn_t end = base_gfn + __fls(mask) + 1;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    373   ctx.gfn = base_gfn;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    392   if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))  in kvm_arch_prepare_memory_region()
    397   gpa_start = new->base_gfn << PAGE_SHIFT;  in kvm_arch_prepare_memory_region()
    479   needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,  in kvm_arch_commit_memory_region()
    480   new->base_gfn + new->npages);  in kvm_arch_commit_memory_region()
    498   kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1);  in kvm_arch_flush_shadow_memslot()
|
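The loongarch hits above (and the riscv/arm64 variants further down) all turn a 64-bit dirty bitmask plus a gfn_offset into a contiguous gfn range with __ffs()/__fls(). Below is a minimal userspace sketch of that arithmetic only, assuming a simplified memslot struct and GCC builtins in place of the kernel helpers; the names are illustrative, not the kernel's.

/* Sketch of the mask-to-range arithmetic used by the
 * kvm_arch_mmu_enable_log_dirty_pt_masked() implementations listed above.
 * The struct and helpers are simplified stand-ins, not the kernel types. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;      /* first guest frame covered by the slot */
	uint64_t npages;
};

/* __ffs()/__fls() analogues; mask must be non-zero. */
static unsigned int lowest_set(uint64_t mask)  { return __builtin_ctzll(mask); }
static unsigned int highest_set(uint64_t mask) { return 63 - __builtin_clzll(mask); }

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100000, .npages = 512 };
	gfn_t gfn_offset = 64;          /* offset of the 64-page chunk the mask describes */
	uint64_t mask = 0x00f0;         /* dirty pages 4..7 within that chunk */

	gfn_t base  = slot.base_gfn + gfn_offset;
	gfn_t start = base + lowest_set(mask);       /* first dirty gfn   */
	gfn_t end   = base + highest_set(mask) + 1;  /* one past the last */

	printf("dirty gfn range: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}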
| /linux/arch/riscv/kvm/ |
| mmu.c |
    23    phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;  in mmu_wp_memory_region()
    24    phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in mmu_wp_memory_region()
    107   phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked() local
    108   phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    109   phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    140   gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  in kvm_arch_flush_shadow_memslot()
    188   if ((new->base_gfn + new->npages) >=  in kvm_arch_prepare_memory_region()
|
| /linux/arch/x86/kvm/ |
| mmu.h |
    265   static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  in gfn_to_index() argument
    269   (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));  in gfn_to_index()
    276   return gfn_to_index(slot->base_gfn + npages - 1,  in __kvm_mmu_slot_lpages()
    277   slot->base_gfn, level) + 1;  in __kvm_mmu_slot_lpages()
|
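gfn_to_index() above maps a gfn to a slot-relative index in the per-level metadata arrays by shifting both the gfn and base_gfn down by the level's huge-page shift, and __kvm_mmu_slot_lpages() derives the array size from the slot's last gfn. A standalone sketch of that arithmetic follows, assuming x86's 9 bits per level for KVM_HPAGE_GFN_SHIFT(); the macro names and constants here are simplified stand-ins.

/* Standalone sketch of the gfn_to_index()/__kvm_mmu_slot_lpages() arithmetic
 * shown above; KVM_HPAGE_GFN_SHIFT() is approximated with 9 bits per level
 * (4K/2M/1G on x86) and the types are simplified. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PG_LEVEL_4K 1
/* Each level above 4K covers 512x more pages on x86: 9 bits per level. */
#define HPAGE_GFN_SHIFT(level) (((level) - PG_LEVEL_4K) * 9)

static unsigned long gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) - (base_gfn >> HPAGE_GFN_SHIFT(level));
}

/* Number of entries the slot needs in its per-level metadata array. */
static unsigned long slot_lpages(gfn_t base_gfn, unsigned long npages, int level)
{
	return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
}

int main(void)
{
	gfn_t base = 0x100000;            /* slot starts at guest physical 4 GiB */
	unsigned long npages = 1UL << 20; /* 4 GiB worth of 4K pages */

	printf("2M-level entries: %lu\n", slot_lpages(base, npages, 2)); /* 2048 */
	printf("1G-level entries: %lu\n", slot_lpages(base, npages, 3)); /* 4    */
	return 0;
}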
| x86.c |
    13432   if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))  in kvm_alloc_memslot_metadata()
    13434   if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))  in kvm_alloc_memslot_metadata()
    13441   if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {  in kvm_alloc_memslot_metadata()
    13497   if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())  in kvm_arch_prepare_memory_region()
    13500   if (kvm_is_gfn_alias(kvm, new->base_gfn + new->npages - 1))  in kvm_arch_prepare_memory_region()
|
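The kvm_alloc_memslot_metadata() hits above are alignment tests: a slot can only be mapped with hugepages of a given level if its start, its end, and its guest-to-host offset are all aligned to that level's page count. Here is a hedged sketch of the same checks, with PAGES_PER_HPAGE() standing in for x86's KVM_PAGES_PER_HPAGE() and slot_allows_hugepages() being an illustrative helper name, not a kernel function.

/* Sketch of the hugepage-alignment checks performed above; in the kernel the
 * failing cases mark disallow_lpage, here they simply return false. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

static bool slot_allows_hugepages(gfn_t base_gfn, uint64_t npages,
				  gfn_t ugfn /* userspace_addr >> PAGE_SHIFT */,
				  int level)
{
	uint64_t mask = PAGES_PER_HPAGE(level) - 1;

	if (base_gfn & mask)                 /* start not hugepage aligned   */
		return false;
	if ((base_gfn + npages) & mask)      /* end not hugepage aligned     */
		return false;
	if ((base_gfn ^ ugfn) & mask)        /* guest/host offset misaligned */
		return false;
	return true;
}

int main(void)
{
	/* 2M-aligned 1 GiB slot whose host mapping is also 2M aligned. */
	printf("%d\n", slot_allows_hugepages(0x100000, 1ULL << 18, 0x200000, 2));
	return 0;
}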
| /linux/arch/x86/kvm/mmu/ |
| page_track.c |
    80    index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_write_track()
    147   index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in kvm_gfn_is_write_tracked()
    311   n->track_remove_region(slot->base_gfn, slot->npages, n);  in kvm_page_track_delete_slot()
|
| tdp_mmu.c |
    402    gfn_t base_gfn = sp->gfn;  in handle_removed_pt() local
    411    gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);  in handle_removed_pt()
    484    WARN_ON(kvm_x86_call(free_external_spt)(kvm, base_gfn, sp->role.level,  in handle_removed_pt()
    1496   spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,  in kvm_tdp_mmu_wrprot_slot()
    1497   slot->base_gfn + slot->npages, min_level);  in kvm_tdp_mmu_wrprot_slot()
    1710   clear_dirty_gfn_range(kvm, root, slot->base_gfn,  in kvm_tdp_mmu_clear_dirty_slot()
    1711   slot->base_gfn + slot->npages);  in kvm_tdp_mmu_clear_dirty_slot()
    1800   gfn_t start = slot->base_gfn;  in recover_huge_pages_range()
|
| paging_tmpl.h |
    621   gfn_t base_gfn = fault->gfn;  in FNAME() local
    623   WARN_ON_ONCE(gw->gfn != base_gfn);  in FNAME()
    722   base_gfn = gfn_round_for_level(fault->gfn, it.level);  in FNAME()
    728   sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,  in FNAME()
    743   base_gfn, fault->pfn, fault);  in FNAME()
|
| mmu.c |
    712    idx = gfn_to_index(gfn, slot->base_gfn, level);  in lpage_info_slot()
    1180   idx = gfn_to_index(gfn, slot->base_gfn, level);  in gfn_to_rmap()
    1385   slot->base_gfn + gfn_offset, mask, true);  in kvm_mmu_write_protect_pt_masked()
    1391   rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),  in kvm_mmu_write_protect_pt_masked()
    1408   slot->base_gfn + gfn_offset, mask, false);  in kvm_mmu_clear_dirty_pt_masked()
    1414   rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),  in kvm_mmu_clear_dirty_pt_masked()
    1439   gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1440   gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1625   slot->base_gfn, slot->base_gfn + slot->npages - 1,  in walk_slot_rmaps()
    3438   gfn_t base_gfn = fault->gfn;  in direct_map() local
    [all …]
|
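kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked() above walk the dirty mask one set bit at a time, mapping each bit to a gfn and clearing the lowest bit with mask &= mask - 1. A small sketch of that loop shape, using a GCC builtin in place of __ffs() and a placeholder visit_gfn() instead of the real rmap work:

/* Sketch of the per-bit walk pattern: visit each set bit of a 64-bit dirty
 * mask, convert it to a gfn, then drop the lowest set bit and continue. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

static void visit_gfn(gfn_t gfn)
{
	printf("write-protect gfn %#llx\n", (unsigned long long)gfn);
}

static void for_each_dirty_gfn(gfn_t base_gfn, gfn_t gfn_offset, uint64_t mask)
{
	while (mask) {
		visit_gfn(base_gfn + gfn_offset + __builtin_ctzll(mask));
		mask &= mask - 1;   /* clear the lowest set bit */
	}
}

int main(void)
{
	for_each_dirty_gfn(0x100000, 0, 0x91);   /* bits 0, 4 and 7 are dirty */
	return 0;
}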
| /linux/arch/arm64/kvm/ |
| mmu.c |
    354    phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;  in stage2_flush_memslot()
    1031   phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;  in stage2_unmap_memslot()
    1247   start = memslot->base_gfn << PAGE_SHIFT;  in kvm_mmu_wp_memory_region()
    1248   end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in kvm_mmu_wp_memory_region()
    1277   start = memslot->base_gfn << PAGE_SHIFT;  in kvm_mmu_split_memory_region()
    1278   end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in kvm_mmu_split_memory_region()
    1300   phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked() local
    1301   phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1302   phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    1345   gpa_start = memslot->base_gfn << PAGE_SHIFT;  in fault_supports_stage2_huge_mapping()
    [all …]
|
| /linux/arch/powerpc/kvm/ |
| book3s_hv_uvmem.c |
    261   p->base_pfn = slot->base_gfn;  in kvmppc_uvmem_slot_init()
    279   if (p->base_pfn == slot->base_gfn) {  in kvmppc_uvmem_slot_free()
    394   unsigned long gfn = memslot->base_gfn;  in kvmppc_memslot_page_merge()
    450   memslot->base_gfn << PAGE_SHIFT,  in __kvmppc_uvmem_memslot_create()
    624   gfn = slot->base_gfn;  in kvmppc_uvmem_drop_pages()
    797   unsigned long gfn = memslot->base_gfn;  in kvmppc_uv_migrate_mem_slot()
|
| book3s_hv_rm_mmu.c |
    104   gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
    142   rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);  in revmap_for_hpte()
    242   slot_fn = gfn - memslot->base_gfn;  in kvmppc_do_h_enter()
|
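The powerpc hits above index per-slot arrays (the dirty map, arch.rmap[]) with the slot-relative offset gfn - base_gfn. The following is a minimal sketch of that indexing applied to a per-slot dirty bitmap; the helper name and bitmap layout are illustrative, not the kernel's.

/* Sketch: pick the dirty-bitmap bit for a gfn by its offset within the slot. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct memslot {
	gfn_t base_gfn;
	uint64_t npages;
	unsigned long *dirty_bitmap;   /* one bit per page in the slot */
};

static void mark_page_dirty_in_slot(struct memslot *slot, gfn_t gfn)
{
	uint64_t rel = gfn - slot->base_gfn;   /* slot-relative page index */

	if (rel >= slot->npages)
		return;
	slot->dirty_bitmap[rel / BITS_PER_LONG] |= 1UL << (rel % BITS_PER_LONG);
}

int main(void)
{
	unsigned long bits[2] = { 0 };
	struct memslot slot = { .base_gfn = 0x1000, .npages = 128,
				.dirty_bitmap = bits };

	mark_page_dirty_in_slot(&slot, 0x1042);
	printf("bitmap[1] = %#lx\n", bits[1]);   /* bit 2 set with 64-bit longs */
	return 0;
}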
| e500_mmu_host.c |
    407   slot_start = pfn - (gfn - slot->base_gfn);  in kvmppc_e500_shadow_map()
|
| /linux/virt/kvm/ |
| guest_memfd.c |
    59    return gfn - slot->base_gfn + slot->gmem.pgoff;  in kvm_gmem_get_index()
    67    gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;  in __kvm_gmem_prepare_folio()
    178   .start = slot->base_gfn + max(pgoff, start) - pgoff,  in __kvm_gmem_invalidate_begin()
    179   .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,  in __kvm_gmem_invalidate_begin()
    850   npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);  in kvm_gmem_populate()
|
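kvm_gmem_get_index() and __kvm_gmem_prepare_folio() above convert between a gfn and a guest_memfd file index by offsetting against base_gfn and gmem.pgoff. A self-contained sketch of the two conversions, with a simplified stand-in struct for the memslot/gmem binding:

/* Sketch of the gfn <-> guest_memfd file-index mapping: the slot binds
 * [base_gfn, base_gfn + npages) to file offsets starting at pgoff, so both
 * directions are plain offset arithmetic. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pgoff_t;

struct gmem_slot {
	gfn_t base_gfn;
	uint64_t npages;
	pgoff_t pgoff;       /* file index backing base_gfn */
};

static pgoff_t gfn_to_gmem_index(const struct gmem_slot *s, gfn_t gfn)
{
	return gfn - s->base_gfn + s->pgoff;
}

static gfn_t gmem_index_to_gfn(const struct gmem_slot *s, pgoff_t index)
{
	return s->base_gfn + index - s->pgoff;
}

int main(void)
{
	struct gmem_slot s = { .base_gfn = 0x100000, .npages = 256, .pgoff = 16 };
	pgoff_t idx = gfn_to_gmem_index(&s, 0x100010);

	printf("index %llu -> gfn %#llx\n", (unsigned long long)idx,
	       (unsigned long long)gmem_index_to_gfn(&s, idx));
	return 0;
}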
| kvm_main.c |
    339    kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);  in kvm_flush_remote_tlbs_memslot()
    1496   if (slot->base_gfn < tmp->base_gfn)  in kvm_insert_gfn_node()
    1498   else if (slot->base_gfn > tmp->base_gfn)  in kvm_insert_gfn_node()
    1520   WARN_ON_ONCE(old->base_gfn != new->base_gfn);  in kvm_replace_gfn_node()
    1579   if (old && old->base_gfn == new->base_gfn) {  in kvm_replace_memslot()
    1811   dest->base_gfn = src->base_gfn;  in kvm_copy_memslot()
    2024   gfn_t base_gfn;  in kvm_set_memory_region() local
    2086   base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);  in kvm_set_memory_region()
    2107   if (base_gfn != old->base_gfn)  in kvm_set_memory_region()
    2116   kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))  in kvm_set_memory_region()
    [all …]
|
| /linux/include/linux/ |
| kvm_host.h |
    598    gfn_t base_gfn;  member
    1151   if (start < slot->base_gfn) {  in kvm_memslot_iter_start()
    1189   if (iter->slot->base_gfn + iter->slot->npages <= start)  in kvm_memslot_iter_start()
    1203   return iter->slot->base_gfn < end;  in kvm_memslot_iter_is_valid()
    1800   if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)  in try_get_memslot()
    1823   if (gfn >= slot->base_gfn) {  in search_memslots()
    1824   if (gfn < slot->base_gfn + slot->npages)  in search_memslots()
    1873   unsigned long offset = gfn - slot->base_gfn;  in __gfn_to_hva_memslot()
    1888   return slot->base_gfn + gfn_offset;  in hva_to_gfn_memslot()
|
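try_get_memslot(), search_memslots() and __gfn_to_hva_memslot() above rely on a slot covering [base_gfn, base_gfn + npages) and being backed by host memory at userspace_addr. A userspace sketch of the membership test and the gfn-to-host-VA translation under those assumptions; PAGE_SHIFT and the struct are simplified stand-ins, not the kernel definitions:

/* Sketch: a gfn belongs to a slot when base_gfn <= gfn < base_gfn + npages,
 * and its host virtual address is userspace_addr plus the page offset into
 * the slot shifted up by PAGE_SHIFT. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;    /* host VA backing base_gfn */
};

static bool gfn_in_slot(const struct memslot *s, gfn_t gfn)
{
	return gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
}

static uint64_t gfn_to_hva(const struct memslot *s, gfn_t gfn)
{
	return s->userspace_addr + ((gfn - s->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	struct memslot s = { .base_gfn = 0x100, .npages = 64,
			     .userspace_addr = 0x7f0000000000ULL };
	gfn_t gfn = 0x110;

	if (gfn_in_slot(&s, gfn))
		printf("gfn %#llx -> hva %#llx\n", (unsigned long long)gfn,
		       (unsigned long long)gfn_to_hva(&s, gfn));
	return 0;
}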
| /linux/arch/s390/kvm/ |
| pv.c |
    321   while (slot && slot->base_gfn < pages_2g) {  in kvm_s390_destroy_lower_2g()
    322   len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;  in kvm_s390_destroy_lower_2g()
    325   slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);  in kvm_s390_destroy_lower_2g()
|
| kvm-s390.c |
    708    cur_gfn = memslot->base_gfn;  in kvm_arch_sync_dirty_log()
    709    last_gfn = memslot->base_gfn + memslot->npages;  in kvm_arch_sync_dirty_log()
    2271   unsigned long ofs = cur_gfn - ms->base_gfn;  in kvm_s390_next_dirty_cmma()
    2274   if (ms->base_gfn + ms->npages <= cur_gfn) {  in kvm_s390_next_dirty_cmma()
    2284   if (cur_gfn < ms->base_gfn)  in kvm_s390_next_dirty_cmma()
    2292   return ms->base_gfn + ofs;  in kvm_s390_next_dirty_cmma()
    2319   if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))  in kvm_s390_get_cmma()
    2337   if (cur_gfn - ms->base_gfn >= ms->npages) {  in kvm_s390_get_cmma()
    5963   if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)  in kvm_arch_prepare_memory_region()
    5998   rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,  in kvm_arch_commit_memory_region()
    [all …]
|
| kvm-s390.h |
    267   return ms->base_gfn + ms->npages;  in kvm_s390_get_gfn_end()
|
| /linux/arch/mips/kvm/ |
| mips.c |
    198   kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,  in kvm_arch_flush_shadow_memslot()
    199   slot->base_gfn + slot->npages - 1);  in kvm_arch_flush_shadow_memslot()
    233   needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,  in kvm_arch_commit_memory_region()
    234   new->base_gfn + new->npages - 1);  in kvm_arch_commit_memory_region()
|