
Searched refs:memslot (Results 1 – 25 of 37) sorted by relevance


/linux/arch/powerpc/kvm/
book3s_64_mmu_hv.c
206 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
220 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
516 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
579 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
581 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
584 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
592 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
604 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
606 pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0, in kvmppc_book3s_hv_page_fault()
689 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
[all …]
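
The kvmppc_book3s_hv_page_fault() excerpt above follows the usual KVM fault-handling pattern: look up the memslot for the faulting gfn, reject missing or invalid slots, then translate the gfn to a host virtual address before faulting in the pfn. Below is a minimal, userspace-only sketch of that arithmetic; struct memslot, slot_lookup() and slot_gfn_to_hva() are simplified stand-ins invented for illustration, not the kernel's kvm_memory_slot, gfn_to_memslot() or gfn_to_hva_memslot().

/*
 * Simplified stand-ins for the lookup pattern above: find the slot that
 * covers a guest frame number, then compute the host virtual address
 * backing that frame.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define SLOT_INVALID (1u << 0)   /* plays the role of KVM_MEMSLOT_INVALID */

struct memslot {
	uint64_t base_gfn;        /* first guest frame covered by the slot */
	uint64_t npages;          /* length of the slot in pages */
	uint64_t userspace_addr;  /* host VA backing base_gfn */
	uint32_t flags;
};

/* Linear scan standing in for gfn_to_memslot(); the kernel uses sorted structures. */
static struct memslot *slot_lookup(struct memslot *slots, size_t n, uint64_t gfn)
{
	for (size_t i = 0; i < n; i++) {
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return &slots[i];
	}
	return NULL;
}

/* Same arithmetic as gfn_to_hva_memslot(): slot-relative offset scaled to bytes. */
static uint64_t slot_gfn_to_hva(const struct memslot *slot, uint64_t gfn)
{
	return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	struct memslot slots[] = {
		{ .base_gfn = 0x100, .npages = 0x200, .userspace_addr = 0x7f0000000000ull },
	};
	uint64_t gfn = 0x180;
	struct memslot *slot = slot_lookup(slots, 1, gfn);

	if (!slot || (slot->flags & SLOT_INVALID)) {
		/* the fault handler bails out here and treats the access as MMIO */
		fprintf(stderr, "no usable slot for gfn 0x%llx\n", (unsigned long long)gfn);
		return 1;
	}
	printf("gfn 0x%llx -> hva 0x%llx\n", (unsigned long long)gfn,
	       (unsigned long long)slot_gfn_to_hva(slot, gfn));
	return 0;
}
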
book3s_hv_uvmem.c
361 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, in kvmppc_next_nontransitioned_gfn() argument
392 const struct kvm_memory_slot *memslot, bool merge) in kvmppc_memslot_page_merge() argument
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
404 end = start + (memslot->npages << PAGE_SHIFT); in kvmppc_memslot_page_merge()
431 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_delete() argument
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
434 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
435 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
439 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_create() argument
443 if (kvmppc_memslot_page_merge(kvm, memslot, false)) in __kvmppc_uvmem_memslot_create()
[all …]
book3s_64_mmu_radix.c
425 const struct kvm_memory_slot *memslot, in kvmppc_unmap_pte() argument
441 if (!memslot) { in kvmppc_unmap_pte()
442 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
443 if (!memslot) in kvmppc_unmap_pte()
456 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); in kvmppc_unmap_pte()
458 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) in kvmppc_unmap_pte()
459 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
823 struct kvm_memory_slot *memslot, in kvmppc_book3s_instantiate_page() argument
842 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_instantiate_page()
843 pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0, in kvmppc_book3s_instantiate_page()
[all …]
book3s_hv_rm_mmu.c
96 void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, in kvmppc_update_dirty_map() argument
101 if (!psize || !memslot->dirty_bitmap) in kvmppc_update_dirty_map()
104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
112 struct kvm_memory_slot *memslot; in kvmppc_set_dirty_from_hpte() local
118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
119 if (memslot && memslot->dirty_bitmap) in kvmppc_set_dirty_from_hpte()
120 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
129 struct kvm_memory_slot *memslot; in revmap_for_hpte() local
134 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
[all …]
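
kvmppc_update_dirty_map() above rebases the gfn against the slot's base_gfn and sets one bit per page of the mapping in the slot's dirty bitmap. A minimal sketch of that indexing follows, assuming a plain non-atomic bitmap; mark_dirty() is a hypothetical stand-in for set_dirty_bits_atomic(), which uses atomic bitops.

/*
 * Minimal sketch of the dirty-map update above: the gfn is made relative
 * to the slot's base_gfn and one bit per page of the mapping is set.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void mark_dirty(unsigned long *bitmap, uint64_t base_gfn,
		       uint64_t gfn, uint64_t psize)
{
	uint64_t npages = psize >> PAGE_SHIFT;   /* pages covered by this mapping */
	uint64_t bit = gfn - base_gfn;           /* slot-relative page index */

	while (npages--) {
		bitmap[bit / (8 * sizeof(unsigned long))] |=
			1ul << (bit % (8 * sizeof(unsigned long)));
		bit++;
	}
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	/* a 16-page mapping starting at gfn 0x1008 in a slot based at gfn 0x1000 */
	mark_dirty(bitmap, 0x1000, 0x1008, 16 << PAGE_SHIFT);
	printf("word0 = %#lx\n", bitmap[0]);   /* bits 8..23 set */
	return 0;
}
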
book3s_hv_nested.c
805 struct kvm_memory_slot *memslot; in kvmhv_release_all_nested() local
825 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) in kvmhv_release_all_nested()
826 kvmhv_free_memslot_nest_rmap(memslot); in kvmhv_release_all_nested()
1037 const struct kvm_memory_slot *memslot, in kvmhv_remove_nest_rmap_range() argument
1044 if (!memslot) in kvmhv_remove_nest_rmap_range()
1046 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
1053 unsigned long *rmap = &memslot->arch.rmap[gfn]; in kvmhv_remove_nest_rmap_range()
1527 struct kvm_memory_slot *memslot; in __kvmhv_nested_page_fault() local
1596 memslot = gfn_to_memslot(kvm, gfn); in __kvmhv_nested_page_fault()
1597 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { in __kvmhv_nested_page_fault()
[all …]
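
The nested-HV code above walks every memslot with kvm_for_each_memslot() and, in kvmhv_remove_nest_rmap_range(), turns a guest physical address into a slot-relative index into memslot->arch.rmap[]. The sketch below shows only that indexing step; struct slot and its fixed-size rmap[] are illustrative stand-ins, not the kernel's types.

/*
 * Sketch of the rmap indexing used above: reduce a guest physical address
 * to a page frame number, rebase it against the slot, and use the result
 * to index a per-slot reverse-map array.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot {
	uint64_t base_gfn;
	uint64_t npages;
	unsigned long rmap[64];   /* one reverse-map word per guest page */
};

int main(void)
{
	struct slot s = { .base_gfn = 0x2000, .npages = 64 };
	uint64_t gpa = 0x2010ull << PAGE_SHIFT;   /* somewhere inside the slot */
	uint64_t gfn = (gpa >> PAGE_SHIFT) - s.base_gfn;

	if (gfn >= s.npages) {
		fprintf(stderr, "gpa outside slot\n");
		return 1;
	}
	/* the nested-HV code walks and prunes the rmap chain anchored here */
	unsigned long *rmap = &s.rmap[gfn];
	printf("gpa %#llx -> rmap index %llu (%p)\n",
	       (unsigned long long)gpa, (unsigned long long)gfn, (void *)rmap);
	return 0;
}
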
book3s.h
11 struct kvm_memory_slot *memslot);
book3s_64_vio.c
356 struct kvm_memory_slot *memslot; in kvmppc_tce_to_ua() local
358 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); in kvmppc_tce_to_ua()
359 if (!memslot) in kvmppc_tce_to_ua()
362 *ua = __gfn_to_hva_memslot(memslot, gfn) | in kvmppc_tce_to_ua()
book3s_pr.c
1865 struct kvm_memory_slot *memslot; in kvm_vm_ioctl_get_dirty_log_pr() local
1874 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log_pr()
1880 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
1881 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log_pr()
1886 n = kvm_dirty_bitmap_bytes(memslot); in kvm_vm_ioctl_get_dirty_log_pr()
1887 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log_pr()
1897 struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot_pr() argument
book3s.c
848 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
863 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvmppc_core_flush_memslot() argument
865 kvm->arch.kvm_ops->flush_memslot(kvm, memslot); in kvmppc_core_flush_memslot()
/linux/arch/riscv/kvm/
mmu.c
22 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); in mmu_wp_memory_region() local
23 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in mmu_wp_memory_region()
24 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in mmu_wp_memory_region()
32 kvm_flush_remote_tlbs_memslot(kvm, memslot); in mmu_wp_memory_region()
108 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
288 static bool fault_supports_gstage_huge_mapping(struct kvm_memory_slot *memslot, in fault_supports_gstage_huge_mapping() argument
295 size = memslot->npages * PAGE_SIZE; in fault_supports_gstage_huge_mapping()
296 uaddr_start = memslot->userspace_addr; in fault_supports_gstage_huge_mapping()
299 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_gstage_huge_mapping()
396 struct kvm_memory_slot *memslot, in transparent_hugepage_adjust() argument
[all …]
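
fault_supports_gstage_huge_mapping() above decides whether a fault may be satisfied with a block (huge) mapping: the slot's host virtual and guest physical addresses must be mutually aligned with respect to the candidate block size, and the block must not spill past the slot. The sketch below keeps only that core arithmetic and is a deliberate simplification under those assumptions; the real helper also considers dirty logging and slot-edge cases, and huge_mapping_ok() is a hypothetical name.

/*
 * Simplified huge-mapping suitability check: host VA and guest PA must
 * share the same offset within a map_size block, and the block containing
 * the faulting gpa must lie entirely inside the slot.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot {
	uint64_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;
};

static bool huge_mapping_ok(const struct slot *s, uint64_t gpa, uint64_t map_size)
{
	uint64_t gpa_start   = s->base_gfn << PAGE_SHIFT;
	uint64_t gpa_end     = gpa_start + (s->npages << PAGE_SHIFT);
	uint64_t uaddr_start = s->userspace_addr;

	/* hva and gpa must line up identically within a map_size block ... */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/* ... and the block containing gpa must sit entirely inside the slot */
	uint64_t block_start = gpa & ~(map_size - 1);
	return block_start >= gpa_start && block_start + map_size <= gpa_end;
}

int main(void)
{
	struct slot s = {
		.base_gfn = 0x80000,                  /* slot starts at guest 2 GiB */
		.npages = 0x40000,                    /* 1 GiB slot */
		.userspace_addr = 0x7f4000000000ull,  /* 2 MiB-aligned host VA */
	};
	printf("2M mapping ok: %d\n", huge_mapping_ok(&s, 0x80010000ull, 2 << 20));
	return 0;
}
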
vcpu_exit.c
19 struct kvm_memory_slot *memslot; in gstage_page_fault() local
27 memslot = gfn_to_memslot(vcpu->kvm, gfn); in gstage_page_fault()
28 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in gstage_page_fault()
46 ret = kvm_riscv_mmu_map(vcpu, memslot, fault_addr, hva, in gstage_page_fault()
/linux/arch/arm64/kvm/
mmu.c
164 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
166 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
355 struct kvm_memory_slot *memslot) in stage2_flush_memslot() argument
357 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
373 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
380 kvm_for_each_memslot(memslot, bkt, slots) in stage2_flush_vm()
381 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
1029 struct kvm_memory_slot *memslot) in stage2_unmap_memslot() argument
1031 hva_t hva = memslot->userspace_addr; in stage2_unmap_memslot()
[all …]
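
The arm64 excerpt shows two small per-slot helpers: memslot_is_logging(), which treats a slot as dirty-logged when it has a dirty bitmap and is not read-only, and stage2_flush_memslot(), which derives the guest-physical range to walk from base_gfn and npages. A stand-alone restatement follows; struct slot and SLOT_READONLY are simplified stand-ins for the kernel's kvm_memory_slot and KVM_MEM_READONLY.

/*
 * Stand-ins for the two helpers above: the dirty-logging test and the
 * guest-physical range covered by a slot.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define SLOT_READONLY (1u << 1)   /* plays the role of KVM_MEM_READONLY */

struct slot {
	uint64_t base_gfn;
	uint64_t npages;
	uint32_t flags;
	unsigned long *dirty_bitmap;  /* non-NULL once dirty logging is enabled */
};

static bool slot_is_logging(const struct slot *s)
{
	return s->dirty_bitmap && !(s->flags & SLOT_READONLY);
}

int main(void)
{
	unsigned long bitmap[1] = { 0 };
	struct slot s = { .base_gfn = 0x1000, .npages = 0x100, .dirty_bitmap = bitmap };

	/* the flush walks exactly the [start, end) guest-physical range of the slot */
	uint64_t start = s.base_gfn << PAGE_SHIFT;
	uint64_t end   = start + (s.npages << PAGE_SHIFT);

	printf("logging=%d, flush range [%#llx, %#llx)\n", slot_is_logging(&s),
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
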
nested.c
1266 struct kvm_memory_slot *memslot; in kvm_translate_vncr() local
1309 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_translate_vncr()
1310 if (!memslot) in kvm_translate_vncr()
1313 *is_gmem = kvm_slot_has_gmem(memslot); in kvm_translate_vncr()
1315 pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0, in kvm_translate_vncr()
1320 ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL); in kvm_translate_vncr()
/linux/arch/loongarch/kvm/
mmu.c
608 static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, in fault_supports_huge_mapping() argument
614 if (kvm_slot_dirty_track_enabled(memslot) && write) in fault_supports_huge_mapping()
617 if (kvm_hugepage_capable(memslot)) in fault_supports_huge_mapping()
620 if (kvm_hugepage_incapable(memslot)) in fault_supports_huge_mapping()
623 start = memslot->userspace_addr; in fault_supports_huge_mapping()
624 end = start + memslot->npages * PAGE_SIZE; in fault_supports_huge_mapping()
781 struct kvm_memory_slot *memslot; in kvm_map_page() local
791 memslot = gfn_to_memslot(kvm, gfn); in kvm_map_page()
792 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable); in kvm_map_page()
860 if (write || !kvm_slot_dirty_track_enabled(memslot)) in kvm_map_page()
[all …]
/linux/virt/kvm/
dirty_ring.c
55 struct kvm_memory_slot *memslot; in kvm_reset_dirty_gfn() local
64 memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id); in kvm_reset_dirty_gfn()
66 if (!memslot || (offset + __fls(mask)) >= memslot->npages) in kvm_reset_dirty_gfn()
70 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); in kvm_reset_dirty_gfn()
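
kvm_reset_dirty_gfn() above validates a harvested dirty-ring entry before acting on it: the slot must still exist, and the highest bit set in the mask, added to the entry's offset, must fall inside the slot. A small restatement of that bounds check; fls64() and entry_in_slot() are local illustrative helpers, not the kernel's __fls().

/*
 * Bounds check in the style of kvm_reset_dirty_gfn(): an entry covers the
 * pages (offset + bit) for each bit set in mask, so the highest set bit
 * must still lie within the slot's npages.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)   /* index of the highest set bit, -1 if none */
{
	int i = -1;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

static bool entry_in_slot(uint64_t slot_npages, uint64_t offset, uint64_t mask)
{
	if (!mask)
		return false;
	return offset + fls64(mask) < slot_npages;
}

int main(void)
{
	/* a 0x100-page slot: offset 0xf8 with mask 0xff touches pages 0xf8..0xff */
	printf("ok=%d\n", entry_in_slot(0x100, 0xf8, 0xff));
	/* offset 0xf9 with the same mask would reach page 0x100 and is rejected */
	printf("ok=%d\n", entry_in_slot(0x100, 0xf9, 0xff));
	return 0;
}
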
kvm_main.c
328 const struct kvm_memory_slot *memslot) in kvm_flush_remote_tlbs_memslot() argument
338 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); in kvm_flush_remote_tlbs_memslot()
925 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_destroy_dirty_bitmap() argument
927 if (!memslot->dirty_bitmap) in kvm_destroy_dirty_bitmap()
930 vfree(memslot->dirty_bitmap); in kvm_destroy_dirty_bitmap()
931 memslot->dirty_bitmap = NULL; in kvm_destroy_dirty_bitmap()
950 struct kvm_memory_slot *memslot; in kvm_free_memslots() local
962 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) in kvm_free_memslots()
963 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
1421 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_alloc_dirty_bitmap() argument
[all …]
pfncache.c
81 * If the page was cached from a memslot, make sure the memslots have in kvm_gpc_check()
165 .slot = gpc->memslot, in hva_to_pfn_retry()
287 gpc->memslot = NULL; in __kvm_gpc_refresh()
303 gpc->memslot = __gfn_to_memslot(slots, gfn); in __kvm_gpc_refresh()
304 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); in __kvm_gpc_refresh()
312 * Even if the GPA and/or the memslot generation changed, the in __kvm_gpc_refresh()
345 * Some/all of the uhva, gpa, and memslot generation info may still be in __kvm_gpc_refresh()
467 * memslot generation. The PFN lookup needs to be redone every in kvm_gpc_deactivate()
/linux/include/linux/
kvm_host.h
629 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) in kvm_dirty_bitmap_bytes() argument
631 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
634 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_second_dirty_bitmap() argument
636 unsigned long len = kvm_dirty_bitmap_bytes(memslot); in kvm_second_dirty_bitmap()
638 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); in kvm_second_dirty_bitmap()
1106 #define kvm_for_each_memslot(memslot, bkt, slots) \ argument
1107 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
1108 if (WARN_ON_ONCE(!memslot->npages)) { \
1389 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1555 const struct kvm_memory_slot *memslot);
[all …]
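
kvm_dirty_bitmap_bytes() above rounds the slot's page count up to a whole number of longs before converting bits to bytes. A worked example of that sizing rule follows; ALIGN() and BITS_PER_LONG are re-declared locally so the snippet builds outside the kernel, and the local ALIGN() computes the same result as the kernel's for these inputs.

/*
 * Worked example of the dirty-bitmap sizing rule: npages rounded up to a
 * multiple of BITS_PER_LONG, then divided by 8 to get bytes.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	uint64_t npages = 1000;   /* a ~4 MiB slot with 4 KiB pages */
	uint64_t bytes = ALIGN(npages, BITS_PER_LONG) / 8;

	/* 1000 pages -> 1024 bits -> 128 bytes on a 64-bit host */
	printf("%llu pages need a %llu-byte dirty bitmap\n",
	       (unsigned long long)npages, (unsigned long long)bytes);
	return 0;
}
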
/linux/arch/riscv/include/asm/
kvm_mmu.h
14 int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
/linux/arch/x86/include/asm/uv/
uv_geo.h
57 char memslot; /* The memory slot on the bus */ member
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
1165 memslot2region(struct kvm_vm *vm, u32 memslot) in memslot2region() argument
1170 memslot) in memslot2region()
1171 if (region->region.slot == memslot) in memslot2region()
1175 " requested slot: %u\n", memslot); in memslot2region()
2025 gpa_t min_gpa, u32 memslot, in __vm_phy_pages_alloc() argument
2038 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2055 min_gpa, vm->page_size, memslot); in __vm_phy_pages_alloc()
2070 gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot) in vm_phy_page_alloc() argument
2072 return vm_phy_pages_alloc(vm, 1, min_gpa, memslot); in vm_phy_page_alloc()
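
memslot2region() in the selftest library above resolves a slot id to the userspace region that backs it by scanning the VM's regions, and vm_phy_page_alloc() is a single-page wrapper around vm_phy_pages_alloc(). The sketch below mimics only the lookup; struct region and slot_to_region() are hypothetical stand-ins for the library's internal region list, not its actual types.

/*
 * Stand-alone sketch of the memslot2region() lookup: walk the regions and
 * return the one whose slot id matches.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint32_t slot;        /* slot id given to KVM_SET_USER_MEMORY_REGION */
	uint64_t guest_phys;  /* base GPA of the region */
	uint64_t size;        /* size in bytes */
};

static struct region *slot_to_region(struct region *regions, size_t n, uint32_t slot)
{
	for (size_t i = 0; i < n; i++)
		if (regions[i].slot == slot)
			return &regions[i];
	return NULL;   /* the real helper reports a fatal test failure instead */
}

int main(void)
{
	struct region regions[] = {
		{ .slot = 0, .guest_phys = 0x0,        .size = 0x10000000 },
		{ .slot = 1, .guest_phys = 0xc0000000, .size = 0x00200000 },
	};
	struct region *r = slot_to_region(regions, 2, 1);

	if (r)
		printf("slot 1 backs GPA [%#llx, %#llx)\n",
		       (unsigned long long)r->guest_phys,
		       (unsigned long long)(r->guest_phys + r->size));
	return 0;
}
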
/linux/tools/testing/selftests/kvm/include/
kvm_util.h
167 memslot2region(struct kvm_vm *vm, u32 memslot);
993 gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
995 u32 memslot, bool protected);
999 gpa_t min_gpa, u32 memslot) in vm_phy_pages_alloc() argument
1006 return __vm_phy_pages_alloc(vm, num, min_gpa, memslot, in vm_phy_pages_alloc()
/linux/arch/loongarch/include/asm/
kvm_host.h
355 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
/linux/arch/x86/include/asm/
kvm_host.h
2112 const struct kvm_memory_slot *memslot,
2115 const struct kvm_memory_slot *memslot,
2118 const struct kvm_memory_slot *memslot,
2122 const struct kvm_memory_slot *memslot);
2124 const struct kvm_memory_slot *memslot);
/linux/arch/powerpc/include/asm/
kvm_ppc.h
174 struct kvm_memory_slot *memslot, unsigned long porder);
217 struct kvm_memory_slot *memslot);
278 void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
