/linux/tools/testing/selftests/kvm/x86/
  private_mem_conversions_test.c
      30  #define memcmp_g(gpa, pattern, size) \
      32          uint8_t *mem = (uint8_t *)gpa; \
      38          pattern, i, gpa + i, mem[i]); \
      41  static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
      48          pattern, gpa + i, mem[i]);  // in memcmp_h()
      73  static void guest_sync_shared(uint64_t gpa, uint64_t size,
      76          GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);  // in guest_sync_shared()
      79  static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
      81          GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);  // in guest_sync_private()
      89  static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
      [all …]
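The memcmp_g()/memcmp_h() pair above checks the same guest memory from both sides of the conversion test: walk a range, compare each byte against an expected fill pattern, and report the guest physical address of the first mismatch. A minimal userspace sketch of that check, with a hypothetical name (check_pattern() stands in for the selftest's memcmp_h()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for memcmp_h(): "mem" is the host mapping that backs guest
     * physical address "gpa"; every byte is expected to hold "pattern". */
    static int check_pattern(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
    {
            size_t i;

            for (i = 0; i < size; i++) {
                    if (mem[i] != pattern) {
                            fprintf(stderr, "expected 0x%x at gpa 0x%llx, found 0x%x\n",
                                    pattern, (unsigned long long)(gpa + i), mem[i]);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[4096];

            memset(buf, 0xaa, sizeof(buf));
            buf[123] = 0x55;        /* inject one mismatch */

            /* Pretend buf backs guest physical memory at 0x100000. */
            return check_pattern(buf, 0x100000, 0xaa, sizeof(buf)) ? 1 : 0;
    }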
/linux/tools/testing/selftests/kvm/
  mmu_stress_test.c
      25  uint64_t gpa;  // local in guest_code()
      29  for (gpa = start_gpa; gpa < end_gpa; gpa += stride)  // in guest_code()
      30          vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
      34  for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
      35          *((volatile uint64_t *)gpa);
      52  for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
      54          asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */
      56          asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
      58          vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
      70  for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
      [all …]
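The stress test's guest loop stamps every page with its own guest physical address and later reads the stamps back, so a page that ends up backed by the wrong memory shows up as a bad value. A userspace sketch of the same stamp-then-verify pattern, with ordinary heap memory playing the role of guest physical memory:

    #include <stdint.h>
    #include <stdlib.h>
    #include <assert.h>

    int main(void)
    {
            /* In the real test this loop runs inside the guest and "gpa"
             * is a guest physical address; here it is just a host pointer
             * walked with the same stride. */
            enum { NPAGES = 16, STRIDE = 4096 };
            uint8_t *mem = aligned_alloc(4096, NPAGES * STRIDE);
            uintptr_t start = (uintptr_t)mem, end = start + NPAGES * STRIDE;
            uintptr_t gpa;

            assert(mem);

            /* Pass 1: write each page's own address into the page. */
            for (gpa = start; gpa < end; gpa += STRIDE)
                    *(volatile uint64_t *)gpa = gpa;

            /* Pass 2: read the stamps back and verify them. */
            for (gpa = start; gpa < end; gpa += STRIDE)
                    assert(*(volatile uint64_t *)gpa == gpa);

            free(mem);
            return 0;
    }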
  pre_fault_memory_test.c
      36  u64 gpa;  // struct member
      58  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, data->gpa,  // in delete_slot_worker()
      68  .gpa = base_gpa + offset,  // in pre_fault_memory()
      74  .gpa = base_gpa,
     164  uint64_t gpa, gva, alignment, guest_page_size;  // locals in __test_pre_fault_memory()
     177  gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
     183  gpa = align_down(gpa, alignment);
     184  gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
     186  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, TEST_SLOT,
     188  virt_map(vm, gva, gpa, TEST_NPAGES);
      [all …]
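__test_pre_fault_memory() places its test slot near the top of guest physical memory, rounds it down for alignment, then derives a guest virtual address by masking the GPA into the lower half of the VA space. The arithmetic in a standalone sketch, with assumed sizes (52-bit guest physical space, 48 VA bits, 2 MiB alignment; the real values come from the VM):

    #include <stdint.h>
    #include <stdio.h>

    /* align_down() as used by the selftests: round x down to a
     * power-of-two boundary. */
    static uint64_t align_down(uint64_t x, uint64_t align)
    {
            return x & ~(align - 1);
    }

    int main(void)
    {
            uint64_t guest_page_size = 4096, npages = 64, va_bits = 48;
            uint64_t max_gfn = (1ULL << 52) / guest_page_size - 1;

            /* Place the region at the top of guest memory, then align it. */
            uint64_t gpa = (max_gfn - npages) * guest_page_size;
            gpa = align_down(gpa, 1ULL << 21);

            /* Derive a user-half virtual address by masking to va_bits - 1. */
            uint64_t gva = gpa & ((1ULL << (va_bits - 1)) - 1);

            printf("gpa = 0x%llx, gva = 0x%llx\n",
                   (unsigned long long)gpa, (unsigned long long)gva);
            return 0;
    }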
  memslot_perf_test.c
     189  static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
     196  TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");  // in vm_gpa2hva()
     197  TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
     198              "Too high gpa to translate");
     199  gpa -= MEM_GPA;
     201  gpage = gpa / guest_page_size;
     202  pgoffs = gpa % guest_page_size;
     215  "Asking for remaining pages in slot but gpa not page aligned");
     335  uint64_t gpa;  // local in prepare_vm()
     640  uint64_t gpa, ctr;  // locals in test_memslot_do_unmap()
     665  uint64_t gpa;  // local in test_memslot_map_unmap_check()
      [all …]
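vm_gpa2hva() in this test rebases a GPA against the fixed MEM_GPA test base, then splits the remainder into a guest page index and an in-page offset. The same split, runnable on its own (BASE_GPA is a stand-in for the test's MEM_GPA):

    #include <stdint.h>
    #include <assert.h>
    #include <stdio.h>

    #define BASE_GPA        0x10000000ULL

    int main(void)
    {
            uint64_t guest_page_size = 4096;
            uint64_t gpa = BASE_GPA + 5 * guest_page_size + 123;

            assert(gpa >= BASE_GPA);        /* "Too low gpa to translate" */

            /* Rebase, then split into page index and offset. */
            uint64_t off = gpa - BASE_GPA;
            uint64_t gpage  = off / guest_page_size;
            uint64_t pgoffs = off % guest_page_size;

            printf("guest page %llu, offset %llu\n",
                   (unsigned long long)gpage, (unsigned long long)pgoffs);
            return 0;
    }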
  memslot_modification_stress_test.c
      61  uint64_t gpa;  // local in add_remove_memslot()
      68  gpa = memstress_args.gpa - pages * vm->page_size;  // in add_remove_memslot()
      72  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
/linux/virt/kvm/
  pfncache.c
      60  static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
      63          unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
      64                                                         offset_in_page(gpa);
      84  if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)  // in kvm_gpc_check()
      90  if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
     166  .gfn = gpa_to_gfn(gpc->gpa),  // in hva_to_pfn_retry()
     177  * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
     179  * different task may not fail the gpa/uhva/generation checks.
     256  static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
     266  /* Either gpa o  // in __kvm_gpc_refresh()
     389  __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, unsigned long len)
     419  kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
      [all …]
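The kvm_gpc_is_valid_len() hits encode the pfncache's core constraint: a cached mapping may not cross a page boundary, so the offset of the GPA (or user HVA) within its page plus the requested length must stay within PAGE_SIZE. A reduced sketch of that check (the real helper also picks between the gpa and uhva offsets depending on which one is valid):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    /* offset_in_page(), as in the kernel: byte offset within the page. */
    static unsigned long offset_in_page(unsigned long addr)
    {
            return addr & ~PAGE_MASK;
    }

    static bool gpc_is_valid_len(unsigned long gpa, unsigned long len)
    {
            /* offset + len must not spill into the next page. */
            return offset_in_page(gpa) + len <= PAGE_SIZE;
    }

    int main(void)
    {
            printf("%d\n", gpc_is_valid_len(0x1000, 4096));  /* 1: exactly one page */
            printf("%d\n", gpc_is_valid_len(0x1800, 4096));  /* 0: crosses a boundary */
            return 0;
    }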
/linux/arch/s390/kvm/
  gaccess.h
     152  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  // local in write_guest_lc()
     154  return kvm_write_guest(vcpu->kvm, gpa, data, len);
     178  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);  // local in read_guest_lc()
     180  return kvm_read_guest(vcpu->kvm, gpa, data, len);
     190  unsigned long *gpa, enum gacc_mode mode,
     196  int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
     199  int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
     209  int cmpxchg_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, int len, __uint128_t *old,
     371  int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
     374  return kvm_write_guest(vcpu->kvm, gpa, data, len);  // in write_guest_abs()
      [all …]
  vsie.c
     692  static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
     696  page = gfn_to_page(kvm, gpa_to_gfn(gpa));  // in pin_guest_page()
     699  *hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
     704  static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
     708  mark_page_dirty(kvm, gpa_to_gfn(gpa));  // in unpin_guest_page()
     773  gpa_t gpa;  // local in pin_blocks()
     776  gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;  // in pin_blocks()
     778  gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
     779  if (gpa) {
     780  if (gpa < 2 * PAGE_SIZE)
      [all …]
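pin_guest_page() above turns a guest physical address into a host physical one by resolving the backing page and carrying the in-page offset over unchanged: hpa = page base + (gpa & ~PAGE_MASK). The offset math in isolation, with made-up example addresses (in the real code the page base comes from page_to_phys(gfn_to_page(...))):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1ULL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    int main(void)
    {
            uint64_t gpa = 0x12345678;        /* guest physical address */
            uint64_t page_base = 0xabcd0000;  /* assumed host base of its page */

            /* Translate the page, keep the low in-page bits unchanged. */
            uint64_t hpa = page_base + (gpa & ~PAGE_MASK);

            printf("gpa 0x%llx -> hpa 0x%llx\n",
                   (unsigned long long)gpa, (unsigned long long)hpa);
            return 0;
    }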
  gaccess.c
     420  static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
     422  return kvm_read_guest(kvm, gpa, val, sizeof(*val));  // in deref_table()
     447  unsigned long *gpa, const union asce asce,  // in guest_translate()
     606  *gpa = raddr.addr;  // in guest_translate()
     630  enum gacc_mode mode, gpa_t gpa)  // in vm_check_access_key()
     640  hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
     693  enum gacc_mode mode, union asce asce, gpa_t gpa,  // in vcpu_check_access_key()
     707  hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
     776  unsigned long gpa;  // local in guest_range_to_gpas()
     786  rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
      [all …]
/linux/tools/testing/selftests/kvm/lib/
  memstress.c
     108  vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);  // in memstress_setup_vcpus()
     113  vcpu_args->gpa = args->gpa;
     119  i, vcpu_args->gpa, vcpu_args->gpa +
     197  args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;  // in memstress_create_vm()
     198  args->gpa = align_down(args->gpa, backing_src_pagesz);
     201  args->gpa = align_down(args->gpa, 1 << 20);
     205  args->gpa, args->gpa + args->size);
     210  vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
     218  virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
  kvm_util.c
     889  uint64_t gpa, uint64_t size, void *hva)  // in __vm_set_user_memory_region()
     894  .guest_phys_addr = gpa,
     903  uint64_t gpa, uint64_t size, void *hva)  // in vm_set_user_memory_region()
     905  int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
     916  uint64_t gpa, uint64_t size, void *hva,  // in __vm_set_user_memory_region2()
     922  .guest_phys_addr = gpa,
     935  uint64_t gpa, uint64_t size, void *hva,  // in vm_set_user_memory_region2()
     938  int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
     948  uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,  // in vm_mem_add()
     963  TEST_ASSERT((gpa % vm->page_size) == 0, "Guest physical "  // in vm_mem_add()
      [all …]
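The selftest wrappers here all funnel into the KVM_SET_USER_MEMORY_REGION vm ioctl, which binds a range of host memory (hva) to a slot in guest physical space (gpa). A minimal sketch of that call, assuming an already-created VM fd:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    /* "vm_fd", "slot", "gpa", "size" and "hva" are assumed to come from
     * the caller; this is just the structure-filling step the wrappers
     * perform. */
    static int set_user_memory_region(int vm_fd, uint32_t slot, uint32_t flags,
                                      uint64_t gpa, uint64_t size, void *hva)
    {
            struct kvm_userspace_memory_region region = {
                    .slot = slot,
                    .flags = flags,
                    .guest_phys_addr = gpa,           /* where it appears in the guest */
                    .memory_size = size,              /* bytes; 0 deletes the slot */
                    .userspace_addr = (uintptr_t)hva, /* host backing memory */
            };

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }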
/linux/arch/x86/include/asm/uv/
  uv_hub.h
     461  uv_gpa_in_mmr_space(unsigned long gpa)
     463  return (gpa >> 62) == 0x3UL;  // in uv_gpa_in_mmr_space()
     467  static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
     475  gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |  // in uv_gpa_to_soc_phys_ram()
     476        ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
     478  paddr = gpa & uv_hub_info->gpa_mask;
     485  static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
     490  return gpa >> n_lshift;  // in uv_gpa_to_gnode()
     492  return uv_gam_range(gpa)->nasid >> 1;
     496  static inline int uv_gpa_to_pnode(unsigned long gpa)
      [all …]
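Note that in this file "gpa" is SGI UV's global physical address, not a KVM guest address. uv_gpa_in_mmr_space() just tests whether the two top address bits are both set; a standalone version of that bit check (using unsigned long long for portability):

    #include <stdio.h>

    static int gpa_in_mmr_space(unsigned long long gpa)
    {
            /* MMR space is the quarter of the address space whose two
             * highest bits are both 1. */
            return (gpa >> 62) == 0x3ULL;
    }

    int main(void)
    {
            printf("%d\n", gpa_in_mmr_space(0xc000000000000000ULL));  /* 1 */
            printf("%d\n", gpa_in_mmr_space(0x4000000000000000ULL));  /* 0 */
            return 0;
    }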
/linux/arch/powerpc/kvm/
  book3s_hv_uvmem.c
     234  unsigned long gpa;  // struct member
     516  struct kvm *kvm, unsigned long gpa, struct page *fault_page)  // in __kvmppc_svm_page_out()
     536  if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))  // in __kvmppc_svm_page_out()
     569  gpa, 0, page_shift);
     589  struct kvm *kvm, unsigned long gpa,  // in kvmppc_svm_page_out()
     595  ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
     645  PAGE_SHIFT, kvm, pvt->gpa, NULL))  // in kvmppc_uvmem_drop_pages()
     647  pvt->gpa, addr);  // in kvmppc_uvmem_drop_pages()
     695  static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
     719  kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);  // in kvmppc_uvmem_get_page()
      [all …]
/linux/arch/x86/kvm/mmu/
  page_track.h
      30  void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
      41  static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,
      50  static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
      53  __kvm_page_track_write(vcpu->kvm, gpa, new, bytes);  // in kvm_page_track_write()
      55  kvm_mmu_track_write(vcpu, gpa, new, bytes);  // in kvm_page_track_write()
/linux/arch/x86/kvm/vmx/
  tdx.c
    1218  u64 gpa = tdx->map_gpa_next;  // local in __tdx_map_gpa()
    1233  tdx->vcpu.run->hypercall.args[0] = gpa & ~gfn_to_gpa(kvm_gfn_direct_bits(tdx->vcpu.kvm));  // in __tdx_map_gpa()
    1235  tdx->vcpu.run->hypercall.args[2] = vt_is_tdx_private_gpa(tdx->vcpu.kvm, gpa) ?
    1246  u64 gpa = tdx->vp_enter_args.r12;  // local in tdx_map_gpa()
    1263  if (gpa + size <= gpa || !kvm_vcpu_is_legal_gpa(vcpu, gpa) ||  // in tdx_map_gpa()
    1264      !kvm_vcpu_is_legal_gpa(vcpu, gpa + size - 1) ||
    1265      (vt_is_tdx_private_gpa(vcpu->kvm, gpa) !=
    1266       vt_is_tdx_private_gpa(vcpu->kvm, gpa + size - 1)))
    1271  if (!PAGE_ALIGNED(gpa) || !PAGE_ALIGNED(size)) {
    1276  tdx->map_gpa_end = gpa + size;
      [all …]
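tdx_map_gpa() validates the hypercall's GPA range before acting on it: reject wrap-around, reject addresses with reserved bits set, require page alignment, and require start and end to sit on the same shared/private side. A reduced sketch of the first three checks, with an assumed MAXPHYADDR of 48 standing in for the real per-vCPU reserved-bit mask (the shared/private comparison is omitted since it needs VM state):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE       4096ULL
    #define MAXPHYADDR      48
    #define RESERVED_BITS   (~((1ULL << MAXPHYADDR) - 1))

    static bool gpa_range_ok(uint64_t gpa, uint64_t size)
    {
            if (gpa + size <= gpa)          /* empty or wrapping range */
                    return false;
            if ((gpa & RESERVED_BITS) || ((gpa + size - 1) & RESERVED_BITS))
                    return false;           /* reserved address bits set */
            if ((gpa % PAGE_SIZE) || (size % PAGE_SIZE))
                    return false;           /* must be page aligned */
            return true;
    }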
  common.h
      77  static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
      80  return !kvm_is_addr_direct(kvm, gpa);  // in vt_is_tdx_private_gpa()
      83  static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
     105  if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))  // in __vmx_handle_ept_violation()
     108  return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);  // in __vmx_handle_ept_violation()
/linux/arch/riscv/kvm/
  tlb.c
      24  gpa_t gpa, gpa_t gpsz,  // in kvm_riscv_local_hfence_gvma_vmid_gpa()
      36  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  // in kvm_riscv_local_hfence_gvma_vmid_gpa()
      41  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
      52  void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
      64  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))  // in kvm_riscv_local_hfence_gvma_gpa()
      69  for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
     373  gpa_t gpa, gpa_t gpsz,  // in kvm_riscv_hfence_gvma_vmid_gpa()
     381  data.addr = gpa;  // in kvm_riscv_hfence_gvma_vmid_gpa()
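Both local flush helpers walk the guest physical range in BIT(order)-sized strides, issuing one fence per block, where order is the log2 of the mapping size being invalidated. The loop shape on its own, with the fence instruction replaced by a printout:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)  (1ULL << (n))

    static void flush_gpa_range(uint64_t gpa, uint64_t gpsz, unsigned order)
    {
            uint64_t pos;

            /* One (simulated) flush per BIT(order)-sized block. */
            for (pos = gpa; pos < gpa + gpsz; pos += BIT(order))
                    printf("flush block at 0x%llx\n", (unsigned long long)pos);
    }

    int main(void)
    {
            flush_gpa_range(0x100000, 4 * 4096, 12);  /* four 4 KiB pages */
            return 0;
    }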
  mmu.c
      38  int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
      57  end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;  // in kvm_riscv_mmu_ioremap()
      61  for (addr = gpa; addr < end; addr += PAGE_SIZE) {
      88  void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
      98  kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);  // in kvm_riscv_mmu_iounmap()
     140  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;  // local in kvm_arch_flush_shadow_memslot()
     150  kvm_riscv_gstage_unmap_range(&gstage, gpa, size, false);
     309  gpa_t gpa, unsigned long hva, bool is_write,  // in kvm_riscv_mmu_map()
     316  gfn_t gfn = gpa >> PAGE_SHIFT;  // in kvm_riscv_mmu_map()
     359  gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
      [all …]
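kvm_riscv_mmu_ioremap() rounds the end of the [gpa, gpa + size) range up to a page boundary before walking it page by page. The bounds arithmetic, standalone:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long gpa = 0x9000100, size = 0x2100;

            /* Round the end up so a partial last page is still covered. */
            unsigned long end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;

            for (unsigned long addr = gpa; addr < end; addr += PAGE_SIZE)
                    printf("map page containing 0x%lx\n", addr);
            return 0;
    }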
/linux/arch/loongarch/kvm/
  tlb.c
      24  void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
      27  gpa &= (PAGE_MASK << 1);  // in kvm_flush_tlb_gpa()
      28  invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
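The mask on line 27 is worth a second look: shifting PAGE_MASK left by one clears one extra low bit, rounding the address down to an even/odd page pair rather than a single page, which presumably matches how the LoongArch TLB pairs entries. The effect of the mask in isolation:

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long gpa = 0x5000 + 123;  /* inside the odd page of a pair */

            /* PAGE_MASK << 1 aligns down to the pair base, not the page. */
            printf("0x%lx -> 0x%lx\n", gpa, gpa & (PAGE_MASK << 1));  /* 0x4000 */
            return 0;
    }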
  mmu.c
     528  gpa_t gpa = range->start << PAGE_SHIFT;  // local in kvm_test_age_gfn()
     529  kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);  // in kvm_test_age_gfn()
     552  static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
     556  gfn_t gfn = gpa >> PAGE_SHIFT;  // in kvm_map_page_fast()
     563  ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
     772  static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
     779  gfn_t gfn = gpa >> PAGE_SHIFT;  // in kvm_map_page()
     787  err = kvm_map_page_fast(vcpu, gpa, write);  // in kvm_map_page()
     875  ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
     887  ptep = kvm_populate_gpa(kvm, memcache, gpa, level);
      [all …]
/linux/arch/x86/kvm/
  cpuid.h
      69  static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
      71  return !(gpa & vcpu->arch.reserved_gpa_bits);  // in kvm_vcpu_is_legal_gpa()
      75  gpa_t gpa, gpa_t alignment)  // in kvm_vcpu_is_legal_aligned_gpa()
      77  return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);  // in kvm_vcpu_is_legal_aligned_gpa()
      80  static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
      82  return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);  // in page_address_valid()
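A GPA is legal when none of its reserved bits are set, and the reserved mask is everything at and above the guest's MAXPHYADDR. A sketch of the two checks with an assumed MAXPHYADDR of 48 (KVM precomputes vcpu->arch.reserved_gpa_bits from guest CPUID instead of hardcoding it):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAXPHYADDR          48
    #define RESERVED_GPA_BITS   (~((1ULL << MAXPHYADDR) - 1))

    static bool is_legal_gpa(uint64_t gpa)
    {
            return !(gpa & RESERVED_GPA_BITS);
    }

    static bool is_legal_aligned_gpa(uint64_t gpa, uint64_t alignment)
    {
            return (gpa % alignment) == 0 && is_legal_gpa(gpa);
    }

    int main(void)
    {
            printf("%d\n", is_legal_aligned_gpa(0x1000, 4096));    /* 1 */
            printf("%d\n", is_legal_aligned_gpa(1ULL << 52, 4096)); /* 0: reserved bit */
            return 0;
    }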
  mmu.h
     103  void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
     291  gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
     296  gpa_t gpa, u64 access,  // in kvm_translate_gpa()
     300  return gpa;  // in kvm_translate_gpa()
     301  return translate_nested_gpa(vcpu, gpa, access, exception);
     314  static inline bool kvm_is_addr_direct(struct kvm *kvm, gpa_t gpa)
     318  return !gpa_direct_bits || (gpa & gpa_direct_bits);  // in kvm_is_addr_direct()
/linux/arch/riscv/include/asm/
  kvm_mmu.h
      11  int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
      13  void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size);
      15  gpa_t gpa, unsigned long hva, bool is_write,
/linux/tools/testing/selftests/kvm/include/
  kvm_util.h
     397  static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
     402  .address = gpa,  // in vm_set_memory_attributes()
     418  static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
     421  vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);  // in vm_mem_set_private()
     424  static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
     427  vm_set_memory_attributes(vm, gpa, size, 0);  // in vm_mem_set_shared()
     430  void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
     433  static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
     436  vm_guest_mem_fallocate(vm, gpa, size, true);  // in vm_guest_mem_punch_hole()
     439  static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
      [all …]
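vm_set_memory_attributes() wraps the KVM_SET_MEMORY_ATTRIBUTES vm ioctl that the private/shared helpers build on: setting KVM_MEMORY_ATTRIBUTE_PRIVATE converts a GPA range to private, and attributes = 0 converts it back to shared. A sketch of the raw call, assuming a kernel with guest_memfd support and an open VM fd:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    static int set_memory_attributes(int vm_fd, uint64_t gpa, uint64_t size,
                                     uint64_t attributes)
    {
            struct kvm_memory_attributes attr = {
                    .address = gpa,
                    .size = size,
                    /* e.g. KVM_MEMORY_ATTRIBUTE_PRIVATE, or 0 for shared */
                    .attributes = attributes,
            };

            return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attr);
    }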
/linux/include/linux/
  kvm_host.h
     153  static inline bool kvm_is_error_gpa(gpa_t gpa)
     155  return gpa == INVALID_GPA;  // in kvm_is_error_gpa()
     321  gpa_t gpa;  // struct member
    1318  int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
    1326  int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
    1334  gpa_t gpa, unsigned long len);
    1347  #define kvm_get_guest(kvm, gpa, v) \
    1349  gpa_t __gpa = gpa; \
    1369  #define kvm_put_guest(kvm, gpa, v) \
    1371  gpa_t __gpa = gpa; \
      [all …]
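kvm_get_guest()/kvm_put_guest() are typed wrappers: they snapshot the gpa argument into a local once, then read or write exactly sizeof(v) bytes, so the access width follows the destination's type. A userspace sketch of the get side, with guest memory reduced to a flat buffer (get_guest() and read_guest() are stand-ins, not the kernel macros):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef uint64_t gpa_t;

    /* Stand-in for kvm_read_guest(): guest memory is a flat buffer
     * indexed directly by gpa. */
    static uint8_t guest_mem[4096];

    static int read_guest(gpa_t gpa, void *data, unsigned long len)
    {
            if (gpa + len > sizeof(guest_mem))
                    return -1;
            memcpy(data, guest_mem + gpa, len);
            return 0;
    }

    /* Evaluate "gpa" once, read sizeof(v) bytes into the lvalue "v".
     * Uses a GCC statement expression, as the kernel macros do. */
    #define get_guest(gpa, v)                       \
    ({                                              \
            gpa_t __gpa = (gpa);                    \
            read_guest(__gpa, &(v), sizeof(v));     \
    })

    int main(void)
    {
            uint32_t val;

            guest_mem[16] = 0x2a;
            if (!get_guest(16, val))
                    printf("val = 0x%x\n", val);    /* 0x2a on little endian */
            return 0;
    }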