Searched refs:gva (Results 1 – 25 of 39) sorted by relevance

/linux/tools/testing/selftests/kvm/lib/s390/
processor.c
50 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) in virt_arch_pg_map() argument
55 TEST_ASSERT((gva % vm->page_size) == 0, in virt_arch_pg_map()
58 gva, vm->page_size); in virt_arch_pg_map()
60 (gva >> vm->page_shift)), in virt_arch_pg_map()
62 gva); in virt_arch_pg_map()
66 gva, vm->page_size); in virt_arch_pg_map()
70 gva, vm->max_gfn, vm->page_size); in virt_arch_pg_map()
75 idx = (gva >> (64 - 11 * ri)) & 0x7ffu; in virt_arch_pg_map()
82 idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */ in virt_arch_pg_map()
89 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
[all …]
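
The arch-specific pair above is reached through the selftest library's generic wrappers. A minimal sketch of how a test would drive them, assuming an already-created vm and a gpa backed by a memslot (the helper name map_and_verify is illustrative):

#include "test_util.h"
#include "kvm_util.h"

static void map_and_verify(struct kvm_vm *vm, vm_vaddr_t gva, vm_paddr_t gpa)
{
        virt_pg_map(vm, gva, gpa);                /* dispatches to virt_arch_pg_map() */
        TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa, /* dispatches to addr_arch_gva2gpa() */
                    "gva 0x%lx did not translate back to gpa 0x%lx",
                    (unsigned long)gva, (unsigned long)gpa);
}
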
/linux/arch/riscv/kvm/
tlb.c
82 unsigned long gva, in kvm_riscv_local_hfence_vvma_asid_gva() argument
97 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_asid_gva()
102 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_asid_gva()
123 unsigned long gva, unsigned long gvsz, in kvm_riscv_local_hfence_vvma_gva() argument
137 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_gva()
142 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_gva()
402 unsigned long gva, unsigned long gvsz, in kvm_riscv_hfence_vvma_asid_gva() argument
411 data.addr = gva; in kvm_riscv_hfence_vvma_asid_gva()
433 unsigned long gva, unsigned long gvsz, in kvm_riscv_hfence_vvma_gva() argument
441 data.addr = gva; in kvm_riscv_hfence_vvma_gva()
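
Both local-fence helpers above share one stepping pattern: walk the guest-virtual range in BIT(order)-sized strides and issue one fence per stride. A stripped-down, compilable sketch of that loop (local_hfence_vvma() is a stub standing in for the kernel's HFENCE.VVMA wrapper):

#define BIT(n) (1UL << (n))

/* Stub standing in for the HFENCE.VVMA instruction wrapper. */
static void local_hfence_vvma(unsigned long gva) { (void)gva; }

static void fence_vvma_range(unsigned long gva, unsigned long gvsz,
                             unsigned long order)
{
        unsigned long pos;

        /* One fence per BIT(order)-sized step of the range. */
        for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                local_hfence_vvma(pos);
}
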
/linux/tools/testing/selftests/kvm/lib/arm64/
processor.c
29 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
34 return (gva >> shift) & mask; in pgd_index()
37 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
45 return (gva >> shift) & mask; in pud_index()
48 static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pmd_index() argument
56 return (gva >> shift) & mask; in pmd_index()
59 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) in pte_index() argument
62 return (gva >> vm->page_shift) & mask; in pte_index()
189 uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) in virt_get_pte_hva_at_level() argument
196 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; in virt_get_pte_hva_at_level()
[all …]
/linux/arch/riscv/include/asm/
kvm_tlb.h
43 unsigned long gva,
49 unsigned long gva, unsigned long gvsz,
71 unsigned long gva, unsigned long gvsz,
79 unsigned long gva, unsigned long gvsz,
/linux/tools/testing/selftests/kvm/lib/loongarch/
processor.c
17 static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in virt_pte_index() argument
24 return (gva >> shift) & mask; in virt_pte_index()
74 static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc) in virt_populate_pte() argument
86 ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8; in virt_populate_pte()
100 ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8; in virt_populate_pte()
104 TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva); in virt_populate_pte()
108 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
112 ptep = virt_populate_pte(vm, gva, 0); in addr_arch_gva2gpa()
113 TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva); in addr_arch_gva2gpa()
115 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); in addr_arch_gva2gpa()
/linux/tools/testing/selftests/kvm/lib/
memstress.c
52 uint64_t gva; in memstress_guest_code() local
60 gva = vcpu_args->gva; in memstress_guest_code()
76 addr = gva + (page * args->guest_page_size); in memstress_guest_code()
104 vcpu_args->gva = guest_test_virt_mem + in memstress_setup_vcpus()
110 vcpu_args->gva = guest_test_virt_mem; in memstress_setup_vcpus()
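
memstress hands each vCPU a base gva in its vcpu_args, and the guest strides through its slice page by page, exactly as the addr computation above shows. Reduced to its core (names follow the snippet; the volatile write is a simplification, since memstress reads or writes depending on its configuration):

#include <stdint.h>

static void touch_pages(uint64_t gva, uint64_t pages, uint64_t guest_page_size)
{
        uint64_t page;

        for (page = 0; page < pages; page++) {
                uint64_t addr = gva + (page * guest_page_size);

                /* Keep the access from being optimized away. */
                *(volatile uint64_t *)addr = page;
        }
}
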
/linux/arch/x86/kvm/
x86.h
349 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info() argument
360 vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; in vcpu_cache_mmio_info()
377 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) in vcpu_clear_mmio_info() argument
379 if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) in vcpu_clear_mmio_info()
385 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) in vcpu_match_mmio_gva() argument
388 vcpu->arch.mmio_gva == (gva & PAGE_MASK)) in vcpu_match_mmio_gva()
459 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
642 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
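
The MMIO-info cache above works at page granularity: the gva is stored pre-masked with PAGE_MASK (line 360) and probes are masked the same way, so any access within the cached page hits. The comparison in isolation (4 KiB pages assumed):

#include <stdbool.h>

#define PAGE_MASK (~0xfffUL)   /* 4 KiB pages assumed */

/* The same check vcpu_match_mmio_gva() performs against the cache. */
static bool same_mmio_page(unsigned long cached_gva, unsigned long gva)
{
        return cached_gva == (gva & PAGE_MASK);
}
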
trace.h
956 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
957 TP_ARGS(gva, gpa, write, gpa_match),
960 __field(gva_t, gva)
967 __entry->gva = gva;
973 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
/linux/tools/testing/selftests/kvm/x86/
sev_smoke_test.c
81 vm_vaddr_t gva; in test_sync_vmsa() local
88 gva = vm_vaddr_alloc_shared(vm, PAGE_SIZE, KVM_UTIL_MIN_VADDR, in test_sync_vmsa()
90 hva = addr_gva2hva(vm, gva); in test_sync_vmsa()
92 vcpu_args_set(vcpu, 1, gva); in test_sync_vmsa()
cpuid_test.c
146 vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); in vcpu_alloc_cpuid() local
147 struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); in vcpu_alloc_cpuid()
151 *p_gva = gva; in vcpu_alloc_cpuid()
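
Both x86 tests above follow the same share-data-with-the-guest idiom: allocate guest virtual memory, fill it through the host alias returned by addr_gva2hva(), then hand the gva to the guest as a function argument. Condensed into one helper (the uint64_t payload and the name share_u64 are illustrative):

#include "kvm_util.h"

static vm_vaddr_t share_u64(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
                            uint64_t val)
{
        vm_vaddr_t gva = vm_vaddr_alloc(vm, sizeof(val), KVM_UTIL_MIN_VADDR);
        uint64_t *hva = addr_gva2hva(vm, gva);   /* host view of the guest page */

        *hva = val;                  /* the guest will see this at 'gva' */
        vcpu_args_set(vcpu, 1, gva); /* guest code receives the gva as arg 1 */
        return gva;
}
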
hyperv_tlb_flush.c
584 vm_vaddr_t test_data_page, gva; in main() local
620 gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR); in main()
624 virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK); in main()
625 data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK); in main()
/linux/tools/testing/selftests/kvm/
pre_fault_memory_test.c
164 uint64_t gpa, gva, alignment, guest_page_size; in __test_pre_fault_memory() local
184 gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1); in __test_pre_fault_memory()
188 virt_map(vm, gva, gpa, TEST_NPAGES); in __test_pre_fault_memory()
197 vcpu_args_set(vcpu, 1, gva); in __test_pre_fault_memory()
access_tracking_perf_test.c
126 static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) in lookup_pfn()
128 uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); in lookup_pfn()
177 uint64_t base_gva = vcpu_args->gva; in pageidle_mark_vcpu_memory_idle()
196 uint64_t gva = base_gva + page * memstress_args.guest_page_size; in pageidle_mark_vcpu_memory_idle()
197 uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); in pageidle_mark_vcpu_memory_idle()
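
lookup_pfn() above resolves a gva by first turning it into an hva with addr_gva2hva() and then consulting /proc/<pid>/pagemap. The pagemap step on its own, per the kernel's documented pagemap ABI (error handling omitted):

#include <stdint.h>
#include <unistd.h>

static uint64_t hva_to_pfn(int pagemap_fd, uint64_t hva, long page_size)
{
        uint64_t entry;

        /* One 8-byte pagemap entry per virtual page. */
        pread(pagemap_fd, &entry, sizeof(entry),
              (hva / page_size) * sizeof(entry));

        return entry & ((1ULL << 55) - 1);   /* bits 0-54: PFN, valid when bit 63 (present) is set */
}
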
/linux/arch/s390/kvm/
gaccess.c
310 static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar, in trans_exc_ending() argument
360 teid->addr = gva >> PAGE_SHIFT; in trans_exc_ending()
380 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar, in trans_exc() argument
383 return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false); in trans_exc()
446 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, in guest_translate() argument
450 union vaddress vaddr = {.addr = gva}; in guest_translate()
451 union raddress raddr = {.addr = gva}; in guest_translate()
1106 int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, in guest_translate_address_with_key() argument
1113 gva = kvm_s390_logical_to_effective(vcpu, gva); in guest_translate_address_with_key()
1114 rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode); in guest_translate_address_with_key()
[all …]
gaccess.h
189 int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
193 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
/linux/arch/mips/kvm/
tlb.c
166 int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, in kvm_vz_guest_tlb_lookup() argument
184 write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl)); in kvm_vz_guest_tlb_lookup()
226 pa = entrylo[!!(gva & pagemaskbit)]; in kvm_vz_guest_tlb_lookup()
240 pa |= gva & ~(pagemask | pagemaskbit); in kvm_vz_guest_tlb_lookup()
vz.c
197 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) in kvm_vz_gva_to_gpa_cb() argument
200 return gva; in kvm_vz_gva_to_gpa_cb()
689 static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in kvm_vz_gva_to_gpa() argument
692 u32 gva32 = gva; in kvm_vz_gva_to_gpa()
695 if ((long)gva == (s32)gva32) { in kvm_vz_gva_to_gpa()
750 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) { in kvm_vz_gva_to_gpa()
758 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) { in kvm_vz_gva_to_gpa()
772 *gpa = gva & 0x07ffffffffffffff; in kvm_vz_gva_to_gpa()
778 return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa); in kvm_vz_gva_to_gpa()
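
The (long)gva == (s32)gva32 comparison above is the canonical test that a 64-bit value is a correctly sign-extended 32-bit address, i.e. that the gva lies in MIPS's 32-bit compatibility segments. Standalone:

#include <stdbool.h>
#include <stdint.h>

/* True iff gva round-trips through a sign-extending 32-bit truncation. */
static bool is_compat_gva(uint64_t gva)
{
        uint32_t gva32 = gva;

        return (int64_t)gva == (int32_t)gva32;
}
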
/linux/tools/testing/selftests/kvm/lib/riscv/
processor.c
59 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in pte_index() argument
66 return (gva & pte_index_mask[level]) >> pte_index_shift[level]; in pte_index()
126 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
134 ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8; in addr_arch_gva2gpa()
141 pte_index(vm, gva, level) * 8; in addr_arch_gva2gpa()
147 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); in addr_arch_gva2gpa()
151 gva, level); in addr_arch_gva2gpa()
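
Every addr_arch_gva2gpa() in these results ends the same way (compare the identical return expressions in the riscv and loongarch walkers): the walked PTE supplies the physical frame and the gva's low page-offset bits carry over unchanged. That final composition, in isolation:

#include <stdint.h>

/* Frame bits come from the PTE, offset bits from the gva itself. */
static uint64_t compose_gpa(uint64_t frame_pa, uint64_t gva,
                            uint64_t page_size)
{
        return frame_pa + (gva & (page_size - 1));
}
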
/linux/tools/testing/selftests/kvm/arm64/
sea_to_user.c
203 run->arm_sea.gva, run->arm_sea.gpa); in run_vm()
216 TEST_ASSERT_EQ(run->arm_sea.gva, EINJ_GVA); in run_vm()
/linux/tools/testing/selftests/kvm/include/
memstress.h
24 uint64_t gva; member
kvm_util.h
711 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
1226 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1228 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa() argument
1230 return addr_arch_gva2gpa(vm, gva); in addr_gva2gpa()
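
These declarations are the selftest library's two exits from a gva: addr_gva2gpa() for the guest-physical view and addr_gva2hva() for a directly dereferenceable host pointer. A short usage sketch (vm setup omitted; the helper name is illustrative):

#include "kvm_util.h"

static void poke_guest_memory(struct kvm_vm *vm, vm_vaddr_t gva)
{
        vm_paddr_t gpa = addr_gva2gpa(vm, gva); /* guest virtual -> guest physical */
        uint64_t *hva = addr_gva2hva(vm, gva);  /* guest virtual -> host pointer */

        *hva = gpa;   /* host-side write, visible to the guest at 'gva' */
}
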
/linux/tools/testing/selftests/kvm/include/arm64/
processor.h
178 uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
179 uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
/linux/arch/x86/include/asm/
kvm_host.h
1938 gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
2270 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
2272 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
2274 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
2300 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
2303 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
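
The kvm_mmu_gva_to_gpa_{read,write,system}() variants walk the guest page tables with the corresponding access semantics and report faults through a struct x86_exception out-parameter. A hedged, in-kernel-style sketch of the calling convention (not a real call site; INVALID_GPA is the sentinel current kernels return on a failed walk):

static int read_guest_u64(struct kvm_vcpu *vcpu, gva_t gva, u64 *val)
{
        struct x86_exception e;
        gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &e);

        if (gpa == INVALID_GPA)
                return -EFAULT;   /* walk faulted; 'e' describes why */

        return kvm_vcpu_read_guest(vcpu, gpa, val, sizeof(*val));
}
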
/linux/arch/arm64/kvm/
nested.c
22 u64 gva; member
934 va_start = vt->gva & ~(va_size - 1); in invalidate_vncr_va()
1287 vt->gva = va; in kvm_translate_vncr()
1324 if (read_vncr_el2(vcpu) != vt->gva) in kvm_vncr_tlb_lookup()
1440 if (read_vncr_el2(vcpu) != vt->gva) in kvm_map_l1_vncr()
/linux/arch/x86/kvm/vmx/
nested.c
5369 gva_t gva; in nested_vmx_get_vmptr() local
5375 sizeof(*vmpointer), &gva)) { in nested_vmx_get_vmptr()
5380 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); in nested_vmx_get_vmptr()
5664 gva_t gva = 0; in handle_vmread() local
5725 instr_info, true, len, &gva)) in handle_vmread()
5728 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); in handle_vmread()
5770 gva_t gva; in handle_vmwrite() local
5799 instr_info, false, len, &gva)) in handle_vmwrite()
5801 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); in handle_vmwrite()
5957 gva_t gva; in handle_vmptrst() local
[all …]
