Lines matching references to vm (KVM selftests, x86 processor library)
159 void virt_arch_pgd_alloc(struct kvm_vm *vm) in virt_arch_pgd_alloc() argument
161 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, in virt_arch_pgd_alloc()
162 "Unknown or unsupported guest mode: 0x%x", vm->mode); in virt_arch_pgd_alloc()
165 if (!vm->pgd_created) { in virt_arch_pgd_alloc()
166 vm->pgd = vm_alloc_page_table(vm); in virt_arch_pgd_alloc()
167 vm->pgd_created = true; in virt_arch_pgd_alloc()
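The allocation above is guarded by vm->pgd_created, so only the first call allocates the root page table. Tests normally reach it through the generic wrapper rather than the arch hook; a minimal sketch, assuming virt_pgd_alloc() is the generic front-end to virt_arch_pgd_alloc() and the selftests headers are available (ensure_page_tables is a hypothetical helper name):

	#include "kvm_util.h"
	#include "processor.h"

	static void ensure_page_tables(struct kvm_vm *vm)
	{
		/* First call allocates vm->pgd; the pgd_created guard makes the second a no-op. */
		virt_pgd_alloc(vm);
		virt_pgd_alloc(vm);
	}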
171 static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte, in virt_get_pte() argument
175 uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); in virt_get_pte()
178 TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd, in virt_get_pte()
185 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, in virt_create_upper_pte() argument
192 uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level); in virt_create_upper_pte()
194 paddr = vm_untag_gpa(vm, paddr); in virt_create_upper_pte()
201 *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; in virt_create_upper_pte()
218 void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) in __virt_pg_map() argument
221 uint64_t *pte = &vm->pgd; in __virt_pg_map()
224 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, in __virt_pg_map()
225 "Unknown or unsupported guest mode: 0x%x", vm->mode); in __virt_pg_map()
230 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), in __virt_pg_map()
235 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, in __virt_pg_map()
238 paddr, vm->max_gfn, vm->page_size); in __virt_pg_map()
239 TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr, in __virt_pg_map()
246 for (current_level = vm->pgtable_levels; in __virt_pg_map()
249 pte = virt_create_upper_pte(vm, pte, vaddr, paddr, in __virt_pg_map()
256 pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K); in __virt_pg_map()
265 if (vm_is_gpa_protected(vm, paddr)) in __virt_pg_map()
266 *pte |= vm->arch.c_bit; in __virt_pg_map()
268 *pte |= vm->arch.s_bit; in __virt_pg_map()
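__virt_pg_map() walks from vm->pgd down to a 4 KiB leaf, creating missing upper-level entries via virt_create_upper_pte() and tagging the final PTE with the C-bit or S-bit for protected guests. A usage sketch, assuming the KVM selftests framework is in place; map_test_page and TEST_GVA are hypothetical names chosen for illustration:

	#include "kvm_util.h"
	#include "processor.h"

	#define TEST_GVA	0xc0000000	/* hypothetical guest virtual address */

	static void map_test_page(struct kvm_vm *vm)
	{
		/* One page of guest physical memory from memslot 0. */
		vm_paddr_t gpa = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PFN * vm->page_size, 0);

		/* Generic wrapper; ends up in __virt_pg_map(vm, gva, gpa, PG_LEVEL_4K). */
		virt_pg_map(vm, TEST_GVA, gpa);
	}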
271 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_arch_pg_map() argument
273 __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); in virt_arch_pg_map()
276 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map_level() argument
288 __virt_pg_map(vm, vaddr, paddr, level); in virt_map_level()
289 sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift, in virt_map_level()
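virt_map_level() applies the same walk at a caller-chosen leaf level and records the mapped pages in vm->vpages_mapped. A fragment showing a single 2 MiB leaf mapped directly through __virt_pg_map(), assuming a 2 MiB-aligned, hypothetical gva/gpa pair and the same headers as the sketch above:

	/* Stop the walk at the 2 MiB level instead of descending to 4 KiB. */
	__virt_pg_map(vm, gva, gpa, PG_LEVEL_2M);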
309 uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, in __vm_get_page_table_entry() argument
312 int va_width = 12 + (vm->pgtable_levels) * 9; in __vm_get_page_table_entry()
313 uint64_t *pte = &vm->pgd; in __vm_get_page_table_entry()
316 TEST_ASSERT(!vm->arch.is_pt_protected, in __vm_get_page_table_entry()
319 TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->pgtable_levels, in __vm_get_page_table_entry()
322 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, in __vm_get_page_table_entry()
323 "Unknown or unsupported guest mode: 0x%x", vm->mode); in __vm_get_page_table_entry()
324 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, in __vm_get_page_table_entry()
325 (vaddr >> vm->page_shift)), in __vm_get_page_table_entry()
335 for (current_level = vm->pgtable_levels; in __vm_get_page_table_entry()
338 pte = virt_get_pte(vm, pte, vaddr, current_level); in __vm_get_page_table_entry()
343 return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K); in __vm_get_page_table_entry()
346 uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr) in vm_get_page_table_entry() argument
350 return __vm_get_page_table_entry(vm, vaddr, &level); in vm_get_page_table_entry()
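vm_get_page_table_entry() gives host code a pointer to the final 4 KiB PTE for a guest virtual address (the walk asserts that the guest's page tables are host-accessible). A fragment showing the common pattern of mutating an existing mapping, assuming gva was mapped earlier and PTE_PRESENT_MASK from the selftests processor.h:

	uint64_t *pte = vm_get_page_table_entry(vm, gva);

	/* Clear the present bit so the next guest access to gva faults. */
	*pte &= ~PTE_PRESENT_MASK;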
353 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_arch_dump() argument
360 if (!vm->pgd_created) in virt_arch_dump()
368 pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd); in virt_arch_dump()
377 addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e), in virt_arch_dump()
380 pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); in virt_arch_dump()
389 addr_hva2gpa(vm, pdpe), in virt_arch_dump()
393 pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); in virt_arch_dump()
401 addr_hva2gpa(vm, pde), in virt_arch_dump()
405 pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); in virt_arch_dump()
415 addr_hva2gpa(vm, pte), in virt_arch_dump()
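virt_arch_dump() walks PML4 -> PDPT -> PD -> PT and prints each present entry with its host and guest addresses. Tests reach it through the generic wrapper; a one-line fragment, assuming virt_dump() forwards here:

	virt_dump(stderr, vm, 4);	/* dump the guest page tables, indented 4 spaces */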
448 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) in kvm_seg_fill_gdt_64bit() argument
450 void *gdt = addr_gva2hva(vm, vm->arch.gdt); in kvm_seg_fill_gdt_64bit()
497 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
500 uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level); in addr_arch_gva2gpa()
509 return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level)); in addr_arch_gva2gpa()
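addr_arch_gva2gpa() performs a software walk of the guest page tables and strips any GPA tag bits, which is what the generic addr_gva2gpa() relies on. A fragment reading guest memory from the host, assuming gva is already mapped:

	vm_paddr_t gpa = addr_gva2gpa(vm, gva);			/* software page-table walk */
	uint64_t val = *(uint64_t *)addr_gva2hva(vm, gva);	/* host alias of the same page */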
522 static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vcpu_init_sregs() argument
526 TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K, in vcpu_init_sregs()
527 "Unknown or unsupported guest mode: 0x%x", vm->mode); in vcpu_init_sregs()
532 sregs.idt.base = vm->arch.idt; in vcpu_init_sregs()
534 sregs.gdt.base = vm->arch.gdt; in vcpu_init_sregs()
541 if (vm->pgtable_levels == 5) in vcpu_init_sregs()
550 kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr); in vcpu_init_sregs()
552 sregs.cr3 = vm->pgd; in vcpu_init_sregs()
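The pgtable_levels check above is where 5-level paging would be turned on for the vCPU; the statement it guards is elided from this listing. A hedged sketch of the likely shape, using X86_CR4_LA57 from the selftests processor.h (an assumption, not a quote of the source):

	if (vm->pgtable_levels == 5)
		sregs.cr4 |= X86_CR4_LA57;	/* assumed: LA57 selects the 5-level walk */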
556 static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vcpu_init_xcrs() argument
570 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, in set_idt_entry() argument
574 (struct idt_entry *)addr_gva2hva(vm, vm->arch.idt); in set_idt_entry()
619 static void vm_init_descriptor_tables(struct kvm_vm *vm) in vm_init_descriptor_tables() argument
625 vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); in vm_init_descriptor_tables()
626 vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); in vm_init_descriptor_tables()
627 vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); in vm_init_descriptor_tables()
628 vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); in vm_init_descriptor_tables()
632 set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS); in vm_init_descriptor_tables()
634 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; in vm_init_descriptor_tables()
637 kvm_seg_fill_gdt_64bit(vm, &seg); in vm_init_descriptor_tables()
640 kvm_seg_fill_gdt_64bit(vm, &seg); in vm_init_descriptor_tables()
642 kvm_seg_set_tss_64bit(vm->arch.tss, &seg); in vm_init_descriptor_tables()
643 kvm_seg_fill_gdt_64bit(vm, &seg); in vm_init_descriptor_tables()
646 void vm_install_exception_handler(struct kvm_vm *vm, int vector, in vm_install_exception_handler() argument
649 vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); in vm_install_exception_handler()
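vm_install_exception_handler() stores a guest-side handler pointer in the per-VM handlers page set up by vm_init_descriptor_tables(), so the IDT stub dispatches to it by vector. A typical test-side sketch, assuming GP_VECTOR and struct ex_regs from the selftests processor.h; guest_gp_handler and the 2-byte skip are hypothetical:

	static void guest_gp_handler(struct ex_regs *regs)
	{
		/* Hypothetical fixup: step over a known 2-byte faulting instruction. */
		regs->rip += 2;
	}

	static void install_handlers(struct kvm_vm *vm)
	{
		vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
	}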
662 void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus) in kvm_arch_vm_post_create() argument
669 vm_create_irqchip(vm); in kvm_arch_vm_post_create()
670 vm_init_descriptor_tables(vm); in kvm_arch_vm_post_create()
672 sync_global_to_guest(vm, host_cpu_is_intel); in kvm_arch_vm_post_create()
673 sync_global_to_guest(vm, host_cpu_is_amd); in kvm_arch_vm_post_create()
674 sync_global_to_guest(vm, is_forced_emulation_enabled); in kvm_arch_vm_post_create()
675 sync_global_to_guest(vm, pmu_errata_mask); in kvm_arch_vm_post_create()
677 if (is_sev_vm(vm)) { in kvm_arch_vm_post_create()
680 vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); in kvm_arch_vm_post_create()
683 r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL); in kvm_arch_vm_post_create()
686 sync_global_to_guest(vm, guest_tsc_khz); in kvm_arch_vm_post_create()
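kvm_arch_vm_post_create() publishes host-detected state (CPU vendor, forced emulation, PMU errata, TSC frequency) into guest memory with sync_global_to_guest(). Tests use the same macro for their own globals; a sketch with a hypothetical variable:

	static bool host_has_widget;	/* hypothetical global the guest also reads */

	static void publish_host_state(struct kvm_vm *vm)
	{
		host_has_widget = true;
		sync_global_to_guest(vm, host_has_widget);	/* copy the host value into the guest copy */
	}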
698 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in vm_arch_vcpu_add() argument
705 stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), in vm_arch_vcpu_add()
724 vcpu = __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_add()
726 vcpu_init_sregs(vm, vcpu); in vm_arch_vcpu_add()
727 vcpu_init_xcrs(vm, vcpu); in vm_arch_vcpu_add()
749 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) in vm_arch_vcpu_recreate() argument
751 struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_recreate()
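vm_arch_vcpu_add() allocates a guest stack, creates the vCPU via __vm_vcpu_add(), and initializes sregs and XCRs; both it and vm_arch_vcpu_recreate() are reached through the generic creation helpers rather than called directly. A minimal end-to-end sketch, assuming the usual selftests entry points and a hypothetical guest_code():

	#include "kvm_util.h"
	#include "processor.h"
	#include "ucall_common.h"

	static void guest_code(void)
	{
		GUEST_DONE();
	}

	int main(void)
	{
		struct kvm_vcpu *vcpu;
		struct kvm_vm *vm;

		vm = vm_create_with_one_vcpu(&vcpu, guest_code);	/* ends up in vm_arch_vcpu_add() */
		vcpu_run(vcpu);
		kvm_vm_free(vm);
		return 0;
	}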
1089 int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2); in vcpu_save_xsave_state()
1192 void kvm_init_vm_address_properties(struct kvm_vm *vm) in kvm_init_vm_address_properties() argument
1194 if (is_sev_vm(vm)) { in kvm_init_vm_address_properties()
1195 vm->arch.sev_fd = open_sev_dev_path_or_exit(); in kvm_init_vm_address_properties()
1196 vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT)); in kvm_init_vm_address_properties()
1197 vm->gpa_tag_mask = vm->arch.c_bit; in kvm_init_vm_address_properties()
1199 vm->arch.sev_fd = -1; in kvm_init_vm_address_properties()
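For SEV guests the C-bit is both an encryption attribute and a GPA tag, which is why kvm_init_vm_address_properties() copies it into vm->gpa_tag_mask. A sketch of how a tagged GPA would be stripped, consistent with the vm_untag_gpa() calls earlier in the listing (strip_gpa_tag is an illustrative name, not the library helper):

	/* Drop the encryption tag so the address can index host-side mappings. */
	static inline vm_paddr_t strip_gpa_tag(struct kvm_vm *vm, vm_paddr_t gpa)
	{
		return gpa & ~vm->gpa_tag_mask;
	}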
1251 unsigned long vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
1253 const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ in vm_compute_max_gfn()
1264 guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits; in vm_compute_max_gfn()
1265 TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits, in vm_compute_max_gfn()
1268 max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1; in vm_compute_max_gfn()
1275 if (vm->pa_bits < 40) in vm_compute_max_gfn()
1292 max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1; in vm_compute_max_gfn()
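vm_compute_max_gfn() clamps the maximum usable GFN by the guest's effective MAXPHYADDR and, on affected AMD hosts, by a 12 GiB HyperTransport carve-out near the top of the address space. A worked example with assumed values:

	/*
	 * Assumed: guest_maxphyaddr = 40, page_shift = 12 (4 KiB pages).
	 *   max_gfn      = (1ULL << (40 - 12)) - 1 = 0x0fffffff
	 *   num_ht_pages = 12 << (30 - 12)         = 3145728 frames (12 GiB)
	 * On an affected host the usable maximum drops by roughly num_ht_pages
	 * frames; otherwise max_gfn is returned unchanged.
	 */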