Lines matching full-word "vm". Each entry shows the source-file line number followed by the matching line; the file appears to be the RISC-V KVM selftests' processor library (upstream: tools/testing/selftests/kvm/lib/riscv/processor.c).
30 static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
36 static uint64_t ptrs_per_pte(struct kvm_vm *vm)
55 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
59 TEST_ASSERT(level < vm->mmu.pgtable_levels,
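The three static helpers above (source lines 30-59) decode and index RISC-V page-table entries. A hedged reconstruction of their bodies, assuming the PGTBL_* constants from the selftests' riscv processor.h; the upstream version drives pte_index() from per-level mask/shift tables, which for the 9-bits-per-level Sv39/Sv48 layout is equivalent to the arithmetic form below:

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	/* Extract the PPN field of a PTE and shift it back up to a GPA. */
	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
	       PGTBL_PAGE_SIZE_SHIFT;
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	/* One 4 KiB table of 8-byte PTEs holds 512 entries. */
	return PGTBL_PAGE_SIZE / sizeof(uint64_t);
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	TEST_ASSERT(level > -1,
		    "Negative page table level (%d) not possible", level);
	TEST_ASSERT(level < vm->mmu.pgtable_levels,
		    "Invalid page table level (%d)", level);

	/* Each level consumes 9 VA bits above the 12-bit page offset. */
	return (gva >> (PGTBL_PAGE_SIZE_SHIFT + 9 * level)) &
	       (ptrs_per_pte(vm) - 1);
}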
65 void virt_arch_pgd_alloc(struct kvm_vm *vm)
67 size_t nr_pages = vm_page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
69 if (vm->mmu.pgd_created)
72 vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
74 vm->memslots[MEM_REGION_PT]);
75 vm->mmu.pgd_created = true;
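Filled out, the root-table allocator (source lines 65-75) reads roughly as below. The table spans vm_page_align(512 * 8) bytes, i.e. one page for 4 KiB pages; KVM_GUEST_PAGE_TABLE_MIN_PADDR is the elided minimum-paddr argument and is an assumption taken from the upstream call:

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = vm_page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;

	if (vm->mmu.pgd_created)
		return;

	/* Back the root table with pages from the page-table memslot. */
	vm->mmu.pgd = vm_phy_pages_alloc(vm, nr_pages,
					 KVM_GUEST_PAGE_TABLE_MIN_PADDR,
					 vm->memslots[MEM_REGION_PT]);
	vm->mmu.pgd_created = true;
}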
78 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
81 int level = vm->mmu.pgtable_levels - 1;
83 TEST_ASSERT((vaddr % vm->page_size) == 0,
85 " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
86 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
87 (vaddr >> vm->page_shift)),
89 TEST_ASSERT((paddr % vm->page_size) == 0,
91 " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
92 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
94 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
95 paddr, vm->max_gfn, vm->page_size);
97 ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
99 next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
106 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
107 pte_index(vm, vaddr, level) * 8;
109 next_ppn = vm_alloc_page_table(vm) >>
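After the sanity checks, virt_arch_pg_map() walks from the root toward level 0, allocating any missing intermediate table with vm_alloc_page_table(), and finally writes the leaf PTE. A sketch of the elided walk, assuming the selftests' PGTBL_PTE_VALID_MASK and PGTBL_PTE_PERM_MASK (R/W/X) definitions:

	/* Root level: allocate the next-level table if the slot is empty. */
	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
	if (!*ptep) {
		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
			PGTBL_PTE_VALID_MASK;
	}
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, vaddr, level) * 8;
		if (!*ptep && level > 0) {
			next_ppn = vm_alloc_page_table(vm) >>
				   PGTBL_PAGE_SIZE_SHIFT;
			*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
				PGTBL_PTE_VALID_MASK;
		}
		level--;
	}

	/* Leaf PTE: map paddr and set the valid + permission bits. */
	*ptep = ((paddr >> PGTBL_PAGE_SIZE_SHIFT) << PGTBL_PTE_ADDR_SHIFT) |
		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;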
122 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
125 int level = vm->mmu.pgtable_levels - 1;
127 if (!vm->mmu.pgd_created)
130 ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
136 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
137 pte_index(vm, gva, level) * 8;
143 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
146 TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
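addr_arch_gva2gpa() (source lines 122-146) is the same walk in read-only form: it descends the live tables via addr_gpa2hva() and fails the test if the address was never mapped. A sketch of the whole function; presence is checked as !*ptep here:

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;
	int level = vm->mmu.pgtable_levels - 1;

	if (!vm->mmu.pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
	if (!*ptep)
		goto unmapped_gva;
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		if (!*ptep)
			goto unmapped_gva;
		level--;
	}

	/* Leaf found: combine the PTE's PPN with the in-page offset. */
	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
		  gva, level);
	exit(1);
}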
151 static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
161 for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
162 ptep = addr_gpa2hva(vm, pte);
167 pte_dump(stream, vm, indent + 1,
168 pte_addr(vm, *ptep), level - 1);
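pte_dump() (source lines 151-168) recursively prints every present entry of one table and descends one level per recursion. A sketch, with an assumed fprintf format:

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
		     vm_paddr_t page, int level)
{
	uint64_t pte, *ptep;

	if (level < 0)
		return;

	/* Scan all 512 slots of this table; skip not-present entries. */
	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spte: %lx: %lx at %p\n",
			indent, "", pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}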
173 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
175 struct kvm_mmu *mmu = &vm->mmu;
182 for (pgd = mmu->pgd; pgd < mmu->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
183 ptep = addr_gpa2hva(vm, pgd);
188 pte_dump(stream, vm, indent + 1,
189 pte_addr(vm, *ptep), level - 1);
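virt_arch_dump() (source lines 173-189) is the top of that recursion: it scans the root table and hands each present entry to pte_dump(). A sketch consistent with the match lines:

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct kvm_mmu *mmu = &vm->mmu;
	int level = mmu->pgtable_levels - 1;
	vm_paddr_t pgd;
	uint64_t *ptep;

	if (!mmu->pgd_created)
		return;

	for (pgd = mmu->pgd; pgd < mmu->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n",
			indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}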
195 struct kvm_vm *vm = vcpu->vm;
204 switch (vm->mode) {
221 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
230 satp = (vm->mmu.pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
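Source lines 195-230 belong to the vcpu MMU setup (riscv_vcpu_mmu_setup() upstream): after rejecting unsupported guest modes it packs the root table's PPN plus the Sv48 mode into satp and programs the CSR. A sketch; SATP_PPN, SATP_MODE_48 and the reg-id macro are assumed from the selftests' headers and vary slightly across versions:

static void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
	struct kvm_vm *vm = vcpu->vm;
	unsigned long satp;

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	/* satp = Sv48 mode bits | PPN of the root page table. */
	satp = (vm->mmu.pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
	satp |= SATP_MODE_48;

	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
}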
314 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
323 stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
324 vm->page_size;
325 stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
329 vcpu = __vm_vcpu_add(vm, vcpu_id);
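vm_arch_vcpu_add() (source lines 314-329) sizes the guest stack as DEFAULT_STACK_PGS pages when pages are 4 KiB, and as a single (already large) page otherwise, then allocates it before creating the vcpu. A sketch of that prologue; DEFAULT_RISCV_GUEST_STACK_VADDR_MIN is an assumption from the upstream header:

	struct kvm_vcpu *vcpu;
	vm_vaddr_t stack_vaddr;
	size_t stack_size;

	/* 4 KiB pages get a multi-page stack; bigger pages get one page. */
	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	riscv_vcpu_mmu_setup(vcpu);
	/* Remaining setup (sp at stack top, pc, vector base) is elided. */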
450 void vm_init_vector_tables(struct kvm_vm *vm)
452 vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
453 vm->page_size, MEM_REGION_DATA);
455 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
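vm_init_vector_tables() allocates the struct handlers table in guest memory and then patches the guest-side global exception_handlers through its host mapping, so guest code can find the table later. The guest trap dispatcher presumably consumes it along these lines (a loose sketch of the upstream route_exception(); CAUSE_IRQ_FLAG, NR_EXCEPTIONS and the guest_hang() fallback are assumptions):

void route_exception(struct ex_regs *regs)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	int vector = 0, ec;

	ec = regs->cause & ~CAUSE_IRQ_FLAG;
	if (ec >= NR_EXCEPTIONS)
		goto guest_exit;

	/* All interrupts share one slot: row 1, column 0 of the table. */
	if (regs->cause & CAUSE_IRQ_FLAG) {
		vector = 1;
		ec = 0;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

guest_exit:
	guest_hang();	/* no handler registered: park the guest */
}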
458 void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
460 struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
466 void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
468 struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
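Taken together, a test wires up trap handling like the hedged snippet below; my_ill_handler, my_irq_handler and setup_traps() are hypothetical, and the exception vector is the raw scause code (2 = illegal instruction):

static void my_ill_handler(struct ex_regs *regs)
{
	regs->epc += 4;	/* skip the faulting 4-byte instruction */
}

static void my_irq_handler(struct ex_regs *regs)
{
	/* acknowledge or count the interrupt here */
}

static void setup_traps(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vm_init_vector_tables(vm);
	vcpu_init_vector_tables(vcpu);	/* per-vcpu half of the setup */
	vm_install_exception_handler(vm, 2 /* illegal instruction */,
				     my_ill_handler);
	vm_install_interrupt_handler(vm, my_irq_handler);
}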
539 * supported vm mode.