Lines matching refs:kvm in arch/riscv/kvm/mmu.c. Each entry shows the file line number, the matching source line, and the enclosing function, with a note on whether kvm is bound there as an argument or a local.
19 static void mmu_wp_memory_region(struct kvm *kvm, int slot) in mmu_wp_memory_region() argument
21 struct kvm_memslots *slots = kvm_memslots(kvm); in mmu_wp_memory_region()
27 gstage.kvm = kvm; in mmu_wp_memory_region()
29 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in mmu_wp_memory_region()
30 gstage.pgd = kvm->arch.pgd; in mmu_wp_memory_region()
32 spin_lock(&kvm->mmu_lock); in mmu_wp_memory_region()
34 spin_unlock(&kvm->mmu_lock); in mmu_wp_memory_region()
35 kvm_flush_remote_tlbs_memslot(kvm, memslot); in mmu_wp_memory_region()
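Every function in this listing opens with the same preamble: fill a struct kvm_gstage descriptor with the owning kvm, the current VMID and the g-stage PGD, then walk the page table under kvm->mmu_lock. The sketch below reconstructs mmu_wp_memory_region() around the matched lines 19-35; the lines that do not reference kvm (the memslot range computation, the gstage.flags initialisation, and the write-protect walker, called kvm_riscv_gstage_wp_range() here) are not visible in the matches and are assumptions.

static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
        struct kvm_gstage gstage;

        /* Describe this VM's g-stage: owner, current VMID, root page table. */
        gstage.kvm = kvm;
        gstage.flags = 0;       /* assumed: line 28 is not in the matches */
        gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
        gstage.pgd = kvm->arch.pgd;

        /* Write-protect the whole slot under the MMU lock ... */
        spin_lock(&kvm->mmu_lock);
        kvm_riscv_gstage_wp_range(&gstage, start, end);  /* assumed walker name */
        spin_unlock(&kvm->mmu_lock);

        /* ... then invalidate stale writable translations on all harts. */
        kvm_flush_remote_tlbs_memslot(kvm, memslot);
}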
38 int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, in kvm_riscv_mmu_ioremap() argument
52 gstage.kvm = kvm; in kvm_riscv_mmu_ioremap()
54 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_riscv_mmu_ioremap()
55 gstage.pgd = kvm->arch.pgd; in kvm_riscv_mmu_ioremap()
74 spin_lock(&kvm->mmu_lock); in kvm_riscv_mmu_ioremap()
76 spin_unlock(&kvm->mmu_lock); in kvm_riscv_mmu_ioremap()
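kvm_riscv_mmu_ioremap() (lines 38-76) adds one more ingredient: a kvm_mmu_memory_cache that is topped up outside the spinlock so the walker never has to allocate memory while kvm->mmu_lock is held. A condensed sketch of the per-page loop; the in_atomic parameter and its GFP flags, the kvm_gstage_mapping type, the kvm_riscv_gstage_set_pte() helper and the kvm_riscv_gstage_pgd_levels depth do not appear in the matches and are assumed names.

int kvm_riscv_mmu_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
                          unsigned long size, bool writable, bool in_atomic)
{
        /* Per-call page-table cache; atomic allocations when callers cannot sleep. */
        struct kvm_mmu_memory_cache pcache = {
                .gfp_custom = in_atomic ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
                .gfp_zero = __GFP_ZERO,
        };
        struct kvm_gstage_mapping map;  /* assumed: gpa + leaf pte + level */
        struct kvm_gstage gstage = {
                .kvm = kvm,
                .flags = 0,
                .vmid = READ_ONCE(kvm->arch.vmid.vmid),
                .pgd = kvm->arch.pgd,
        };
        phys_addr_t addr, end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
        kvm_pfn_t pfn = __phys_to_pfn(hpa);
        int ret = 0;

        for (addr = gpa; addr < end; addr += PAGE_SIZE, pfn++) {
                map.addr = addr;
                map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
                map.level = 0;
                if (!writable)
                        map.pte = pte_wrprotect(map.pte);

                /* Pre-allocate intermediate tables while we may still sleep. */
                ret = kvm_mmu_topup_memory_cache(&pcache, kvm_riscv_gstage_pgd_levels);
                if (ret)
                        break;

                /* Install the leaf PTE under the MMU lock (lines 74-76). */
                spin_lock(&kvm->mmu_lock);
                ret = kvm_riscv_gstage_set_pte(&gstage, &pcache, &map);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        break;
        }

        kvm_mmu_free_memory_cache(&pcache);
        return ret;
}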
88 void kvm_riscv_mmu_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size) in kvm_riscv_mmu_iounmap() argument
92 gstage.kvm = kvm; in kvm_riscv_mmu_iounmap()
94 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_riscv_mmu_iounmap()
95 gstage.pgd = kvm->arch.pgd; in kvm_riscv_mmu_iounmap()
97 spin_lock(&kvm->mmu_lock); in kvm_riscv_mmu_iounmap()
99 spin_unlock(&kvm->mmu_lock); in kvm_riscv_mmu_iounmap()
102 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
112 gstage.kvm = kvm; in kvm_arch_mmu_enable_log_dirty_pt_masked()
114 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_arch_mmu_enable_log_dirty_pt_masked()
115 gstage.pgd = kvm->arch.pgd; in kvm_arch_mmu_enable_log_dirty_pt_masked()
120 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
124 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) in kvm_arch_free_memslot() argument
128 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
132 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
134 kvm_riscv_mmu_free_pgd(kvm); in kvm_arch_flush_shadow_all()
137 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
144 gstage.kvm = kvm; in kvm_arch_flush_shadow_memslot()
146 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_arch_flush_shadow_memslot()
147 gstage.pgd = kvm->arch.pgd; in kvm_arch_flush_shadow_memslot()
149 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
151 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
154 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
165 mmu_wp_memory_region(kvm, new->id); in kvm_arch_commit_memory_region()
168 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
244 ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start, in kvm_arch_prepare_memory_region()
256 kvm_riscv_mmu_iounmap(kvm, base_gpa, size); in kvm_arch_prepare_memory_region()
263 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
267 if (!kvm->arch.pgd) in kvm_unmap_gfn_range()
270 gstage.kvm = kvm; in kvm_unmap_gfn_range()
272 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_unmap_gfn_range()
273 gstage.pgd = kvm->arch.pgd; in kvm_unmap_gfn_range()
280 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
287 if (!kvm->arch.pgd) in kvm_age_gfn()
292 gstage.kvm = kvm; in kvm_age_gfn()
294 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_age_gfn()
295 gstage.pgd = kvm->arch.pgd; in kvm_age_gfn()
303 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
310 if (!kvm->arch.pgd) in kvm_test_age_gfn()
315 gstage.kvm = kvm; in kvm_test_age_gfn()
317 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_test_age_gfn()
318 gstage.pgd = kvm->arch.pgd; in kvm_test_age_gfn()
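The gfn-range hooks kvm_unmap_gfn_range(), kvm_age_gfn() and kvm_test_age_gfn() (lines 263-318) differ from the helpers above in two ways the matches make visible: they return early when no g-stage PGD exists yet, and they take no spinlock themselves because common KVM code calls them with kvm->mmu_lock already held. A sketch of the unmap hook; the range-unmap walker name is an assumption.

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        struct kvm_gstage gstage;

        /* No g-stage page table yet, so nothing can be mapped (line 267). */
        if (!kvm->arch.pgd)
                return false;

        gstage.kvm = kvm;
        gstage.flags = 0;
        gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
        gstage.pgd = kvm->arch.pgd;

        /* kvm->mmu_lock is already held by the caller, hence no locking
         * here; unmap the gfn range (assumed helper). */
        kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
                                     (range->end - range->start) << PAGE_SHIFT,
                                     range->may_block);

        /* No extra remote TLB flush requested from common code. */
        return false;
}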
336 struct kvm *kvm = vcpu->kvm; in kvm_riscv_mmu_map() local
344 gstage.kvm = kvm; in kvm_riscv_mmu_map()
346 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_riscv_mmu_map()
347 gstage.pgd = kvm->arch.pgd; in kvm_riscv_mmu_map()
387 mmu_seq = kvm->mmu_invalidate_seq; in kvm_riscv_mmu_map()
414 spin_lock(&kvm->mmu_lock); in kvm_riscv_mmu_map()
416 if (mmu_invalidate_retry(kvm, mmu_seq)) in kvm_riscv_mmu_map()
420 mark_page_dirty_in_slot(kvm, memslot, gfn); in kvm_riscv_mmu_map()
432 kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable); in kvm_riscv_mmu_map()
433 spin_unlock(&kvm->mmu_lock); in kvm_riscv_mmu_map()
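kvm_riscv_mmu_map() (lines 336-433) is the g-stage fault handler and follows the usual KVM recipe for racing against MMU-notifier invalidations: snapshot kvm->mmu_invalidate_seq before resolving the host page, then re-check with mmu_invalidate_retry() under kvm->mmu_lock before installing the mapping. A heavily condensed sketch; the exact signature, the page-cache field and topup depth, and the kvm_riscv_gstage_map_page() walker are assumptions, and huge-page and MMIO handling are omitted.

int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                      gpa_t gpa, unsigned long hva, bool is_write,
                      struct kvm_gstage_mapping *out_map)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct page *page = NULL;
        bool writable = false;
        unsigned long mmu_seq;
        kvm_pfn_t hfn;
        int ret;
        struct kvm_gstage gstage = {
                .kvm = kvm,
                .flags = 0,
                .vmid = READ_ONCE(kvm->arch.vmid.vmid),
                .pgd = kvm->arch.pgd,
        };

        /* Make sure the walker can allocate page tables without sleeping. */
        ret = kvm_mmu_topup_memory_cache(pcache, kvm_riscv_gstage_pgd_levels);
        if (ret)
                return ret;

        /* Snapshot the invalidation sequence count before faulting the page
         * in (line 387); pairs with mmu_invalidate_retry() below. */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /* Resolve gfn -> host page outside the MMU lock (may sleep). */
        hfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
        if (is_error_noslot_pfn(hfn))
                return -EFAULT;

        spin_lock(&kvm->mmu_lock);

        /* An MMU-notifier invalidation raced with us: drop the page and let
         * the vCPU fault again (line 416). */
        ret = 0;
        if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;

        if (writable)
                mark_page_dirty_in_slot(kvm, memslot, gfn);     /* line 420 */

        /* Install the leaf mapping in the g-stage page table. */
        ret = kvm_riscv_gstage_map_page(&gstage, pcache, gpa,
                                        hfn << PAGE_SHIFT, PAGE_SIZE,
                                        !writable, true, out_map);

out_unlock:
        /* Release the faulted-in page, marking it dirty if it was mapped
         * writable (line 432). */
        kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
        spin_unlock(&kvm->mmu_lock);
        return ret;
}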
437 int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm) in kvm_riscv_mmu_alloc_pgd() argument
441 if (kvm->arch.pgd != NULL) { in kvm_riscv_mmu_alloc_pgd()
450 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_mmu_alloc_pgd()
451 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_mmu_alloc_pgd()
456 void kvm_riscv_mmu_free_pgd(struct kvm *kvm) in kvm_riscv_mmu_free_pgd() argument
461 spin_lock(&kvm->mmu_lock); in kvm_riscv_mmu_free_pgd()
462 if (kvm->arch.pgd) { in kvm_riscv_mmu_free_pgd()
463 gstage.kvm = kvm; in kvm_riscv_mmu_free_pgd()
465 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); in kvm_riscv_mmu_free_pgd()
466 gstage.pgd = kvm->arch.pgd; in kvm_riscv_mmu_free_pgd()
468 pgd = READ_ONCE(kvm->arch.pgd); in kvm_riscv_mmu_free_pgd()
469 kvm->arch.pgd = NULL; in kvm_riscv_mmu_free_pgd()
470 kvm->arch.pgd_phys = 0; in kvm_riscv_mmu_free_pgd()
472 spin_unlock(&kvm->mmu_lock); in kvm_riscv_mmu_free_pgd()
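Teardown in kvm_riscv_mmu_free_pgd() (lines 456-472) shows the ordering that makes the !kvm->arch.pgd checks elsewhere safe: the root pointer is cleared while kvm->mmu_lock is held, and the PGD pages are freed only after the lock is dropped. A sketch; the unmap walker and the g-stage size constants are assumed names.

void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
{
        struct kvm_gstage gstage;
        void *pgd = NULL;

        spin_lock(&kvm->mmu_lock);
        if (kvm->arch.pgd) {
                gstage.kvm = kvm;
                gstage.flags = 0;
                gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
                gstage.pgd = kvm->arch.pgd;

                /* Unmap the whole guest physical address space (assumed
                 * helper and size constant). */
                kvm_riscv_gstage_unmap_range(&gstage, 0UL,
                                             kvm_riscv_gstage_gpa_size, false);

                /* Clear the root pointer under the lock so concurrent
                 * helpers see !kvm->arch.pgd and bail out (lines 468-470). */
                pgd = READ_ONCE(kvm->arch.pgd);
                kvm->arch.pgd = NULL;
                kvm->arch.pgd_phys = 0;
        }
        spin_unlock(&kvm->mmu_lock);

        /* Free the PGD pages only after dropping the spinlock. */
        if (pgd)
                free_pages((unsigned long)pgd,
                           get_order(kvm_riscv_gstage_pgd_size));
}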
481 struct kvm_arch *k = &vcpu->kvm->arch; in kvm_riscv_mmu_update_hgatp()
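Finally, kvm_riscv_mmu_update_hgatp() (line 481) is where the state maintained above reaches hardware: the VMID and the physical address of the g-stage PGD are packed into the hgatp CSR together with the paging mode. A sketch assuming the HGATP_* field macros from asm/csr.h and a kvm_riscv_gstage_mode mode value; the in-tree version likely also flushes the local g-stage TLB when VMIDs are not available, which is omitted here.

void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
{
        struct kvm_arch *k = &vcpu->kvm->arch;
        unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;

        /* Tag guest translations with this VM's VMID ... */
        hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;

        /* ... and point the MMU at the g-stage root page table. */
        hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

        csr_write(CSR_HGATP, hgatp);
}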