/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_MMU_H_
#define __RISCV_KVM_MMU_H_

#include <linux/kvm_types.h>

struct kvm_gstage_mapping {
	gpa_t addr;	/* Guest physical address of the mapping */
	pte_t pte;	/* PTE installed for this mapping */
	u32 level;	/* Page table level of the PTE */
};

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write,
			 struct kvm_gstage_mapping *out_map);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

#endif
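/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * header): a g-stage fault handler built on the API above could call
 * kvm_riscv_gstage_map() and inspect the kvm_gstage_mapping it fills in.
 * The helper name kvm_riscv_gstage_map_and_trace() and the pr_debug()
 * message are assumptions for this example; a real caller would live in a
 * C file under arch/riscv/kvm/, not in this header.
 */
#include <linux/printk.h>

static inline int kvm_riscv_gstage_map_and_trace(struct kvm_vcpu *vcpu,
						 struct kvm_memory_slot *memslot,
						 gpa_t gpa, unsigned long hva,
						 bool is_write)
{
	struct kvm_gstage_mapping map = {};
	int ret;

	/* Establish the g-stage mapping and capture the resulting PTE/level. */
	ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &map);
	if (!ret)
		pr_debug("g-stage map: gpa=0x%llx level=%u\n", map.addr, map.level);

	return ret;
}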