/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_TLB_H_
#define __RISCV_KVM_TLB_H_

#include <linux/kvm_types.h>

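/*
 * Types of deferred fence requests that can be queued towards a VCPU.
 * The per-type summaries below are inferred from the identifiers and the
 * RISC-V HFENCE.GVMA/HFENCE.VVMA semantics, not taken from the original
 * header:
 *
 *   GVMA_VMID_GPA - flush G-stage (guest-physical) mappings of a VMID
 *                   for a GPA range
 *   VVMA_ASID_GVA - flush VS-stage mappings of a guest ASID for a GVA range
 *   VVMA_ASID_ALL - flush all VS-stage mappings of a guest ASID
 *   VVMA_GVA      - flush VS-stage mappings for a GVA range irrespective
 *                   of ASID
 */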
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

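/*
 * A single queued hfence request. Which of @asid, @vmid, @addr and @size
 * are meaningful depends on @type; @order is assumed to be the page order
 * of the granule used when walking the address range.
 */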
struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long vmid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

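/* Capacity of the hfence request queue of a VCPU (inferred from the name). */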
#define KVM_RISCV_VCPU_MAX_HFENCE	64

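/* Minimum page order (4KiB pages) used for ranged G-stage TLB flushes. */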
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

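/*
 * Low-level helpers that issue HFENCE.GVMA/HFENCE.VVMA (or a full flush)
 * on the local hart only, for the given VMID/ASID and address range.
 */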
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

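/* Process a pending TLB flush request for @vcpu (inferred from the name). */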
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);

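/*
 * Process pending FENCE.I, HFENCE.VVMA-all and queued hfence requests
 * targeting @vcpu; presumably run from VCPU context while handling
 * KVM requests.
 */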
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

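/*
 * VM-wide fence helpers: @hbase and @hmask select the target VCPUs using
 * the SBI-style hart base plus mask convention; the remaining arguments
 * give the address range, page order and, where applicable, the guest
 * ASID of the flush. These are assumed to queue requests towards the
 * selected VCPUs rather than flush directly.
 */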
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

#endif