xref: /linux/arch/riscv/include/asm/kvm_tlb.h (revision 63eb28bb1402891b1ad2be02a530f29a9dd7f1cd)
14ecbd3ebSAnup Patel /* SPDX-License-Identifier: GPL-2.0-only */
24ecbd3ebSAnup Patel /*
34ecbd3ebSAnup Patel  * Copyright (c) 2025 Ventana Micro Systems Inc.
44ecbd3ebSAnup Patel  */
54ecbd3ebSAnup Patel 
64ecbd3ebSAnup Patel #ifndef __RISCV_KVM_TLB_H_
74ecbd3ebSAnup Patel #define __RISCV_KVM_TLB_H_
84ecbd3ebSAnup Patel 
94ecbd3ebSAnup Patel #include <linux/kvm_types.h>
104ecbd3ebSAnup Patel 
/*
 * Kinds of deferred HFENCE requests that can be queued on a vCPU.
 *
 * GVMA_* entries map to hfence.gvma (G-stage, guest-physical TLB
 * invalidation) scoped by VMID; VVMA_* entries map to hfence.vvma
 * (VS-stage, guest-virtual TLB invalidation), optionally narrowed by
 * guest ASID and/or a guest-virtual address range.
 */
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	/* Flush a guest-physical address range for one VMID */
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	/* Flush all G-stage mappings for one VMID */
	KVM_RISCV_HFENCE_GVMA_VMID_ALL,
	/* Flush a guest-virtual address range for one guest ASID */
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	/* Flush all VS-stage mappings for one guest ASID */
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	/* Flush a guest-virtual address range across all guest ASIDs */
	KVM_RISCV_HFENCE_VVMA_GVA,
	/* Flush all VS-stage mappings across all guest ASIDs */
	KVM_RISCV_HFENCE_VVMA_ALL
};
204ecbd3ebSAnup Patel 
/*
 * One queued HFENCE request (element of a vCPU's pending-fence queue,
 * sized by KVM_RISCV_VCPU_MAX_HFENCE).
 *
 * NOTE(review): which fields are meaningful depends on @type; fields a
 * given type does not use are presumably ignored by the consumer —
 * confirm against the queue-processing code in vcpu/tlb handling.
 */
struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;	/* which fence to perform */
	unsigned long asid;			/* guest ASID (VVMA_ASID_* types) */
	unsigned long vmid;			/* VMID the fence is scoped to */
	unsigned long order;			/* log2 of invalidation granule size */
	gpa_t addr;	/* range start; guest-virtual for VVMA_*_GVA types */
	gpa_t size;	/* range length in bytes */
};
294ecbd3ebSAnup Patel 
/* Capacity of the per-vCPU queue of pending struct kvm_riscv_hfence */
#define KVM_RISCV_VCPU_MAX_HFENCE	64

/* Smallest invalidation order for G-stage TLB ops: 2^12 = 4KiB pages */
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

/*
 * Local-hart TLB maintenance: each helper issues the corresponding
 * hfence.gvma/hfence.vvma on the calling CPU only.  Range variants take
 * a start address, a size, and the granule @order to step by.
 */

/* hfence.gvma of [gpa, gpa+gpsz) for @vmid on this hart */
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
/* hfence.gvma of all guest-physical mappings for @vmid on this hart */
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
/* hfence.gvma of [gpa, gpa+gpsz) across all VMIDs on this hart */
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
/* hfence.gvma of everything (all VMIDs, all addresses) on this hart */
void kvm_riscv_local_hfence_gvma_all(void);
/* hfence.vvma of [gva, gva+gvsz) for @asid under @vmid on this hart */
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
/* hfence.vvma of all mappings for @asid under @vmid on this hart */
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
/* hfence.vvma of [gva, gva+gvsz) for all ASIDs under @vmid on this hart */
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
/* hfence.vvma of all mappings under @vmid on this hart */
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
524ecbd3ebSAnup Patel 
/*
 * Per-vCPU request processors, invoked on the vCPU's own hart in
 * response to queued KVM requests (fence.i, full VVMA flush, or the
 * generic hfence queue drained by kvm_riscv_hfence_process()).
 */
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
584ecbd3ebSAnup Patel 
/*
 * VM-wide fence request APIs: target the set of vCPUs selected by
 * @hbase/@hmask (base hart id plus bitmask; the all-harts case is
 * presumably encoded via the mask — confirm against the definitions
 * in vcpu_sbi/tlb.c).  Each maps to the matching local_hfence_*
 * primitive above, executed on every targeted vCPU.
 */
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid,
				    unsigned long vmid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid, unsigned long vmid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order, unsigned long vmid);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long vmid);
834ecbd3ebSAnup Patel 
844ecbd3ebSAnup Patel #endif
85