/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2025 Ventana Micro Systems Inc.
 */

#ifndef __RISCV_KVM_TLB_H_
#define __RISCV_KVM_TLB_H_

#include <linux/kvm_types.h>

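/*
 * Types of deferred HFENCE operations that can be queued for a VCPU.
 * GVMA fences invalidate G-stage (guest-physical) translations for a
 * VMID, while VVMA fences invalidate VS-stage (guest-virtual)
 * translations, optionally restricted to a guest ASID.
 */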
enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

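/*
 * A single queued HFENCE request. Note that @addr and @size hold a
 * guest physical range for GVMA fences and, despite the gpa_t type, a
 * guest virtual range for VVMA fences; @order is the page order of the
 * mappings being invalidated, and @asid appears to be meaningful only
 * for the VVMA_ASID variants.
 */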
struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

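/* Depth of the per-VCPU queue of pending HFENCE requests. */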
#define KVM_RISCV_VCPU_MAX_HFENCE	64

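/* Smallest page order covered by a G-stage TLB fence (2^12 = 4 KiB). */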
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

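/*
 * Local TLB maintenance: these helpers issue HFENCE.GVMA/HFENCE.VVMA
 * (or an equivalent full flush) on the calling hart only.
 *
 * Illustrative sketch (the gpa value is assumed): to flush a single
 * 4 KiB guest-physical page for VMID 1 on this hart, a caller might do
 *
 *	kvm_riscv_local_hfence_gvma_vmid_gpa(1, gpa, PAGE_SIZE,
 *					     KVM_RISCV_GSTAGE_TLB_MIN_ORDER);
 */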
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

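/*
 * Request handlers: these run on the target VCPU itself, typically in
 * response to a queued KVM request (e.g. KVM_REQ_HFENCE), and apply
 * the pending fence work before the VCPU re-enters the guest.
 */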
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

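/*
 * VM-wide fences: queue the requested fence for every VCPU selected by
 * the SBI-style hart mask (@hbase is the starting hart id and @hmask a
 * bitmask relative to it) and kick those VCPUs so the fence takes
 * effect before they next enter the guest.
 */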
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

#endif