/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024 Ventana Micro Systems Inc.
 */

#ifndef __KVM_NACL_H
#define __KVM_NACL_H

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>
#include <asm/csr.h>
#include <asm/sbi.h>

struct kvm_vcpu_arch;

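/*
 * Static keys tracking whether the SBI Nested Acceleration (NACL)
 * extension and its optional features were detected on the host.
 */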
DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
#define kvm_riscv_nacl_available()	\
	static_branch_unlikely(&kvm_riscv_nacl_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
#define kvm_riscv_nacl_sync_csr_available()	\
	static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
#define kvm_riscv_nacl_sync_hfence_available()	\
	static_branch_unlikely(&kvm_riscv_nacl_sync_hfence_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
#define kvm_riscv_nacl_sync_sret_available()	\
	static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
#define kvm_riscv_nacl_autoswap_csr_available()	\
	static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available)

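/* Per-CPU NACL shared memory registered with the SBI implementation. */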
struct kvm_riscv_nacl {
	void *shmem;
	phys_addr_t shmem_phys;
};
DECLARE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);

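/*
 * Record an HFENCE request, described by @control, @page_num and
 * @page_count, in the NACL shared memory @shmem.
 */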
void __kvm_riscv_nacl_hfence(void *shmem,
			     unsigned long control,
			     unsigned long page_num,
			     unsigned long page_count);

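/*
 * Low-level world-switch which enters the guest by issuing the given
 * SBI call (NACL SYNC_SRET) rather than a direct sret.
 */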
void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
				unsigned long sbi_ext_id,
				unsigned long sbi_func_id);

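/* Module-wide probe/teardown and per-CPU enable/disable of NACL. */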
int kvm_riscv_nacl_enable(void);

void kvm_riscv_nacl_disable(void);

void kvm_riscv_nacl_exit(void);

int kvm_riscv_nacl_init(void);

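/*
 * The NACL shared memory stores XLEN-bit values in little-endian
 * byte order.
 */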
#ifdef CONFIG_32BIT
#define lelong_to_cpu(__x)	le32_to_cpu(__x)
#define cpu_to_lelong(__x)	cpu_to_le32(__x)
#else
#define lelong_to_cpu(__x)	le64_to_cpu(__x)
#define cpu_to_lelong(__x)	cpu_to_le64(__x)
#endif

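/* This CPU's NACL shared memory (virtual address). */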
#define nacl_shmem()							\
	this_cpu_ptr(&kvm_riscv_nacl)->shmem

#define nacl_scratch_read_long(__shmem, __offset)			\
({									\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	lelong_to_cpu(*__p);						\
})

#define nacl_scratch_write_long(__shmem, __offset, __val)		\
do {									\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	*__p = cpu_to_lelong(__val);					\
} while (0)

#define nacl_scratch_write_longs(__shmem, __offset, __array, __count)	\
do {									\
	unsigned int __i;						\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	for (__i = 0; __i < (__count); __i++)				\
		__p[__i] = cpu_to_lelong((__array)[__i]);		\
} while (0)

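/*
 * Ask the SBI implementation to process the pending HFENCE entry
 * (or entries) selected by __e.
 */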
#define nacl_sync_hfence(__e)						\
	sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE,		\
		  (__e), 0, 0, 0, 0, 0)

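/*
 * Encode the fields of an HFENCE shared memory entry: the config word,
 * the starting page number, and the page count.
 */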
#define nacl_hfence_mkconfig(__type, __order, __vmid, __asid)		\
({									\
	unsigned long __c = SBI_NACL_SHMEM_HFENCE_CONFIG_PEND;		\
	__c |= ((__type) & SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK)	\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT;		\
	__c |= (((__order) - SBI_NACL_SHMEM_HFENCE_ORDER_BASE) &	\
		SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK)		\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT;		\
	__c |= ((__vmid) & SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK)	\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT;		\
	__c |= ((__asid) & SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK);	\
	__c;								\
})

#define nacl_hfence_mkpnum(__order, __addr)				\
	((__addr) >> (__order))

#define nacl_hfence_mkpcount(__order, __size)				\
	((__size) >> (__order))

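/* Helpers to queue each supported type of HFENCE request in the shared memory. */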
#define nacl_hfence_gvma(__shmem, __gpa, __gpsz, __order)		\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA,		\
			   __order, 0, 0),				\
	nacl_hfence_mkpnum(__order, __gpa),				\
	nacl_hfence_mkpcount(__order, __gpsz))

#define nacl_hfence_gvma_all(__shmem)					\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL,	\
			   0, 0, 0), 0, 0)

#define nacl_hfence_gvma_vmid(__shmem, __vmid, __gpa, __gpsz, __order)	\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID,	\
			   __order, __vmid, 0),				\
	nacl_hfence_mkpnum(__order, __gpa),				\
	nacl_hfence_mkpcount(__order, __gpsz))

#define nacl_hfence_gvma_vmid_all(__shmem, __vmid)			\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL,	\
			   0, __vmid, 0), 0, 0)

#define nacl_hfence_vvma(__shmem, __vmid, __gva, __gvsz, __order)	\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA,		\
			   __order, __vmid, 0),				\
	nacl_hfence_mkpnum(__order, __gva),				\
	nacl_hfence_mkpcount(__order, __gvsz))

#define nacl_hfence_vvma_all(__shmem, __vmid)				\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL,	\
			   0, __vmid, 0), 0, 0)

#define nacl_hfence_vvma_asid(__shmem, __vmid, __asid, __gva, __gvsz, __order)\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID,	\
			   __order, __vmid, __asid),			\
	nacl_hfence_mkpnum(__order, __gva),				\
	nacl_hfence_mkpcount(__order, __gvsz))

#define nacl_hfence_vvma_asid_all(__shmem, __vmid, __asid)		\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL,	\
			   0, __vmid, __asid), 0, 0)

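/*
 * Access the CSR area of the NACL shared memory. Writes also set the
 * matching bit in the dirty bitmap so that the SBI implementation knows
 * which CSRs need to be synchronized.
 */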
#define nacl_csr_read(__shmem, __csr)					\
({									\
	unsigned long *__a = (__shmem) + SBI_NACL_SHMEM_CSR_OFFSET;	\
	lelong_to_cpu(__a[SBI_NACL_SHMEM_CSR_INDEX(__csr)]);		\
})

#define nacl_csr_write(__shmem, __csr, __val)				\
do {									\
	void *__s = (__shmem);						\
	unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr);		\
	unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET;	\
	u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET;		\
	__a[__i] = cpu_to_lelong(__val);				\
	__b[__i >> 3] |= 1U << (__i & 0x7);				\
} while (0)

#define nacl_csr_swap(__shmem, __csr, __val)				\
({									\
	void *__s = (__shmem);						\
	unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr);		\
	unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET;	\
	u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET;		\
	unsigned long __r = lelong_to_cpu(__a[__i]);			\
	__a[__i] = cpu_to_lelong(__val);				\
	__b[__i >> 3] |= 1U << (__i & 0x7);				\
	__r;								\
})

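/* Ask the SBI implementation to synchronize the CSR(s) selected by __csr. */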
#define nacl_sync_csr(__csr)						\
	sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_CSR,			\
		  (__csr), 0, 0, 0, 0, 0)

/*
 * Each ncsr_xyz() macro defined below has its own static branch, so every
 * use of an ncsr_xyz() macro emits a patchable direct jump. This means that
 * multiple back-to-back ncsr_xyz() invocations emit multiple patchable
 * direct jumps, which is sub-optimal.
 *
 * It is therefore recommended to avoid multiple back-to-back ncsr_xyz()
 * invocations; see the usage sketch below.
 */
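
/*
 * For example, rather than issuing back-to-back ncsr_read() calls, a
 * caller can test the static key once and use the nacl_csr_xyz()
 * accessors on a cached shared memory pointer. This is only an
 * illustrative sketch; CSR_HSTATUS and CSR_HEDELEG are arbitrary
 * example CSRs:
 *
 *	unsigned long hstatus, hedeleg;
 *
 *	if (kvm_riscv_nacl_available()) {
 *		void *shmem = nacl_shmem();
 *
 *		hstatus = nacl_csr_read(shmem, CSR_HSTATUS);
 *		hedeleg = nacl_csr_read(shmem, CSR_HEDELEG);
 *	} else {
 *		hstatus = csr_read(CSR_HSTATUS);
 *		hedeleg = csr_read(CSR_HEDELEG);
 *	}
 */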

#define ncsr_read(__csr)						\
({									\
	unsigned long __r;						\
	if (kvm_riscv_nacl_available())					\
		__r = nacl_csr_read(nacl_shmem(), __csr);		\
	else								\
		__r = csr_read(__csr);					\
	__r;								\
})

#define ncsr_write(__csr, __val)					\
do {									\
	if (kvm_riscv_nacl_sync_csr_available())			\
		nacl_csr_write(nacl_shmem(), __csr, __val);		\
	else								\
		csr_write(__csr, __val);				\
} while (0)

#define ncsr_swap(__csr, __val)						\
({									\
	unsigned long __r;						\
	if (kvm_riscv_nacl_sync_csr_available())			\
		__r = nacl_csr_swap(nacl_shmem(), __csr, __val);	\
	else								\
		__r = csr_swap(__csr, __val);				\
	__r;								\
})

#define nsync_csr(__csr)						\
do {									\
	if (kvm_riscv_nacl_sync_csr_available())			\
		nacl_sync_csr(__csr);					\
} while (0)

#endif