/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024 Ventana Micro Systems Inc.
 */

#ifndef __KVM_NACL_H
#define __KVM_NACL_H

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>
#include <asm/csr.h>
#include <asm/sbi.h>

struct kvm_vcpu_arch;

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
#define kvm_riscv_nacl_available() \
	static_branch_unlikely(&kvm_riscv_nacl_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
#define kvm_riscv_nacl_sync_csr_available() \
	static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
#define kvm_riscv_nacl_sync_hfence_available() \
	static_branch_unlikely(&kvm_riscv_nacl_sync_hfence_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
#define kvm_riscv_nacl_sync_sret_available() \
	static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available)

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
#define kvm_riscv_nacl_autoswap_csr_available() \
	static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available)

/* Per-CPU NACL shared memory registered with the SBI implementation */
struct kvm_riscv_nacl {
	void *shmem;
	phys_addr_t shmem_phys;
};
DECLARE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);

void __kvm_riscv_nacl_hfence(void *shmem,
			     unsigned long control,
			     unsigned long page_num,
			     unsigned long page_count);

void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
				unsigned long sbi_ext_id,
				unsigned long sbi_func_id);

int kvm_riscv_nacl_enable(void);

void kvm_riscv_nacl_disable(void);

void kvm_riscv_nacl_exit(void);

int kvm_riscv_nacl_init(void);

#ifdef CONFIG_32BIT
#define lelong_to_cpu(__x)	le32_to_cpu(__x)
#define cpu_to_lelong(__x)	cpu_to_le32(__x)
#else
#define lelong_to_cpu(__x)	le64_to_cpu(__x)
#define cpu_to_lelong(__x)	cpu_to_le64(__x)
#endif

#define nacl_shmem()							\
	this_cpu_ptr(&kvm_riscv_nacl)->shmem

#define nacl_scratch_read_long(__shmem, __offset)			\
({									\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	lelong_to_cpu(*__p);						\
})

#define nacl_scratch_write_long(__shmem, __offset, __val)		\
do {									\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	*__p = cpu_to_lelong(__val);					\
} while (0)

#define nacl_scratch_write_longs(__shmem, __offset, __array, __count)	\
do {									\
	unsigned int __i;						\
	unsigned long *__p = (__shmem) +				\
			     SBI_NACL_SHMEM_SCRATCH_OFFSET +		\
			     (__offset);				\
	for (__i = 0; __i < (__count); __i++)				\
		__p[__i] = cpu_to_lelong((__array)[__i]);		\
} while (0)

#define nacl_sync_hfence(__e)						\
	sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE,		\
		  (__e), 0, 0, 0, 0, 0)

#define nacl_hfence_mkconfig(__type, __order, __vmid, __asid)		\
({									\
	unsigned long __c = SBI_NACL_SHMEM_HFENCE_CONFIG_PEND;		\
	__c |= ((__type) & SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK)	\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT;		\
	__c |= (((__order) - SBI_NACL_SHMEM_HFENCE_ORDER_BASE) &	\
		SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK)		\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT;		\
	__c |= ((__vmid) & SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK)	\
		<< SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT;		\
	__c |= ((__asid) & SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK);	\
	__c;								\
})

#define nacl_hfence_mkpnum(__order, __addr)				\
	((__addr) >> (__order))

#define nacl_hfence_mkpcount(__order, __size)				\
	((__size) >> (__order))

#define nacl_hfence_gvma(__shmem, __gpa, __gpsz, __order)		\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA,		\
			   __order, 0, 0),				\
	nacl_hfence_mkpnum(__order, __gpa),				\
	nacl_hfence_mkpcount(__order, __gpsz))

#define nacl_hfence_gvma_all(__shmem)					\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL,	\
			   0, 0, 0), 0, 0)

#define nacl_hfence_gvma_vmid(__shmem, __vmid, __gpa, __gpsz, __order)	\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID,	\
			   __order, __vmid, 0),				\
	nacl_hfence_mkpnum(__order, __gpa),				\
	nacl_hfence_mkpcount(__order, __gpsz))

#define nacl_hfence_gvma_vmid_all(__shmem, __vmid)			\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL,	\
			   0, __vmid, 0), 0, 0)

#define nacl_hfence_vvma(__shmem, __vmid, __gva, __gvsz, __order)	\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA,		\
			   __order, __vmid, 0),				\
	nacl_hfence_mkpnum(__order, __gva),				\
	nacl_hfence_mkpcount(__order, __gvsz))

#define nacl_hfence_vvma_all(__shmem, __vmid)				\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL,	\
			   0, __vmid, 0), 0, 0)

#define nacl_hfence_vvma_asid(__shmem, __vmid, __asid, __gva, __gvsz, __order)\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID,	\
			   __order, __vmid, __asid),			\
	nacl_hfence_mkpnum(__order, __gva),				\
	nacl_hfence_mkpcount(__order, __gvsz))

#define nacl_hfence_vvma_asid_all(__shmem, __vmid, __asid)		\
__kvm_riscv_nacl_hfence(__shmem,					\
	nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL,	\
			   0, __vmid, __asid), 0, 0)

#define nacl_csr_read(__shmem, __csr)					\
({									\
	unsigned long *__a = (__shmem) + SBI_NACL_SHMEM_CSR_OFFSET;	\
	lelong_to_cpu(__a[SBI_NACL_SHMEM_CSR_INDEX(__csr)]);		\
})

#define nacl_csr_write(__shmem, __csr, __val)				\
do {									\
	void *__s = (__shmem);						\
	unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr);		\
	unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET;		\
	u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET;		\
	__a[__i] = cpu_to_lelong(__val);				\
	/* Mark the CSR dirty so the SBI implementation syncs it */	\
	__b[__i >> 3] |= 1U << (__i & 0x7);				\
} while (0)

#define nacl_csr_swap(__shmem, __csr, __val)				\
({									\
	void *__s = (__shmem);						\
	unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr);		\
	unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET;		\
	u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET;		\
	unsigned long __r = lelong_to_cpu(__a[__i]);			\
	__a[__i] = cpu_to_lelong(__val);				\
	__b[__i >> 3] |= 1U << (__i & 0x7);				\
	__r;								\
})

#define nacl_sync_csr(__csr)						\
	sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_CSR,			\
		  (__csr), 0, 0, 0, 0, 0)

/*
 * Each ncsr_xyz() macro defined below has its own static branch, so every
 * use of an ncsr_xyz() macro emits a patchable direct jump. This means
 * multiple back-to-back ncsr_xyz() uses emit multiple patchable direct
 * jumps, which is sub-optimal.
 *
 * Because of this, avoid multiple back-to-back ncsr_xyz() uses; see the
 * sketch below for an alternative.
 */
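
/*
 * Illustrative sketch only (not part of this header's API): one way to
 * follow the recommendation above is to test the static key once and use
 * the nacl_csr_*()/csr_*() helpers directly instead of several back-to-back
 * ncsr_read() calls. The CSR names below are just examples:
 *
 *	unsigned long stvec, sscratch;
 *
 *	if (kvm_riscv_nacl_available()) {
 *		void *shmem = nacl_shmem();
 *
 *		stvec = nacl_csr_read(shmem, CSR_VSTVEC);
 *		sscratch = nacl_csr_read(shmem, CSR_VSSCRATCH);
 *	} else {
 *		stvec = csr_read(CSR_VSTVEC);
 *		sscratch = csr_read(CSR_VSSCRATCH);
 *	}
 *
 * This pattern emits a single patchable jump instead of one per ncsr_read().
 */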

#define ncsr_read(__csr)						\
({									\
	unsigned long __r;						\
	if (kvm_riscv_nacl_available())					\
		__r = nacl_csr_read(nacl_shmem(), __csr);		\
	else								\
		__r = csr_read(__csr);					\
	__r;								\
})

#define ncsr_write(__csr, __val)					\
do {									\
	if (kvm_riscv_nacl_sync_csr_available())			\
		nacl_csr_write(nacl_shmem(), __csr, __val);		\
	else								\
		csr_write(__csr, __val);				\
} while (0)

#define ncsr_swap(__csr, __val)						\
({									\
	unsigned long __r;						\
	if (kvm_riscv_nacl_sync_csr_available())			\
		__r = nacl_csr_swap(nacl_shmem(), __csr, __val);	\
	else								\
		__r = csr_swap(__csr, __val);				\
	__r;								\
})

#define nsync_csr(__csr)						\
do {									\
	if (kvm_riscv_nacl_sync_csr_available())			\
		nacl_sync_csr(__csr);					\
} while (0)
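
/*
 * Usage sketch (illustrative only, with a placeholder value next_cycles):
 * when SYNC_CSR is available, ncsr_write() only updates the shared memory
 * and the dirty bitmap, so a write that must take effect on the real CSR
 * immediately can be followed by an explicit sync:
 *
 *	ncsr_write(CSR_VSTIMECMP, next_cycles);
 *	nsync_csr(CSR_VSTIMECMP);
 *
 * When SYNC_CSR is not available, ncsr_write() hits the CSR directly and
 * nsync_csr() is a no-op.
 */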

#endif