xref: /linux/arch/arm64/include/asm/fpsimd.h (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FP_H
#define __ASM_FP_H

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00
/*
 * The VFP state has 32x64-bit registers and a single 32-bit
 * control/status register.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
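/*
 * Illustrative sketch only: the compat ptrace code reassembles the
 * AArch32 FPSCR from the AArch64 FPSR/FPCR views roughly as follows:
 *
 *	fpscr = (fpsr & VFP_FPSCR_STAT_MASK) |
 *		(fpcr & VFP_FPSCR_CTRL_MASK);
 */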
#endif

static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
	unsigned long old = read_sysreg(cpacr_el1);
	unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;

	write_sysreg(old | set, cpacr_el1);
	isb();
	return old;
}

static inline unsigned long cpacr_save_enable_kernel_sme(void)
{
	unsigned long old = read_sysreg(cpacr_el1);
	unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_SMEN_EL1EN;

	write_sysreg(old | set, cpacr_el1);
	isb();
	return old;
}

static inline void cpacr_restore(unsigned long cpacr)
{
	write_sysreg(cpacr, cpacr_el1);
	isb();
}
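
/*
 * Usage sketch (illustrative only): bracket in-kernel SVE/SME use with
 * a save/enable and restore pair so that CPACR_EL1 trapping is
 * re-established afterwards:
 *
 *	unsigned long cpacr = cpacr_save_enable_kernel_sve();
 *	... use SVE instructions ...
 *	cpacr_restore(cpacr);
 */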

/*
 * When we defined the maximum SVE vector length we defined the ABI so
 * that the maximum vector length included all the bits in ZCR that are
 * reserved for future expansion, rather than just those currently
 * defined by the architecture.  Using this length to allocate
 * worst-case buffers results in excessively large allocations, and the
 * effect is even more pronounced for SME due to ZA.  Define more
 * suitable VLs for these situations.
 */
#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
#define SME_VQ_MAX	((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
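/*
 * For example, with the 4-bit LEN field currently defined by the
 * architecture these evaluate to 16 (ie. a 2048-bit maximum vector),
 * whereas the ABI's SVE_VQ_MAX allows for 512.
 */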

struct task_struct;

extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_kvm_prepare(void);

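/*
 * Bundle of pointers and parameters describing the FP/SVE/SME state of
 * a task or vcpu, for binding to the CPU with
 * fpsimd_bind_state_to_cpu().
 */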
struct cpu_fp_state {
	struct user_fpsimd_state *st;
	void *sve_state;
	void *sme_state;
	u64 *svcr;
	unsigned int sve_vl;
	unsigned int sme_vl;
	enum fp_type *fp_type;
	enum fp_type to_save;
};

extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);

extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);

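/*
 * Test whether streaming mode (PSTATE.SM) or ZA storage (PSTATE.ZA) is
 * enabled for the thread, based on its saved SVCR value.
 */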
static inline bool thread_sm_enabled(struct thread_struct *thread)
{
	return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
}

static inline bool thread_za_enabled(struct thread_struct *thread)
{
	return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
}

/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100

/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

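/*
 * Pointer to the FFR record within the task's saved SVE state; the
 * effective vector length depends on whether the task was in streaming
 * mode when the state was saved.
 */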
static inline void *sve_pffr(struct thread_struct *thread)
{
	unsigned int vl;

	if (system_supports_sme() && thread_sm_enabled(thread))
		vl = thread_get_sme_vl(thread);
	else
		vl = thread_get_sve_vl(thread);

	return (char *)thread->sve_state + sve_ffr_offset(vl);
}

static inline void *thread_zt_state(struct thread_struct *thread)
{
	/* The ZT register state is stored immediately after the ZA state */
	unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));
	return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
}

extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   int restore_ffr);
extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
extern void sme_set_vq(unsigned long vq_minus_1);
extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt);

struct arm64_cpu_capabilities;
extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);

extern u64 read_smcr_features(void);

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
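
/*
 * For example (sketch), the largest available VQ not exceeding vq can
 * be found with:
 *
 *	bit = find_next_bit(vl_info[type].vq_map, SVE_VQ_MAX,
 *			    __vq_to_bit(vq));
 *	vq = __bit_to_vq(bit);
 */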

struct vl_info {
	enum vec_type type;
	const char *name;		/* For display purposes */

	/* Minimum supported vector length across all CPUs */
	int min_vl;

	/* Maximum supported vector length across all CPUs */
	int max_vl;
	int max_virtualisable_vl;

	/*
	 * Set of available vector lengths, where length vq is encoded
	 * as bit __vq_to_bit(vq):
	 */
	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);

	/* Set of vector lengths present on at least one cpu: */
	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};

#ifdef CONFIG_ARM64_SVE

extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
				 unsigned long vl, unsigned long flags);

extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);

static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}

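/*
 * Update the LEN field of the ZCR register named by reg, skipping the
 * sysreg write altogether when the value would not change.
 */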
#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)

/*
 * Probing and setup functions.
 * Calls to these functions must be serialised with one another.
 */
enum vec_type;

extern void __init vec_init_vq_map(enum vec_type type);
extern void vec_update_vq_map(enum vec_type type);
extern int vec_verify_vq_map(enum vec_type type);
extern void __init sve_setup(void);

extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];

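/* Set the LEN field of ZCR_EL1 or SMCR_EL1, preserving all other bits. */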
static inline void write_vl(enum vec_type type, u64 val)
{
	u64 tmp;

	switch (type) {
#ifdef CONFIG_ARM64_SVE
	case ARM64_VEC_SVE:
		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
		break;
#endif
#ifdef CONFIG_ARM64_SME
	case ARM64_VEC_SME:
		tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_SMCR_EL1);
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static inline int vec_max_vl(enum vec_type type)
{
	return vl_info[type].max_vl;
}

static inline int vec_max_virtualisable_vl(enum vec_type type)
{
	return vl_info[type].max_virtualisable_vl;
}

static inline int sve_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SVE);
}

static inline int sve_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SVE);
}

/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool vq_available(enum vec_type type, unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
}

static inline bool sve_vq_available(unsigned int vq)
{
	return vq_available(ARM64_VEC_SVE, vq);
}

size_t sve_state_size(struct task_struct const *task);

#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_max_virtualisable_vl(void)
{
	return 0;
}

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline int sve_max_vl(void)
{
	return -EINVAL;
}

static inline bool sve_vq_available(unsigned int vq) { return false; }

static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

#define sve_cond_update_zcr_vq(val, reg) do { } while (0)

static inline void vec_init_vq_map(enum vec_type t) { }
static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }

static inline size_t sve_state_size(struct task_struct const *task)
{
	return 0;
}

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static inline void sme_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
}

static inline void sme_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
}

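/*
 * Enter and leave streaming mode, and stop ZA, via the SVCR
 * SMSTART/SMSTOP system instruction aliases; sme_smstop() clears both
 * PSTATE.SM and PSTATE.ZA.
 */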
static inline void sme_smstart_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
}

static inline void sme_smstop_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
}

static inline void sme_smstop(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
}

extern void __init sme_setup(void);

static inline int sme_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SME);
}

static inline int sme_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SME);
}

extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
extern void sme_suspend_exit(void);

/*
 * Return how many bytes of memory are required to store the full
 * SME-specific state for a task, given the task's currently configured
 * vector length.
 */
static inline size_t sme_state_size(struct task_struct const *task)
{
	unsigned int vl = task_get_sme_vl(task);
	size_t size;

	size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));

	if (system_supports_sme2())
		size += ZT_SIG_REG_SIZE;

	return size;
}

#else

static inline void sme_user_disable(void) { BUILD_BUG(); }
static inline void sme_user_enable(void) { BUILD_BUG(); }

static inline void sme_smstart_sm(void) { }
static inline void sme_smstop_sm(void) { }
static inline void sme_smstop(void) { }

static inline void sme_alloc(struct task_struct *task, bool flush) { }
static inline void sme_setup(void) { }
static inline unsigned int sme_get_vl(void) { return 0; }
static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
static inline void sme_suspend_exit(void) { }

static inline size_t sme_state_size(struct task_struct const *task)
{
	return 0;
}

#endif /* ! CONFIG_ARM64_SME */

/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);

#endif /* ! __ASSEMBLY__ */

#endif /* __ASM_FP_H */