1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * FP/SIMD context switching and fault handling
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Catalin Marinas <catalin.marinas@arm.com>
7  */
8 
9 #include <linux/bitmap.h>
10 #include <linux/bitops.h>
11 #include <linux/bottom_half.h>
12 #include <linux/bug.h>
13 #include <linux/cache.h>
14 #include <linux/compat.h>
15 #include <linux/compiler.h>
16 #include <linux/cpu.h>
17 #include <linux/cpu_pm.h>
18 #include <linux/kernel.h>
19 #include <linux/linkage.h>
20 #include <linux/irqflags.h>
21 #include <linux/init.h>
22 #include <linux/percpu.h>
23 #include <linux/prctl.h>
24 #include <linux/preempt.h>
25 #include <linux/ptrace.h>
26 #include <linux/sched/signal.h>
27 #include <linux/sched/task_stack.h>
28 #include <linux/signal.h>
29 #include <linux/slab.h>
30 #include <linux/stddef.h>
31 #include <linux/sysctl.h>
32 #include <linux/swab.h>
33 
34 #include <asm/esr.h>
35 #include <asm/exception.h>
36 #include <asm/fpsimd.h>
37 #include <asm/cpufeature.h>
38 #include <asm/cputype.h>
39 #include <asm/neon.h>
40 #include <asm/processor.h>
41 #include <asm/simd.h>
42 #include <asm/sigcontext.h>
43 #include <asm/sysreg.h>
44 #include <asm/traps.h>
45 #include <asm/virt.h>
46 
47 #define FPEXC_IOF	(1 << 0)
48 #define FPEXC_DZF	(1 << 1)
49 #define FPEXC_OFF	(1 << 2)
50 #define FPEXC_UFF	(1 << 3)
51 #define FPEXC_IXF	(1 << 4)
52 #define FPEXC_IDF	(1 << 7)
53 
54 /*
55  * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
56  *
57  * In order to reduce the number of times the FPSIMD state is needlessly saved
58  * and restored, we need to keep track of two things:
59  * (a) for each task, we need to remember which CPU was the last one to have
60  *     the task's FPSIMD state loaded into its FPSIMD registers;
61  * (b) for each CPU, we need to remember which task's userland FPSIMD state has
62  *     been loaded into its FPSIMD registers most recently, or whether it has
63  *     been used to perform kernel mode NEON in the meantime.
64  *
65  * For (a), we add an fpsimd_cpu field to thread_struct, which gets updated to
66  * the id of the current CPU every time the state is loaded onto a CPU. For (b),
67  * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
68  * address of the userland FPSIMD state of the task most recently loaded onto
69  * the CPU, or NULL if kernel mode NEON has been performed since then.
70  *
71  * With this in place, we no longer have to restore the next FPSIMD state right
72  * when switching between tasks. Instead, we can defer this check to userland
73  * resume, at which time we verify whether the CPU's fpsimd_last_state and the
74  * task's fpsimd_cpu are still mutually in sync. If this is the case, we
75  * can omit the FPSIMD restore.
76  *
77  * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
78  * indicate whether or not the userland FPSIMD state of the current task is
79  * present in the registers. The flag is set unless the FPSIMD registers of this
80  * CPU currently contain the most recent userland FPSIMD state of the current
81  * task.
82  *
83  * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
84  * save the task's FPSIMD context back to task_struct from softirq context.
85  * To prevent this from racing with the manipulation of the task's FPSIMD state
86  * from task context and thereby corrupting the state, it is necessary to
87  * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
88  * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
89  * run but prevent them from using FPSIMD.
90  *
91  * For a certain task, the sequence may look something like this:
92  * - the task gets scheduled in; if both the task's fpsimd_cpu field
93  *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
94  *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
95  *   cleared, otherwise it is set;
96  *
97  * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
98  *   userland FPSIMD state is copied from memory to the registers, the task's
99  *   fpsimd_cpu field is set to the id of the current CPU, the current
100  *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
101  *   TIF_FOREIGN_FPSTATE flag is cleared;
102  *
103  * - the task executes an ordinary syscall; upon return to userland, the
104  *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
105  *   restored;
106  *
107  * - the task executes a syscall which executes some NEON instructions; this is
108  *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
109  *   register contents to memory, clears the fpsimd_last_state per-cpu variable
110  *   and sets the TIF_FOREIGN_FPSTATE flag;
111  *
112  * - the task gets preempted after kernel_neon_end() is called; as we have not
113  *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
114  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
115  */
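/*
 * As a minimal sketch of the check described above (the real logic lives
 * in fpsimd_thread_switch() below), "still mutually in sync" reduces to a
 * hypothetical helper along these lines:
 *
 *	static bool fpsimd_state_in_sync(struct task_struct *tsk)
 *	{
 *		return __this_cpu_read(fpsimd_last_state.st) ==
 *				&tsk->thread.uw.fpsimd_state &&
 *		       tsk->thread.fpsimd_cpu == smp_processor_id();
 *	}
 *
 * When this holds for current, the FPSIMD restore on return to userland
 * can be skipped; fpsimd_state_in_sync() is illustration only and is not
 * part of this file.
 */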
116 struct fpsimd_last_state_struct {
117 	struct user_fpsimd_state *st;
118 	void *sve_state;
119 	unsigned int sve_vl;
120 };
121 
122 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
123 
124 /* Default VL for tasks that don't set it explicitly: */
125 static int __sve_default_vl = -1;
126 
127 static int get_sve_default_vl(void)
128 {
129 	return READ_ONCE(__sve_default_vl);
130 }
131 
132 #ifdef CONFIG_ARM64_SVE
133 
134 static void set_sve_default_vl(int val)
135 {
136 	WRITE_ONCE(__sve_default_vl, val);
137 }
138 
139 /* Maximum supported vector length across all CPUs (initially poisoned) */
140 int __ro_after_init sve_max_vl = SVE_VL_MIN;
141 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
142 
143 /*
144  * Set of available vector lengths,
145  * where vector length vq is encoded as bit __vq_to_bit(vq):
146  */
147 __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
148 /* Set of vector lengths present on at least one cpu: */
149 static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
150 
151 static void __percpu *efi_sve_state;
152 
153 #else /* ! CONFIG_ARM64_SVE */
154 
155 /* Dummy declaration for code that will be optimised out: */
156 extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
157 extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
158 extern void __percpu *efi_sve_state;
159 
160 #endif /* ! CONFIG_ARM64_SVE */
161 
162 DEFINE_PER_CPU(bool, fpsimd_context_busy);
163 EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
164 
165 static void __get_cpu_fpsimd_context(void)
166 {
167 	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
168 
169 	WARN_ON(busy);
170 }
171 
172 /*
173  * Claim ownership of the CPU FPSIMD context for use by the calling context.
174  *
175  * The caller may freely manipulate the FPSIMD context metadata until
176  * put_cpu_fpsimd_context() is called.
177  *
178  * The double-underscore version must only be called if you know the task
179  * can't be preempted.
180  */
181 static void get_cpu_fpsimd_context(void)
182 {
183 	local_bh_disable();
184 	__get_cpu_fpsimd_context();
185 }
186 
187 static void __put_cpu_fpsimd_context(void)
188 {
189 	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
190 
191 	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
192 }
193 
194 /*
195  * Release the CPU FPSIMD context.
196  *
197  * Must be called from a context in which get_cpu_fpsimd_context() was
198  * previously called, with no call to put_cpu_fpsimd_context() in the
199  * meantime.
200  */
201 static void put_cpu_fpsimd_context(void)
202 {
203 	__put_cpu_fpsimd_context();
204 	local_bh_enable();
205 }
206 
207 static bool have_cpu_fpsimd_context(void)
208 {
209 	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
210 }
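/*
 * Minimal usage sketch for the helpers above (this is the pattern used
 * throughout this file, not a new API):
 *
 *	get_cpu_fpsimd_context();
 *	fpsimd_save();			// or other fpsimd state manipulation
 *	put_cpu_fpsimd_context();
 *
 * While the context is held, may_use_simd() reports false, so well-behaved
 * softirq code will not attempt kernel-mode NEON in between.
 */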
211 
212 /*
213  * Call __sve_free() directly only if you know the task can't be scheduled
214  * or preempted.
215  */
216 static void __sve_free(struct task_struct *task)
217 {
218 	kfree(task->thread.sve_state);
219 	task->thread.sve_state = NULL;
220 }
221 
222 static void sve_free(struct task_struct *task)
223 {
224 	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
225 
226 	__sve_free(task);
227 }
228 
229 /*
230  * TIF_SVE controls whether a task can use SVE without trapping while
231  * in userspace, and also the way a task's FPSIMD/SVE state is stored
232  * in thread_struct.
233  *
234  * The kernel uses this flag to track whether a user task is actively
235  * using SVE, and therefore whether full SVE register state needs to
236  * be tracked.  If not, the cheaper FPSIMD context handling code can
237  * be used instead of the more costly SVE equivalents.
238  *
239  *  * TIF_SVE set:
240  *
241  *    The task can execute SVE instructions while in userspace without
242  *    trapping to the kernel.
243  *
244  *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
245  *    corresponding Zn), P0-P15 and FFR are encoded in
246  *    task->thread.sve_state, formatted appropriately for vector
247  *    length task->thread.sve_vl.
248  *
249  *    task->thread.sve_state must point to a valid buffer at least
250  *    sve_state_size(task) bytes in size.
251  *
252  *    During any syscall, the kernel may optionally clear TIF_SVE and
253  *    discard the vector state except for the FPSIMD subset.
254  *
255  *  * TIF_SVE clear:
256  *
257  *    An attempt by the user task to execute an SVE instruction causes
258  *    do_sve_acc() to be called, which does some preparation and then
259  *    sets TIF_SVE.
260  *
261  *    When stored, FPSIMD registers V0-V31 are encoded in
262  *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
263  *    logically zero but not stored anywhere; P0-P15 and FFR are not
264  *    stored and have unspecified values from userspace's point of
265  *    view.  For hygiene purposes, the kernel zeroes them on next use,
266  *    but userspace is discouraged from relying on this.
267  *
268  *    task->thread.sve_state does not need to be non-NULL, valid or any
269  *    particular size: it must not be dereferenced.
270  *
271  *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
272  *    irrespective of whether TIF_SVE is clear or set, since these are
273  *    not vector length dependent.
274  */
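/*
 * Illustrative example of the TIF_SVE-set layout described above: the
 * FPSIMD view of register Vn is the low 128 bits of Zn, stored
 * little-endian in sve_state.  A hypothetical accessor (illustration
 * only; the real conversions are fpsimd_to_sve() and sve_to_fpsimd()
 * below) could read it as:
 *
 *	static __uint128_t read_vn(struct task_struct *task, unsigned int n)
 *	{
 *		unsigned int vq = sve_vq_from_vl(task->thread.sve_vl);
 *		__uint128_t const *p =
 *			(__uint128_t const *)ZREG(task->thread.sve_state, vq, n);
 *
 *		return arm64_le128_to_cpu(*p);
 *	}
 */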
275 
276 /*
277  * Update current's FPSIMD/SVE registers from thread_struct.
278  *
279  * This function should be called only when the FPSIMD/SVE state in
280  * thread_struct is known to be up to date, when preparing to enter
281  * userspace.
282  */
283 static void task_fpsimd_load(void)
284 {
285 	WARN_ON(!system_supports_fpsimd());
286 	WARN_ON(!have_cpu_fpsimd_context());
287 
288 	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
289 		sve_load_state(sve_pffr(&current->thread),
290 			       &current->thread.uw.fpsimd_state.fpsr,
291 			       sve_vq_from_vl(current->thread.sve_vl) - 1);
292 	else
293 		fpsimd_load_state(&current->thread.uw.fpsimd_state);
294 }
295 
296 /*
297  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
298  * date with respect to the CPU registers.
299  */
300 static void fpsimd_save(void)
301 {
302 	struct fpsimd_last_state_struct const *last =
303 		this_cpu_ptr(&fpsimd_last_state);
304 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
305 
306 	WARN_ON(!system_supports_fpsimd());
307 	WARN_ON(!have_cpu_fpsimd_context());
308 
309 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
310 		if (IS_ENABLED(CONFIG_ARM64_SVE) &&
311 		    test_thread_flag(TIF_SVE)) {
312 			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
313 				/*
314 				 * Can't save the user regs, so current would
315 				 * re-enter user with corrupt state.
316 				 * There's no way to recover, so kill it:
317 				 */
318 				force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
319 				return;
320 			}
321 
322 			sve_save_state((char *)last->sve_state +
323 						sve_ffr_offset(last->sve_vl),
324 				       &last->st->fpsr);
325 		} else
326 			fpsimd_save_state(last->st);
327 	}
328 }
329 
330 /*
331  * All vector length selection from userspace comes through here.
332  * We're on a slow path, so some sanity-checks are included.
333  * If things go wrong there's a bug somewhere, but try to fall back to a
334  * safe choice.
335  */
336 static unsigned int find_supported_vector_length(unsigned int vl)
337 {
338 	int bit;
339 	int max_vl = sve_max_vl;
340 
341 	if (WARN_ON(!sve_vl_valid(vl)))
342 		vl = SVE_VL_MIN;
343 
344 	if (WARN_ON(!sve_vl_valid(max_vl)))
345 		max_vl = SVE_VL_MIN;
346 
347 	if (vl > max_vl)
348 		vl = max_vl;
349 
350 	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
351 			    __vq_to_bit(sve_vq_from_vl(vl)));
352 	return sve_vl_from_vq(__bit_to_vq(bit));
353 }
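/*
 * Worked example, assuming hardware that supports vector lengths of 16,
 * 32 and 64 bytes: a request for vl == 48 gives vq == 3; because the bit
 * numbering is reversed (__vq_to_bit(SVE_VQ_MAX) == 0), find_next_bit()
 * walks towards smaller VQs and stops at vq == 2, so 32 is returned, the
 * largest supported VL not exceeding the request.
 */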
354 
355 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
356 
357 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
358 				  void *buffer, size_t *lenp, loff_t *ppos)
359 {
360 	int ret;
361 	int vl = get_sve_default_vl();
362 	struct ctl_table tmp_table = {
363 		.data = &vl,
364 		.maxlen = sizeof(vl),
365 	};
366 
367 	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
368 	if (ret || !write)
369 		return ret;
370 
371 	/* Writing -1 has the special meaning "set to max": */
372 	if (vl == -1)
373 		vl = sve_max_vl;
374 
375 	if (!sve_vl_valid(vl))
376 		return -EINVAL;
377 
378 	set_sve_default_vl(find_supported_vector_length(vl));
379 	return 0;
380 }
381 
382 static struct ctl_table sve_default_vl_table[] = {
383 	{
384 		.procname	= "sve_default_vector_length",
385 		.mode		= 0644,
386 		.proc_handler	= sve_proc_do_default_vl,
387 	},
388 	{ }
389 };
390 
391 static int __init sve_sysctl_init(void)
392 {
393 	if (system_supports_sve())
394 		if (!register_sysctl("abi", sve_default_vl_table))
395 			return -EINVAL;
396 
397 	return 0;
398 }
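/*
 * Usage example for the sysctl above (illustrative shell session): the
 * handler is exposed as /proc/sys/abi/sve_default_vector_length, so
 *
 *	# echo 64 > /proc/sys/abi/sve_default_vector_length
 *
 * sets the default VL to 64 bytes (or the nearest supported value below
 * that), while writing -1 selects the maximum supported VL.
 */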
399 
400 #else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
401 static int __init sve_sysctl_init(void) { return 0; }
402 #endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
403 
404 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
405 	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
406 
407 #ifdef CONFIG_CPU_BIG_ENDIAN
408 static __uint128_t arm64_cpu_to_le128(__uint128_t x)
409 {
410 	u64 a = swab64(x);
411 	u64 b = swab64(x >> 64);
412 
413 	return ((__uint128_t)a << 64) | b;
414 }
415 #else
416 static __uint128_t arm64_cpu_to_le128(__uint128_t x)
417 {
418 	return x;
419 }
420 #endif
421 
422 #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
423 
424 static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
425 			    unsigned int vq)
426 {
427 	unsigned int i;
428 	__uint128_t *p;
429 
430 	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
431 		p = (__uint128_t *)ZREG(sst, vq, i);
432 		*p = arm64_cpu_to_le128(fst->vregs[i]);
433 	}
434 }
435 
436 /*
437  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
438  * task->thread.sve_state.
439  *
440  * Task can be a non-runnable task, or current.  In the latter case,
441  * the caller must have ownership of the cpu FPSIMD context before calling
442  * this function.
443  * task->thread.sve_state must point to at least sve_state_size(task)
444  * bytes of allocated kernel memory.
445  * task->thread.uw.fpsimd_state must be up to date before calling this
446  * function.
447  */
448 static void fpsimd_to_sve(struct task_struct *task)
449 {
450 	unsigned int vq;
451 	void *sst = task->thread.sve_state;
452 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
453 
454 	if (!system_supports_sve())
455 		return;
456 
457 	vq = sve_vq_from_vl(task->thread.sve_vl);
458 	__fpsimd_to_sve(sst, fst, vq);
459 }
460 
461 /*
462  * Transfer the SVE state in task->thread.sve_state to
463  * task->thread.uw.fpsimd_state.
464  *
465  * Task can be a non-runnable task, or current.  In the latter case,
466  * the caller must have ownership of the cpu FPSIMD context before calling
467  * this function.
468  * task->thread.sve_state must point to at least sve_state_size(task)
469  * bytes of allocated kernel memory.
470  * task->thread.sve_state must be up to date before calling this function.
471  */
472 static void sve_to_fpsimd(struct task_struct *task)
473 {
474 	unsigned int vq;
475 	void const *sst = task->thread.sve_state;
476 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
477 	unsigned int i;
478 	__uint128_t const *p;
479 
480 	if (!system_supports_sve())
481 		return;
482 
483 	vq = sve_vq_from_vl(task->thread.sve_vl);
484 	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
485 		p = (__uint128_t const *)ZREG(sst, vq, i);
486 		fst->vregs[i] = arm64_le128_to_cpu(*p);
487 	}
488 }
489 
490 #ifdef CONFIG_ARM64_SVE
491 
492 /*
493  * Return how many bytes of memory are required to store the full SVE
494  * state for task, given task's currently configured vector length.
495  */
496 size_t sve_state_size(struct task_struct const *task)
497 {
498 	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
499 }
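/*
 * Rough size intuition (an estimate, not an ABI guarantee): each vector
 * quadword (VQ) costs 32 * 16 bytes for Z0-Z31 plus 16 * 2 + 2 bytes for
 * P0-P15 and FFR, i.e. roughly 546 bytes per VQ, which is where the
 * "~8KB" maximum mentioned in sve_alloc() below comes from for VQ == 16
 * (a 256-byte vector length).
 */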
500 
501 /*
502  * Ensure that task->thread.sve_state is allocated and sufficiently large.
503  *
504  * This function should be used only in preparation for replacing
505  * task->thread.sve_state with new data.  The memory is always zeroed
506  * here to prevent stale data from showing through: this is done in
507  * the interest of testability and predictability: except in the
508  * do_sve_acc() case, there is no ABI requirement to hide stale data
509  * written previously by the task.
510  */
511 void sve_alloc(struct task_struct *task)
512 {
513 	if (task->thread.sve_state) {
514 		memset(task->thread.sve_state, 0, sve_state_size(task));
515 		return;
516 	}
517 
518 	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
519 	task->thread.sve_state =
520 		kzalloc(sve_state_size(task), GFP_KERNEL);
521 
522 	/*
523 	 * If future SVE revisions can have larger vectors though,
524 	 * this may cease to be true:
525 	 */
526 	BUG_ON(!task->thread.sve_state);
527 }
528 
529 
530 /*
531  * Ensure that task->thread.sve_state is up to date with respect to
532  * the user task, irrespective of whether SVE is in use or not.
533  *
534  * This should only be called by ptrace.  task must be non-runnable.
535  * task->thread.sve_state must point to at least sve_state_size(task)
536  * bytes of allocated kernel memory.
537  */
538 void fpsimd_sync_to_sve(struct task_struct *task)
539 {
540 	if (!test_tsk_thread_flag(task, TIF_SVE))
541 		fpsimd_to_sve(task);
542 }
543 
544 /*
545  * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
546  * the user task, irrespective of whether SVE is in use or not.
547  *
548  * This should only be called by ptrace.  task must be non-runnable.
549  * task->thread.sve_state must point to at least sve_state_size(task)
550  * bytes of allocated kernel memory.
551  */
552 void sve_sync_to_fpsimd(struct task_struct *task)
553 {
554 	if (test_tsk_thread_flag(task, TIF_SVE))
555 		sve_to_fpsimd(task);
556 }
557 
558 /*
559  * Ensure that task->thread.sve_state is up to date with respect to
560  * the task->thread.uw.fpsimd_state.
561  *
562  * This should only be called by ptrace to merge new FPSIMD register
563  * values into a task for which SVE is currently active.
564  * task must be non-runnable.
565  * task->thread.sve_state must point to at least sve_state_size(task)
566  * bytes of allocated kernel memory.
567  * task->thread.uw.fpsimd_state must already have been initialised with
568  * the new FPSIMD register values to be merged in.
569  */
570 void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
571 {
572 	unsigned int vq;
573 	void *sst = task->thread.sve_state;
574 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
575 
576 	if (!test_tsk_thread_flag(task, TIF_SVE))
577 		return;
578 
579 	vq = sve_vq_from_vl(task->thread.sve_vl);
580 
581 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
582 	__fpsimd_to_sve(sst, fst, vq);
583 }
584 
585 int sve_set_vector_length(struct task_struct *task,
586 			  unsigned long vl, unsigned long flags)
587 {
588 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
589 				     PR_SVE_SET_VL_ONEXEC))
590 		return -EINVAL;
591 
592 	if (!sve_vl_valid(vl))
593 		return -EINVAL;
594 
595 	/*
596 	 * Clamp to the maximum vector length that VL-agnostic SVE code can
597 	 * work with.  A flag may be assigned in the future to allow setting
598 	 * of larger vector lengths without confusing older software.
599 	 */
600 	if (vl > SVE_VL_ARCH_MAX)
601 		vl = SVE_VL_ARCH_MAX;
602 
603 	vl = find_supported_vector_length(vl);
604 
605 	if (flags & (PR_SVE_VL_INHERIT |
606 		     PR_SVE_SET_VL_ONEXEC))
607 		task->thread.sve_vl_onexec = vl;
608 	else
609 		/* Reset VL to system default on next exec: */
610 		task->thread.sve_vl_onexec = 0;
611 
612 	/* Only actually set the VL if not deferred: */
613 	if (flags & PR_SVE_SET_VL_ONEXEC)
614 		goto out;
615 
616 	if (vl == task->thread.sve_vl)
617 		goto out;
618 
619 	/*
620 	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
621 	 * write any live register state back to task_struct, and convert to a
622 	 * non-SVE thread.
623 	 */
624 	if (task == current) {
625 		get_cpu_fpsimd_context();
626 
627 		fpsimd_save();
628 	}
629 
630 	fpsimd_flush_task_state(task);
631 	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
632 		sve_to_fpsimd(task);
633 
634 	if (task == current)
635 		put_cpu_fpsimd_context();
636 
637 	/*
638 	 * Force reallocation of task SVE state to the correct size
639 	 * on next use:
640 	 */
641 	sve_free(task);
642 
643 	task->thread.sve_vl = vl;
644 
645 out:
646 	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
647 			       flags & PR_SVE_VL_INHERIT);
648 
649 	return 0;
650 }
651 
652 /*
653  * Encode the current vector length and flags for return.
654  * This is only required for prctl(): ptrace has separate fields.
655  *
656  * flags are as for sve_set_vector_length().
657  */
658 static int sve_prctl_status(unsigned long flags)
659 {
660 	int ret;
661 
662 	if (flags & PR_SVE_SET_VL_ONEXEC)
663 		ret = current->thread.sve_vl_onexec;
664 	else
665 		ret = current->thread.sve_vl;
666 
667 	if (test_thread_flag(TIF_SVE_VL_INHERIT))
668 		ret |= PR_SVE_VL_INHERIT;
669 
670 	return ret;
671 }
672 
673 /* PR_SVE_SET_VL */
674 int sve_set_current_vl(unsigned long arg)
675 {
676 	unsigned long vl, flags;
677 	int ret;
678 
679 	vl = arg & PR_SVE_VL_LEN_MASK;
680 	flags = arg & ~vl;
681 
682 	if (!system_supports_sve() || is_compat_task())
683 		return -EINVAL;
684 
685 	ret = sve_set_vector_length(current, vl, flags);
686 	if (ret)
687 		return ret;
688 
689 	return sve_prctl_status(flags);
690 }
691 
692 /* PR_SVE_GET_VL */
693 int sve_get_current_vl(void)
694 {
695 	if (!system_supports_sve() || is_compat_task())
696 		return -EINVAL;
697 
698 	return sve_prctl_status(0);
699 }
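/*
 * Userspace view of the two prctl()s above (an illustrative snippet, not
 * kernel code):
 *
 *	#include <sys/prctl.h>
 *
 *	// Request a 32-byte VL; the kernel may grant a smaller supported one.
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret >= 0) {
 *		int vl = ret & PR_SVE_VL_LEN_MASK;	// VL actually granted
 *		...
 *	}
 *
 * The return value also carries the flag bits, as encoded by
 * sve_prctl_status() above.
 */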
700 
701 static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
702 {
703 	unsigned int vq, vl;
704 	unsigned long zcr;
705 
706 	bitmap_zero(map, SVE_VQ_MAX);
707 
708 	zcr = ZCR_ELx_LEN_MASK;
709 	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
710 
711 	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
712 		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
713 		vl = sve_get_vl();
714 		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
715 		set_bit(__vq_to_bit(vq), map);
716 	}
717 }
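/*
 * Worked example for the probe loop above: on hardware supporting only
 * VQ 1, 2 and 4, requesting VQ 16 (LEN == 15) reads back a VL of 64
 * bytes, i.e. VQ 4; the loop records VQ 4 and resumes from VQ 3, so the
 * unsupported VQs in between are never probed individually.
 */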
718 
719 /*
720  * Initialise the set of known supported VQs for the boot CPU.
721  * This is called during kernel boot, before secondary CPUs are brought up.
722  */
723 void __init sve_init_vq_map(void)
724 {
725 	sve_probe_vqs(sve_vq_map);
726 	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
727 }
728 
729 /*
730  * If we haven't committed to the set of supported VQs yet, filter out
731  * those not supported by the current CPU.
732  * This function is called during the bring-up of early secondary CPUs only.
733  */
734 void sve_update_vq_map(void)
735 {
736 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
737 
738 	sve_probe_vqs(tmp_map);
739 	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
740 	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
741 }
742 
743 /*
744  * Check whether the current CPU supports all VQs in the committed set.
745  * This function is called during the bring-up of late secondary CPUs only.
746  */
747 int sve_verify_vq_map(void)
748 {
749 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
750 	unsigned long b;
751 
752 	sve_probe_vqs(tmp_map);
753 
754 	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
755 	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
756 		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
757 			smp_processor_id());
758 		return -EINVAL;
759 	}
760 
761 	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
762 		return 0;
763 
764 	/*
765 	 * For KVM, it is necessary to ensure that this CPU doesn't
766 	 * support any vector length that guests may have probed as
767 	 * unsupported.
768 	 */
769 
770 	/* Recover the set of supported VQs: */
771 	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
772 	/* Find VQs supported locally but not globally: */
773 	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
774 
775 	/* Find the lowest such VQ, if any: */
776 	b = find_last_bit(tmp_map, SVE_VQ_MAX);
777 	if (b >= SVE_VQ_MAX)
778 		return 0; /* no mismatches */
779 
780 	/*
781 	 * Mismatches above sve_max_virtualisable_vl are fine, since
782 	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
783 	 */
784 	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
785 		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
786 			smp_processor_id());
787 		return -EINVAL;
788 	}
789 
790 	return 0;
791 }
792 
793 static void __init sve_efi_setup(void)
794 {
795 	if (!IS_ENABLED(CONFIG_EFI))
796 		return;
797 
798 	/*
799 	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
800 	 * This is evidence of a crippled system and we are returning void,
801 	 * so no attempt is made to handle this situation here.
802 	 */
803 	if (!sve_vl_valid(sve_max_vl))
804 		goto fail;
805 
806 	efi_sve_state = __alloc_percpu(
807 		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
808 	if (!efi_sve_state)
809 		goto fail;
810 
811 	return;
812 
813 fail:
814 	panic("Cannot allocate percpu memory for EFI SVE save/restore");
815 }
816 
817 /*
818  * Enable SVE for EL1.
819  * Intended for use by the cpufeatures code during CPU boot.
820  */
821 void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
822 {
823 	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
824 	isb();
825 }
826 
827 /*
828  * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
829  * vector length.
830  *
831  * Use only if SVE is present.
832  * This function clobbers the SVE vector length.
833  */
834 u64 read_zcr_features(void)
835 {
836 	u64 zcr;
837 	unsigned int vq_max;
838 
839 	/*
840 	 * Set the maximum possible VL, and write zeroes to all other
841 	 * bits to see if they stick.
842 	 */
843 	sve_kernel_enable(NULL);
844 	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
845 
846 	zcr = read_sysreg_s(SYS_ZCR_EL1);
847 	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
848 	vq_max = sve_vq_from_vl(sve_get_vl());
849 	zcr |= vq_max - 1; /* set LEN field to maximum effective value */
850 
851 	return zcr;
852 }
853 
854 void __init sve_setup(void)
855 {
856 	u64 zcr;
857 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
858 	unsigned long b;
859 
860 	if (!system_supports_sve())
861 		return;
862 
863 	/*
864 	 * The SVE architecture mandates support for 128-bit vectors,
865 	 * so sve_vq_map must have at least SVE_VQ_MIN set.
866 	 * If something went wrong, at least try to patch it up:
867 	 */
868 	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
869 		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
870 
871 	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
872 	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
873 
874 	/*
875 	 * Sanity-check that the max VL we determined through CPU features
876 	 * corresponds properly to sve_vq_map.  If not, do our best:
877 	 */
878 	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
879 		sve_max_vl = find_supported_vector_length(sve_max_vl);
880 
881 	/*
882 	 * For the default VL, pick the maximum supported value <= 64.
883 	 * VL == 64 is guaranteed not to grow the signal frame.
884 	 */
885 	set_sve_default_vl(find_supported_vector_length(64));
886 
887 	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
888 		      SVE_VQ_MAX);
889 
890 	b = find_last_bit(tmp_map, SVE_VQ_MAX);
891 	if (b >= SVE_VQ_MAX)
892 		/* No non-virtualisable VLs found */
893 		sve_max_virtualisable_vl = SVE_VQ_MAX;
894 	else if (WARN_ON(b == SVE_VQ_MAX - 1))
895 		/* No virtualisable VLs?  This is architecturally forbidden. */
896 		sve_max_virtualisable_vl = SVE_VQ_MIN;
897 	else /* b + 1 < SVE_VQ_MAX */
898 		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
899 
900 	if (sve_max_virtualisable_vl > sve_max_vl)
901 		sve_max_virtualisable_vl = sve_max_vl;
902 
903 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
904 		sve_max_vl);
905 	pr_info("SVE: default vector length %u bytes per vector\n",
906 		get_sve_default_vl());
907 
908 	/* KVM decides whether to support mismatched systems. Just warn here: */
909 	if (sve_max_virtualisable_vl < sve_max_vl)
910 		pr_warn("SVE: unvirtualisable vector lengths present\n");
911 
912 	sve_efi_setup();
913 }
914 
915 /*
916  * Called from the put_task_struct() path, which cannot get here
917  * unless dead_task is really dead and not schedulable.
918  */
919 void fpsimd_release_task(struct task_struct *dead_task)
920 {
921 	__sve_free(dead_task);
922 }
923 
924 #endif /* CONFIG_ARM64_SVE */
925 
926 /*
927  * Trapped SVE access
928  *
929  * Storage is allocated for the full SVE state, the current FPSIMD
930  * register contents are migrated across, and the access trap is
931  * disabled.
932  *
933  * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
934  * would have disabled the SVE access trap for userspace during
935  * ret_to_user, making an SVE access trap impossible in that case.
936  */
937 void do_sve_acc(unsigned int esr, struct pt_regs *regs)
938 {
939 	/* Even if we chose not to use SVE, the hardware could still trap: */
940 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
941 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
942 		return;
943 	}
944 
945 	sve_alloc(current);
946 
947 	get_cpu_fpsimd_context();
948 
949 	if (test_and_set_thread_flag(TIF_SVE))
950 		WARN_ON(1); /* SVE access shouldn't have trapped */
951 
952 	/*
953 	 * Convert the FPSIMD state to SVE, zeroing all the state that
954 	 * is not shared with FPSIMD. If (as is likely) the current
955 	 * state is live in the registers then do this there and
956 	 * update our metadata for the current task including
957 	 * disabling the trap, otherwise update our in-memory copy.
958 	 */
959 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
960 		unsigned long vq_minus_one =
961 			sve_vq_from_vl(current->thread.sve_vl) - 1;
962 		sve_set_vq(vq_minus_one);
963 		sve_flush_live(vq_minus_one);
964 		fpsimd_bind_task_to_cpu();
965 	} else {
966 		fpsimd_to_sve(current);
967 	}
968 
969 	put_cpu_fpsimd_context();
970 }
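/*
 * From userspace's point of view, a task's first SVE instruction, e.g.
 *
 *	ptrue	p0.b		// traps into do_sve_acc()
 *
 * traps once; after do_sve_acc() has set TIF_SVE, subsequent SVE
 * instructions execute without trapping, until something clears TIF_SVE
 * again (an exec, or the optional discard on syscall described earlier).
 */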
971 
972 /*
973  * Trapped FP/ASIMD access.
974  */
975 void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
976 {
977 	/* TODO: implement lazy context saving/restoring */
978 	WARN_ON(1);
979 }
980 
981 /*
982  * Raise a SIGFPE for the current process.
983  */
984 void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
985 {
986 	unsigned int si_code = FPE_FLTUNK;
987 
988 	if (esr & ESR_ELx_FP_EXC_TFV) {
989 		if (esr & FPEXC_IOF)
990 			si_code = FPE_FLTINV;
991 		else if (esr & FPEXC_DZF)
992 			si_code = FPE_FLTDIV;
993 		else if (esr & FPEXC_OFF)
994 			si_code = FPE_FLTOVF;
995 		else if (esr & FPEXC_UFF)
996 			si_code = FPE_FLTUND;
997 		else if (esr & FPEXC_IXF)
998 			si_code = FPE_FLTRES;
999 	}
1000 
1001 	send_sig_fault(SIGFPE, si_code,
1002 		       (void __user *)instruction_pointer(regs),
1003 		       current);
1004 }
1005 
1006 void fpsimd_thread_switch(struct task_struct *next)
1007 {
1008 	bool wrong_task, wrong_cpu;
1009 
1010 	if (!system_supports_fpsimd())
1011 		return;
1012 
1013 	__get_cpu_fpsimd_context();
1014 
1015 	/* Save unsaved fpsimd state, if any: */
1016 	fpsimd_save();
1017 
1018 	/*
1019 	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
1020 	 * state.  For kernel threads, FPSIMD registers are never loaded
1021 	 * and wrong_task and wrong_cpu will always be true.
1022 	 */
1023 	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
1024 					&next->thread.uw.fpsimd_state;
1025 	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();
1026 
1027 	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
1028 			       wrong_task || wrong_cpu);
1029 
1030 	__put_cpu_fpsimd_context();
1031 }
1032 
1033 void fpsimd_flush_thread(void)
1034 {
1035 	int vl, supported_vl;
1036 
1037 	if (!system_supports_fpsimd())
1038 		return;
1039 
1040 	get_cpu_fpsimd_context();
1041 
1042 	fpsimd_flush_task_state(current);
1043 	memset(&current->thread.uw.fpsimd_state, 0,
1044 	       sizeof(current->thread.uw.fpsimd_state));
1045 
1046 	if (system_supports_sve()) {
1047 		clear_thread_flag(TIF_SVE);
1048 		sve_free(current);
1049 
1050 		/*
1051 		 * Reset the task vector length as required.
1052 		 * This is where we ensure that all user tasks have a valid
1053 		 * vector length configured: no kernel task can become a user
1054 		 * task without an exec and hence a call to this function.
1055 		 * By the time the first call to this function is made, all
1056 		 * early hardware probing is complete, so __sve_default_vl
1057 		 * should be valid.
1058 		 * If a bug causes this to go wrong, we make some noise and
1059 		 * try to fudge thread.sve_vl to a safe value here.
1060 		 */
1061 		vl = current->thread.sve_vl_onexec ?
1062 			current->thread.sve_vl_onexec : get_sve_default_vl();
1063 
1064 		if (WARN_ON(!sve_vl_valid(vl)))
1065 			vl = SVE_VL_MIN;
1066 
1067 		supported_vl = find_supported_vector_length(vl);
1068 		if (WARN_ON(supported_vl != vl))
1069 			vl = supported_vl;
1070 
1071 		current->thread.sve_vl = vl;
1072 
1073 		/*
1074 		 * If the task is not set to inherit, ensure that the vector
1075 		 * length will be reset by a subsequent exec:
1076 		 */
1077 		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
1078 			current->thread.sve_vl_onexec = 0;
1079 	}
1080 
1081 	put_cpu_fpsimd_context();
1082 }
1083 
1084 /*
1085  * Save the userland FPSIMD state of 'current' to memory, but only if the state
1086  * currently held in the registers does in fact belong to 'current'
1087  */
1088 void fpsimd_preserve_current_state(void)
1089 {
1090 	if (!system_supports_fpsimd())
1091 		return;
1092 
1093 	get_cpu_fpsimd_context();
1094 	fpsimd_save();
1095 	put_cpu_fpsimd_context();
1096 }
1097 
1098 /*
1099  * Like fpsimd_preserve_current_state(), but ensure that
1100  * current->thread.uw.fpsimd_state is updated so that it can be copied to
1101  * the signal frame.
1102  */
1103 void fpsimd_signal_preserve_current_state(void)
1104 {
1105 	fpsimd_preserve_current_state();
1106 	if (test_thread_flag(TIF_SVE))
1107 		sve_to_fpsimd(current);
1108 }
1109 
1110 /*
1111  * Associate current's FPSIMD context with this cpu
1112  * The caller must have ownership of the cpu FPSIMD context before calling
1113  * this function.
1114  */
1115 void fpsimd_bind_task_to_cpu(void)
1116 {
1117 	struct fpsimd_last_state_struct *last =
1118 		this_cpu_ptr(&fpsimd_last_state);
1119 
1120 	WARN_ON(!system_supports_fpsimd());
1121 	last->st = &current->thread.uw.fpsimd_state;
1122 	last->sve_state = current->thread.sve_state;
1123 	last->sve_vl = current->thread.sve_vl;
1124 	current->thread.fpsimd_cpu = smp_processor_id();
1125 
1126 	if (system_supports_sve()) {
1127 		/* Toggle SVE trapping for userspace if needed */
1128 		if (test_thread_flag(TIF_SVE))
1129 			sve_user_enable();
1130 		else
1131 			sve_user_disable();
1132 
1133 		/* Serialised by exception return to user */
1134 	}
1135 }
1136 
1137 void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
1138 			      unsigned int sve_vl)
1139 {
1140 	struct fpsimd_last_state_struct *last =
1141 		this_cpu_ptr(&fpsimd_last_state);
1142 
1143 	WARN_ON(!system_supports_fpsimd());
1144 	WARN_ON(!in_softirq() && !irqs_disabled());
1145 
1146 	last->st = st;
1147 	last->sve_state = sve_state;
1148 	last->sve_vl = sve_vl;
1149 }
1150 
1151 /*
1152  * Load the userland FPSIMD state of 'current' from memory, but only if the
1153  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
1154  * state of 'current'
1155  */
1156 void fpsimd_restore_current_state(void)
1157 {
1158 	/*
1159 	 * For tasks that were created before we detected the absence of
1160 	 * FP/SIMD, TIF_FOREIGN_FPSTATE could have been set via
1161 	 * fpsimd_thread_switch(), e.g. for init, and could then have been
1162 	 * inherited by child processes. If we later detect that the system
1163 	 * doesn't support FP/SIMD, we must clear the flag for all tasks to
1164 	 * indicate that the FPSTATE is clean (as we can't have one) to avoid
1165 	 * looping forever in do_notify_resume().
1166 	 */
1167 	if (!system_supports_fpsimd()) {
1168 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
1169 		return;
1170 	}
1171 
1172 	get_cpu_fpsimd_context();
1173 
1174 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
1175 		task_fpsimd_load();
1176 		fpsimd_bind_task_to_cpu();
1177 	}
1178 
1179 	put_cpu_fpsimd_context();
1180 }
1181 
1182 /*
1183  * Load an updated userland FPSIMD state for 'current' from memory and set the
1184  * flag that indicates that the FPSIMD register contents are the most recent
1185  * FPSIMD state of 'current'
1186  */
1187 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
1188 {
1189 	if (WARN_ON(!system_supports_fpsimd()))
1190 		return;
1191 
1192 	get_cpu_fpsimd_context();
1193 
1194 	current->thread.uw.fpsimd_state = *state;
1195 	if (test_thread_flag(TIF_SVE))
1196 		fpsimd_to_sve(current);
1197 
1198 	task_fpsimd_load();
1199 	fpsimd_bind_task_to_cpu();
1200 
1201 	clear_thread_flag(TIF_FOREIGN_FPSTATE);
1202 
1203 	put_cpu_fpsimd_context();
1204 }
1205 
1206 /*
1207  * Invalidate live CPU copies of task t's FPSIMD state
1208  *
1209  * This function may be called with preemption enabled.  The barrier()
1210  * ensures that the assignment to fpsimd_cpu is visible to any
1211  * preemption/softirq that could race with set_tsk_thread_flag(), so
1212  * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
1213  *
1214  * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
1215  * subsequent code.
1216  */
1217 void fpsimd_flush_task_state(struct task_struct *t)
1218 {
1219 	t->thread.fpsimd_cpu = NR_CPUS;
1220 	/*
1221 	 * If we don't support fpsimd, bail out after we have
1222 	 * reset fpsimd_cpu for this task, without setting
1223 	 * TIF_FOREIGN_FPSTATE: there is no FP state to flag as foreign.
1224 	 */
1225 	if (!system_supports_fpsimd())
1226 		return;
1227 	barrier();
1228 	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
1229 
1230 	barrier();
1231 }
1232 
1233 /*
1234  * Invalidate any task's FPSIMD state that is present on this cpu.
1235  * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
1236  * before calling this function.
1237  */
1238 static void fpsimd_flush_cpu_state(void)
1239 {
1240 	WARN_ON(!system_supports_fpsimd());
1241 	__this_cpu_write(fpsimd_last_state.st, NULL);
1242 	set_thread_flag(TIF_FOREIGN_FPSTATE);
1243 }
1244 
1245 /*
1246  * Save the FPSIMD state to memory and invalidate cpu view.
1247  * This function must be called with preemption disabled.
1248  */
1249 void fpsimd_save_and_flush_cpu_state(void)
1250 {
1251 	if (!system_supports_fpsimd())
1252 		return;
1253 	WARN_ON(preemptible());
1254 	__get_cpu_fpsimd_context();
1255 	fpsimd_save();
1256 	fpsimd_flush_cpu_state();
1257 	__put_cpu_fpsimd_context();
1258 }
1259 
1260 #ifdef CONFIG_KERNEL_MODE_NEON
1261 
1262 /*
1263  * Kernel-side NEON support functions
1264  */
1265 
1266 /*
1267  * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
1268  * context
1269  *
1270  * Must not be called unless may_use_simd() returns true.
1271  * Task context in the FPSIMD registers is saved back to memory as necessary.
1272  *
1273  * A matching call to kernel_neon_end() must be made before returning from the
1274  * calling context.
1275  *
1276  * The caller may freely use the FPSIMD registers until kernel_neon_end() is
1277  * called.
1278  */
1279 void kernel_neon_begin(void)
1280 {
1281 	if (WARN_ON(!system_supports_fpsimd()))
1282 		return;
1283 
1284 	BUG_ON(!may_use_simd());
1285 
1286 	get_cpu_fpsimd_context();
1287 
1288 	/* Save unsaved fpsimd state, if any: */
1289 	fpsimd_save();
1290 
1291 	/* Invalidate any task state remaining in the fpsimd regs: */
1292 	fpsimd_flush_cpu_state();
1293 }
1294 EXPORT_SYMBOL(kernel_neon_begin);
1295 
1296 /*
1297  * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
1298  *
1299  * Must be called from a context in which kernel_neon_begin() was previously
1300  * called, with no call to kernel_neon_end() in the meantime.
1301  *
1302  * The caller must not use the FPSIMD registers after this function is called,
1303  * unless kernel_neon_begin() is called again in the meantime.
1304  */
1305 void kernel_neon_end(void)
1306 {
1307 	if (!system_supports_fpsimd())
1308 		return;
1309 
1310 	put_cpu_fpsimd_context();
1311 }
1312 EXPORT_SYMBOL(kernel_neon_end);
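/*
 * Typical calling pattern for the pair above (an illustrative sketch;
 * real users live in e.g. the arm64 crypto code):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		// ... NEON/FPSIMD-accelerated work ...
 *		kernel_neon_end();
 *	} else {
 *		// scalar fallback
 *	}
 */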
1313 
1314 #ifdef CONFIG_EFI
1315 
1316 static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
1317 static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
1318 static DEFINE_PER_CPU(bool, efi_sve_state_used);
1319 
1320 /*
1321  * EFI runtime services support functions
1322  *
1323  * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
1324  * This means that for EFI (and only for EFI), we have to assume that FPSIMD
1325  * is always used rather than being an optional accelerator.
1326  *
1327  * These functions provide the necessary support for ensuring FPSIMD
1328  * save/restore in the contexts from which EFI is used.
1329  *
1330  * Do not use them for any other purpose -- if tempted to do so, you are
1331  * either doing something wrong or you need to propose some refactoring.
1332  */
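/*
 * The expected pairing (illustrative; the real call sites are in the EFI
 * runtime service wrappers):
 *
 *	__efi_fpsimd_begin();
 *	// ... perform the EFI runtime service call ...
 *	__efi_fpsimd_end();
 */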
1333 
1334 /*
1335  * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
1336  */
1337 void __efi_fpsimd_begin(void)
1338 {
1339 	if (!system_supports_fpsimd())
1340 		return;
1341 
1342 	WARN_ON(preemptible());
1343 
1344 	if (may_use_simd()) {
1345 		kernel_neon_begin();
1346 	} else {
1347 		/*
1348 		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
1349 		 * preserving:
1350 		 */
1351 		if (system_supports_sve() && likely(efi_sve_state)) {
1352 			char *sve_state = this_cpu_ptr(efi_sve_state);
1353 
1354 			__this_cpu_write(efi_sve_state_used, true);
1355 
1356 			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
1357 				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
1358 		} else {
1359 			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
1360 		}
1361 
1362 		__this_cpu_write(efi_fpsimd_state_used, true);
1363 	}
1364 }
1365 
1366 /*
1367  * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
1368  */
1369 void __efi_fpsimd_end(void)
1370 {
1371 	if (!system_supports_fpsimd())
1372 		return;
1373 
1374 	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
1375 		kernel_neon_end();
1376 	} else {
1377 		if (system_supports_sve() &&
1378 		    likely(__this_cpu_read(efi_sve_state_used))) {
1379 			char const *sve_state = this_cpu_ptr(efi_sve_state);
1380 
1381 			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
1382 				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
1383 				       sve_vq_from_vl(sve_get_vl()) - 1);
1384 
1385 			__this_cpu_write(efi_sve_state_used, false);
1386 		} else {
1387 			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
1388 		}
1389 	}
1390 }
1391 
1392 #endif /* CONFIG_EFI */
1393 
1394 #endif /* CONFIG_KERNEL_MODE_NEON */
1395 
1396 #ifdef CONFIG_CPU_PM
1397 static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
1398 				  unsigned long cmd, void *v)
1399 {
1400 	switch (cmd) {
1401 	case CPU_PM_ENTER:
1402 		fpsimd_save_and_flush_cpu_state();
1403 		break;
1404 	case CPU_PM_EXIT:
1405 		break;
1406 	case CPU_PM_ENTER_FAILED:
1407 	default:
1408 		return NOTIFY_DONE;
1409 	}
1410 	return NOTIFY_OK;
1411 }
1412 
1413 static struct notifier_block fpsimd_cpu_pm_notifier_block = {
1414 	.notifier_call = fpsimd_cpu_pm_notifier,
1415 };
1416 
1417 static void __init fpsimd_pm_init(void)
1418 {
1419 	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
1420 }
1421 
1422 #else
1423 static inline void fpsimd_pm_init(void) { }
1424 #endif /* CONFIG_CPU_PM */
1425 
1426 #ifdef CONFIG_HOTPLUG_CPU
1427 static int fpsimd_cpu_dead(unsigned int cpu)
1428 {
1429 	per_cpu(fpsimd_last_state.st, cpu) = NULL;
1430 	return 0;
1431 }
1432 
1433 static inline void fpsimd_hotplug_init(void)
1434 {
1435 	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
1436 				  NULL, fpsimd_cpu_dead);
1437 }
1438 
1439 #else
1440 static inline void fpsimd_hotplug_init(void) { }
1441 #endif
1442 
1443 /*
1444  * FP/SIMD support code initialisation.
1445  */
1446 static int __init fpsimd_init(void)
1447 {
1448 	if (cpu_have_named_feature(FP)) {
1449 		fpsimd_pm_init();
1450 		fpsimd_hotplug_init();
1451 	} else {
1452 		pr_notice("Floating-point is not implemented\n");
1453 	}
1454 
1455 	if (!cpu_have_named_feature(ASIMD))
1456 		pr_notice("Advanced SIMD is not implemented\n");
1457 
1458 	return sve_sysctl_init();
1459 }
1460 core_initcall(fpsimd_init);
1461