/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>

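/*
 * FP exception flag bits, as reported in the ISS field of ESR_ELx for
 * trapped floating-point exceptions (decoded in do_fpsimd_exc() below):
 */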
#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add an fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task most recently loaded onto
 * the CPU, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if the task's fpsimd_cpu field contains the
 *   id of the current CPU and the CPU's fpsimd_last_state per-cpu variable
 *   points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
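
/*
 * Illustrative sketch only (this mirrors the test in fpsimd_thread_switch()
 * below and is not a separate helper in this file): the "still mutually in
 * sync" check described above amounts to
 *
 *	__this_cpu_read(fpsimd_last_state.st) == &tsk->thread.uw.fpsimd_state
 *		&& tsk->thread.fpsimd_cpu == smp_processor_id()
 *
 * Both halves are needed: the task may have since run on another CPU, and
 * this CPU may have since loaded another task's state or run kernel mode
 * NEON.
 */
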
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	bool sve_in_use;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;

#ifdef CONFIG_ARM64_SVE

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
/* Set of available vector lengths, as vq_to_bit(vq): */
static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}


/* Offset of FFR in the SVE register dump */
static size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

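/*
 * Address of task's FFR block within its sve_state buffer: this is the
 * pointer handed to sve_save_state()/sve_load_state() below.
 */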
static void *sve_pffr(struct task_struct *task)
{
	return (char *)task->thread.sve_state +
		sve_ffr_offset(task->thread.sve_vl);
}

static void change_cpacr(u64 val, u64 mask)
{
	u64 cpacr = read_sysreg(CPACR_EL1);
	u64 new = (cpacr & ~mask) | val;

	if (new != cpacr)
		write_sysreg(new, CPACR_EL1);
}

static void sve_user_disable(void)
{
	change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
}

static void sve_user_enable(void)
{
	change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] of the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(current),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

/*
 * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
 * with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_save(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state(sve_pffr(current),
				       &current->thread.uw.fpsimd_state.fpsr);
		} else {
			fpsimd_save_state(&current->thread.uw.fpsimd_state);
		}
	}
}

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */

static unsigned int vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static unsigned int bit_to_vq(unsigned int bit)
{
	if (WARN_ON(bit >= SVE_VQ_MAX))
		bit = SVE_VQ_MAX - 1;

	return SVE_VQ_MAX - bit;
}
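
/*
 * Worked example (for illustration): if only VQs 1, 2 and 4 are supported,
 * exactly the bits vq_to_bit(4) < vq_to_bit(2) < vq_to_bit(1) are set in
 * sve_vq_map.  Because larger VQs map to smaller bit indices, a
 * find_next_bit() search starting at vq_to_bit(vq) finds the largest
 * supported VQ <= vq, which is what find_supported_vector_length() below
 * relies on.
 */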

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(bit_to_vq(bit));
}

#ifdef CONFIG_SYSCTL

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;
	int vl = sve_default_vl;
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	sve_default_vl = find_supported_vector_length(vl);
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! CONFIG_SYSCTL */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
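
/*
 * ZREG(sve_state, vq, n) is the address of Zn within a buffer laid out as
 * in the SVE signal frame (less its header).  Only bits [127:0] of each Zn
 * alias the corresponding FPSIMD register Vn, which is all that the two
 * conversion helpers below need to copy.
 */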

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
		       sizeof(fst->vregs[i]));
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}


/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));

	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with.  A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		local_bh_disable();

		task_fpsimd_save();
		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	if (flags & PR_SVE_VL_INHERIT)
		set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
	else
		clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
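
/*
 * Illustrative userspace use of the two prctl()s above (a sketch, not part
 * of this file; error handling omitted):
 *
 *	#include <sys/prctl.h>
 *
 *	// Request a 256-bit (32-byte) vector length, inherited across fork():
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 * On success, the return value encodes the VL actually granted (which
 * find_supported_vector_length() may have clamped) together with the flag
 * bits, exactly as built by sve_prctl_status() above.
 */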

/*
 * Bitmap for temporary storage of the per-CPU set of supported vector lengths
 * during secondary boot.
 */
static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);

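/*
 * Discover the set of VQs supported by this CPU by requesting each
 * candidate length in ZCR_EL1 and reading back what the hardware granted.
 * Requesting an unsupported VQ is assumed to yield the next smaller
 * supported one: e.g. on a CPU supporting only VQs {1, 2, 4}, requests for
 * VQ 5 and up all come back as VQ 4, which is why the loop can jump
 * straight to the granted value ("skip intervening lengths").
 */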
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(vq_to_bit(vq), map);
	}
}

void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 */
void sve_update_vq_map(void)
{
	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
}

/* Check whether the current CPU supports all VQs in the committed set */
int sve_verify_vq_map(void)
{
	int ret = 0;

	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
		      SVE_VQ_MAX);
	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		ret = -EINVAL;
	}

	return ret;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

void __init sve_setup(void)
{
	u64 zcr;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map.  If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	sve_default_vl = find_supported_vector_length(64);

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		sve_default_vl);

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

	task_fpsimd_save();
	fpsimd_to_sve(current);

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);
	set_thread_flag(TIF_FOREIGN_FPSTATE);

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	siginfo_t info;
	unsigned int si_code = FPE_FLTUNK;

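	/*
	 * ESR_ELx_FP_EXC_TFV ("trapped fault valid") means the FPEXC_*
	 * cumulative exception bits tested below hold valid information:
	 */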
	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	clear_siginfo(&info);
	info.si_signo = SIGFPE;
	info.si_code = si_code;
	info.si_addr = (void __user *)instruction_pointer(regs);

	send_sig_info(SIGFPE, &info, current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	if (!system_supports_fpsimd())
		return;
	/*
	 * Save the current FPSIMD state to memory, but only if whatever is in
	 * the registers is in fact the most recent userland FPSIMD state of
	 * 'current'.
	 */
	if (current->mm)
		task_fpsimd_save();

	if (next->mm) {
		/*
		 * If we are switching to a task whose most recent userland
		 * FPSIMD state is already in the registers of *this* cpu,
		 * we can skip loading the state from memory. Otherwise, set
		 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
		 * upon the next return to userland.
		 */
		if (__this_cpu_read(fpsimd_last_state.st) ==
			&next->thread.uw.fpsimd_state
		    && next->thread.fpsimd_cpu == smp_processor_id())
			clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
		else
			set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
	}
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));
	fpsimd_flush_task_state(current);

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	set_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	task_fpsimd_save();
	local_bh_enable();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
static void fpsimd_bind_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	last->st = &current->thread.uw.fpsimd_state;
	last->sve_in_use = test_thread_flag(TIF_SVE);
	current->thread.fpsimd_cpu = smp_processor_id();
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_to_cpu();
	}

	local_bh_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
		fpsimd_bind_to_cpu();

	local_bh_enable();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
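	/*
	 * NR_CPUS is never a valid CPU id, so checks of the form
	 * fpsimd_cpu == smp_processor_id() can no longer succeed:
	 */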
	t->thread.fpsimd_cpu = NR_CPUS;
}

static inline void fpsimd_flush_cpu_state(void)
{
	__this_cpu_write(fpsimd_last_state.st, NULL);
}

/*
 * Invalidate any task SVE state currently held in this CPU's regs.
 *
 * This is used to prevent the kernel from trying to reuse SVE register data
 * that is destroyed by KVM guest enter/exit.  This function should go away when
 * KVM SVE support is implemented.  Don't use it for anything else.
 */
#ifdef CONFIG_ARM64_SVE
void sve_flush_cpu_state(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);

	if (last->st && last->sve_in_use)
		fpsimd_flush_cpu_state();
}
#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved task fpsimd state, if any: */
	if (current->mm) {
		task_fpsimd_save();
		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

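	/*
	 * Stay non-preemptible until the matching kernel_neon_end(), which
	 * is what re-enables preemption, so that the FPSIMD registers can't
	 * be clobbered by a context switch in the meantime:
	 */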
	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
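
/*
 * Typical calling pattern (an illustrative sketch; real callers live
 * elsewhere in the kernel and must supply their own scalar fallback):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		... use FPSIMD/NEON registers ...
 *		kernel_neon_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */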

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */
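
/*
 * Illustrative call pattern (a sketch: the real call sites are the EFI
 * runtime service wrappers, and efi_call_virt() stands in for whichever
 * wrapper is used, not an exact signature):
 *
 *	__efi_fpsimd_begin();
 *	status = efi_call_virt(...);
 *	__efi_fpsimd_end();
 */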

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
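		/* Kernel threads have no mm, hence no userland FPSIMD state: */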
		if (current->mm)
			task_fpsimd_save();
		fpsimd_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		if (current->mm)
			set_thread_flag(TIF_FOREIGN_FPSTATE);
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (elf_hwcap & HWCAP_FP) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!(elf_hwcap & HWCAP_ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);
1302