xref: /linux/arch/arm64/kernel/fpsimd.c (revision 0baba94a9779c13c857f6efc55807e6a45b1d4e4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * FP/SIMD context switching and fault handling
4  *
5  * Copyright (C) 2012 ARM Ltd.
6  * Author: Catalin Marinas <catalin.marinas@arm.com>
7  */
8 
9 #include <linux/bitmap.h>
10 #include <linux/bitops.h>
11 #include <linux/bottom_half.h>
12 #include <linux/bug.h>
13 #include <linux/cache.h>
14 #include <linux/compat.h>
15 #include <linux/compiler.h>
16 #include <linux/cpu.h>
17 #include <linux/cpu_pm.h>
18 #include <linux/cpumask.h>
19 #include <linux/ctype.h>
20 #include <linux/kernel.h>
21 #include <linux/linkage.h>
22 #include <linux/irqflags.h>
23 #include <linux/init.h>
24 #include <linux/percpu.h>
25 #include <linux/prctl.h>
26 #include <linux/preempt.h>
27 #include <linux/ptrace.h>
28 #include <linux/sched/signal.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/signal.h>
31 #include <linux/slab.h>
32 #include <linux/smp.h>
33 #include <linux/stddef.h>
34 #include <linux/sysctl.h>
35 #include <linux/swab.h>
36 
37 #include <asm/esr.h>
38 #include <asm/exception.h>
39 #include <asm/fpsimd.h>
40 #include <asm/cpufeature.h>
41 #include <asm/cputype.h>
42 #include <asm/neon.h>
43 #include <asm/processor.h>
44 #include <asm/simd.h>
45 #include <asm/sigcontext.h>
46 #include <asm/sysreg.h>
47 #include <asm/traps.h>
48 #include <asm/virt.h>
49 
50 #define FPEXC_IOF	(1 << 0)
51 #define FPEXC_DZF	(1 << 1)
52 #define FPEXC_OFF	(1 << 2)
53 #define FPEXC_UFF	(1 << 3)
54 #define FPEXC_IXF	(1 << 4)
55 #define FPEXC_IDF	(1 << 7)
56 
57 /*
58  * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
59  *
60  * In order to reduce the number of times the FPSIMD state is needlessly saved
61  * and restored, we need to keep track of two things:
62  * (a) for each task, we need to remember which CPU was the last one to have
63  *     the task's FPSIMD state loaded into its FPSIMD registers;
64  * (b) for each CPU, we need to remember which task's userland FPSIMD state has
65  *     been loaded into its FPSIMD registers most recently, or whether it has
66  *     been used to perform kernel mode NEON in the meantime.
67  *
68  * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
69  * the id of the current CPU every time the state is loaded onto a CPU. For (b),
70  * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
71  * address of the userland FPSIMD state of the task that was loaded onto the CPU
72  * the most recently, or NULL if kernel mode NEON has been performed after that.
73  *
74  * With this in place, we no longer have to restore the next FPSIMD state right
75  * when switching between tasks. Instead, we can defer this check to userland
76  * resume, at which time we verify whether the CPU's fpsimd_last_state and the
77  * task's fpsimd_cpu are still mutually in sync. If this is the case, we
78  * can omit the FPSIMD restore.
79  *
80  * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
81  * indicate whether or not the userland FPSIMD state of the current task is
82  * present in the registers. The flag is set unless the FPSIMD registers of this
83  * CPU currently contain the most recent userland FPSIMD state of the current
 * task. If the task is behaving as a VMM, then this will be managed by
85  * KVM which will clear it to indicate that the vcpu FPSIMD state is currently
86  * loaded on the CPU, allowing the state to be saved if a FPSIMD-aware
87  * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
88  * flag the register state as invalid.
89  *
90  * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may be
91  * called from softirq context, which will save the task's FPSIMD context back
92  * to task_struct. To prevent this from racing with the manipulation of the
93  * task's FPSIMD state from task context and thereby corrupting the state, it
94  * is necessary to protect any manipulation of a task's fpsimd_state or
95  * TIF_FOREIGN_FPSTATE flag with get_cpu_fpsimd_context(), which will suspend
96  * softirq servicing entirely until put_cpu_fpsimd_context() is called.
97  *
98  * For a certain task, the sequence may look something like this:
99  * - the task gets scheduled in; if both the task's fpsimd_cpu field
100  *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
101  *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
102  *   cleared, otherwise it is set;
103  *
104  * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
105  *   userland FPSIMD state is copied from memory to the registers, the task's
106  *   fpsimd_cpu field is set to the id of the current CPU, the current
107  *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
108  *   TIF_FOREIGN_FPSTATE flag is cleared;
109  *
110  * - the task executes an ordinary syscall; upon return to userland, the
111  *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
112  *   restored;
113  *
114  * - the task executes a syscall which executes some NEON instructions; this is
115  *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
116  *   register contents to memory, clears the fpsimd_last_state per-cpu variable
117  *   and sets the TIF_FOREIGN_FPSTATE flag;
118  *
119  * - the task gets preempted after kernel_neon_end() is called; as we have not
120  *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
121  *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
122  */
123 
/*
 * Per-CPU record of the FPSIMD context most recently bound to this CPU
 * (a task's user state, or KVM vcpu state) — see the big comment above.
 */
DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
125 
/*
 * Per-vector-type vector length information.  The SVE limits start at
 * SVE_VL_MIN and are refined during CPU probing; the SME entry is left
 * zero-initialised here and discovered entirely at boot.
 */
__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
	[ARM64_VEC_SVE] = {
		.type			= ARM64_VEC_SVE,
		.name			= "SVE",
		.min_vl			= SVE_VL_MIN,
		.max_vl			= SVE_VL_MIN,
		.max_virtualisable_vl	= SVE_VL_MIN,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[ARM64_VEC_SME] = {
		.type			= ARM64_VEC_SME,
		.name			= "SME",
	},
#endif
};
143 
144 static unsigned int vec_vl_inherit_flag(enum vec_type type)
145 {
146 	switch (type) {
147 	case ARM64_VEC_SVE:
148 		return TIF_SVE_VL_INHERIT;
149 	case ARM64_VEC_SME:
150 		return TIF_SME_VL_INHERIT;
151 	default:
152 		WARN_ON_ONCE(1);
153 		return 0;
154 	}
155 }
156 
/* Per-vector-type configuration set via sysctl. */
struct vl_config {
	int __default_vl;		/* Default VL for tasks */
};

static struct vl_config vl_config[ARM64_VEC_MAX];

/* Lockless read of the default VL; paired with WRITE_ONCE() in set_default_vl(). */
static inline int get_default_vl(enum vec_type type)
{
	return READ_ONCE(vl_config[type].__default_vl);
}
167 
#ifdef CONFIG_ARM64_SVE

/* Default SVE vector length applied to new tasks. */
static inline int get_sve_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SVE);
}

/* Lockless update of the default VL; paired with READ_ONCE() in get_default_vl(). */
static inline void set_default_vl(enum vec_type type, int val)
{
	WRITE_ONCE(vl_config[type].__default_vl, val);
}

static inline void set_sve_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SVE, val);
}

#endif /* CONFIG_ARM64_SVE */
186 
#ifdef CONFIG_ARM64_SME

/* Default SME vector length applied to new tasks. */
static int get_sme_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SME);
}

static void set_sme_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SME, val);
}

/* Defined later; frees task->thread.sme_state. */
static void sme_free(struct task_struct *);

#else

/* !CONFIG_ARM64_SME: no SME state is ever allocated, nothing to free. */
static inline void sme_free(struct task_struct *t) { }

#endif
206 
207 static void fpsimd_bind_task_to_cpu(void);
208 
209 /*
210  * Claim ownership of the CPU FPSIMD context for use by the calling context.
211  *
212  * The caller may freely manipulate the FPSIMD context metadata until
213  * put_cpu_fpsimd_context() is called.
214  *
215  * On RT kernels local_bh_disable() is not sufficient because it only
216  * serializes soft interrupt related sections via a local lock, but stays
217  * preemptible. Disabling preemption is the right choice here as bottom
218  * half processing is always in thread context on RT kernels so it
219  * implicitly prevents bottom half processing as well.
220  */
221 static void get_cpu_fpsimd_context(void)
222 {
223 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
224 		/*
225 		 * The softirq subsystem lacks a true unmask/mask API, and
226 		 * re-enabling softirq processing using local_bh_enable() will
227 		 * not only unmask softirqs, it will also result in immediate
228 		 * delivery of any pending softirqs.
229 		 * This is undesirable when running with IRQs disabled, but in
230 		 * that case, there is no need to mask softirqs in the first
231 		 * place, so only bother doing so when IRQs are enabled.
232 		 */
233 		if (!irqs_disabled())
234 			local_bh_disable();
235 	} else {
236 		preempt_disable();
237 	}
238 }
239 
240 /*
241  * Release the CPU FPSIMD context.
242  *
243  * Must be called from a context in which get_cpu_fpsimd_context() was
244  * previously called, with no call to put_cpu_fpsimd_context() in the
245  * meantime.
246  */
247 static void put_cpu_fpsimd_context(void)
248 {
249 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
250 		if (!irqs_disabled())
251 			local_bh_enable();
252 	} else {
253 		preempt_enable();
254 	}
255 }
256 
/* Current vector length for @type configured for @task. */
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
	return task->thread.vl[type];
}

/* Set @task's current vector length for @type; no validation is done here. */
void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned long vl)
{
	task->thread.vl[type] = vl;
}

/* Vector length for @type that will take effect at the next exec (0 = default). */
unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type)
{
	return task->thread.vl_onexec[type];
}

/* Set the vector length for @type to apply at the next exec (0 = default). */
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned long vl)
{
	task->thread.vl_onexec[type] = vl;
}
279 
280 /*
281  * TIF_SME controls whether a task can use SME without trapping while
282  * in userspace, when TIF_SME is set then we must have storage
283  * allocated in sve_state and sme_state to store the contents of both ZA
284  * and the SVE registers for both streaming and non-streaming modes.
285  *
286  * If both SVCR.ZA and SVCR.SM are disabled then at any point we
287  * may disable TIF_SME and reenable traps.
288  */
289 
290 
291 /*
292  * TIF_SVE controls whether a task can use SVE without trapping while
293  * in userspace, and also (together with TIF_SME) the way a task's
294  * FPSIMD/SVE state is stored in thread_struct.
295  *
296  * The kernel uses this flag to track whether a user task is actively
297  * using SVE, and therefore whether full SVE register state needs to
298  * be tracked.  If not, the cheaper FPSIMD context handling code can
299  * be used instead of the more costly SVE equivalents.
300  *
301  *  * TIF_SVE or SVCR.SM set:
302  *
303  *    The task can execute SVE instructions while in userspace without
304  *    trapping to the kernel.
305  *
306  *    During any syscall, the kernel may optionally clear TIF_SVE and
307  *    discard the vector state except for the FPSIMD subset.
308  *
309  *  * TIF_SVE clear:
310  *
311  *    An attempt by the user task to execute an SVE instruction causes
312  *    do_sve_acc() to be called, which does some preparation and then
313  *    sets TIF_SVE.
314  *
315  * During any syscall, the kernel may optionally clear TIF_SVE and
316  * discard the vector state except for the FPSIMD subset.
317  *
318  * The data will be stored in one of two formats:
319  *
320  *  * FPSIMD only - FP_STATE_FPSIMD:
321  *
 *    When only the FPSIMD state is stored, task->thread.fp_type is set to
323  *    FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in
324  *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
325  *    logically zero but not stored anywhere; P0-P15 and FFR are not
326  *    stored and have unspecified values from userspace's point of
327  *    view.  For hygiene purposes, the kernel zeroes them on next use,
328  *    but userspace is discouraged from relying on this.
329  *
330  *    task->thread.sve_state does not need to be non-NULL, valid or any
331  *    particular size: it must not be dereferenced and any data stored
332  *    there should be considered stale and not referenced.
333  *
334  *  * SVE state - FP_STATE_SVE:
335  *
336  *    When the full SVE state is stored task->thread.fp_type is set to
337  *    FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
339  *    task->thread.sve_state, formatted appropriately for vector
340  *    length task->thread.sve_vl or, if SVCR.SM is set,
341  *    task->thread.sme_vl. The storage for the vector registers in
342  *    task->thread.uw.fpsimd_state should be ignored.
343  *
344  *    task->thread.sve_state must point to a valid buffer at least
345  *    sve_state_size(task) bytes in size. The data stored in
346  *    task->thread.uw.fpsimd_state.vregs should be considered stale
347  *    and not referenced.
348  *
349  *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
350  *    irrespective of whether TIF_SVE is clear or set, since these are
351  *    not vector length dependent.
352  */
353 
354 /*
355  * Update current's FPSIMD/SVE registers from thread_struct.
356  *
357  * This function should be called only when the FPSIMD/SVE state in
358  * thread_struct is known to be up to date, when preparing to enter
359  * userspace.
360  */
static void task_fpsimd_load(void)
{
	bool restore_sve_regs = false;
	bool restore_ffr;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());
	WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));

	/* Pick the restore format and keep TIF_SVE in sync with fp_type. */
	if (system_supports_sve() || system_supports_sme()) {
		switch (current->thread.fp_type) {
		case FP_STATE_FPSIMD:
			/* Stop tracking SVE for this task until next use. */
			clear_thread_flag(TIF_SVE);
			break;
		case FP_STATE_SVE:
			if (!thread_sm_enabled(&current->thread))
				WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE));

			/* Program the SVE VL before loading SVE registers. */
			if (test_thread_flag(TIF_SVE))
				sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);

			restore_sve_regs = true;
			restore_ffr = true;
			break;
		default:
			/*
			 * This indicates either a bug in
			 * fpsimd_save_user_state() or memory corruption, we
			 * should always record an explicit format
			 * when we save. We always at least have the
			 * memory allocated for FPSIMD registers so
			 * try that and hope for the best.
			 */
			WARN_ON_ONCE(1);
			clear_thread_flag(TIF_SVE);
			break;
		}
	}

	/* Restore SME, override SVE register configuration if needed */
	if (system_supports_sme()) {
		unsigned long sme_vl = task_get_sme_vl(current);

		/* Ensure VL is set up for restoring data */
		if (test_thread_flag(TIF_SME))
			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);

		write_sysreg_s(current->thread.svcr, SYS_SVCR);

		if (thread_za_enabled(&current->thread))
			sme_load_state(current->thread.sme_state,
				       system_supports_sme2());

		/* In streaming mode FFR only exists if FA64 is implemented. */
		if (thread_sm_enabled(&current->thread))
			restore_ffr = system_supports_fa64();
	}

	if (system_supports_fpmr())
		write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);

	if (restore_sve_regs) {
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       restore_ffr);
	} else {
		/* restore_ffr is never read on this path. */
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
	}
}
432 
433 /*
434  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
435  * date with respect to the CPU registers. Note carefully that the
436  * current context is the context last bound to the CPU stored in
437  * last, if KVM is involved this may be the guest VM context rather
438  * than the host thread for the VM pointed to by current. This means
439  * that we must always reference the state storage via last rather
440  * than via current, if we are saving KVM state then it will have
441  * ensured that the type of registers to save is set in last->to_save.
442  */
static void fpsimd_save_user_state(void)
{
	struct cpu_fp_state const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
	bool save_sve_regs = false;
	bool save_ffr;
	unsigned int vl;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());

	/* Registers do not hold the owner's state: nothing to write back. */
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		return;

	if (system_supports_fpmr())
		*(last->fpmr) = read_sysreg_s(SYS_FPMR);

	/*
	 * Save SVE state if it is live.
	 *
	 * The syscall ABI discards live SVE state at syscall entry. When
	 * entering a syscall, fpsimd_syscall_enter() sets to_save to
	 * FP_STATE_FPSIMD to allow the SVE state to be lazily discarded until
	 * either new SVE state is loaded+bound or fpsimd_syscall_exit() is
	 * called prior to a return to userspace.
	 */
	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
	    last->to_save == FP_STATE_SVE) {
		save_sve_regs = true;
		save_ffr = true;
		vl = last->sve_vl;
	}

	if (system_supports_sme()) {
		u64 *svcr = last->svcr;

		*svcr = read_sysreg_s(SYS_SVCR);

		if (*svcr & SVCR_ZA_MASK)
			sme_save_state(last->sme_state,
				       system_supports_sme2());

		/* If we are in streaming mode override regular SVE. */
		if (*svcr & SVCR_SM_MASK) {
			save_sve_regs = true;
			save_ffr = system_supports_fa64();
			vl = last->sme_vl;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
		/* Get the configured VL from RDVL, will account for SM */
		if (WARN_ON(sve_get_vl() != vl)) {
			/*
			 * Can't save the user regs, so current would
			 * re-enter user with corrupt state.
			 * There's no way to recover, so kill it:
			 */
			force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
			return;
		}

		sve_save_state((char *)last->sve_state +
					sve_ffr_offset(vl),
			       &last->st->fpsr, save_ffr);
		/* Record the format actually written so the loader can trust it. */
		*last->fp_type = FP_STATE_SVE;
	} else {
		fpsimd_save_state(last->st);
		*last->fp_type = FP_STATE_FPSIMD;
	}
}
515 
516 /*
517  * All vector length selection from userspace comes through here.
518  * We're on a slow path, so some sanity-checks are included.
519  * If things go wrong there's a bug somewhere, but try to fall back to a
520  * safe choice.
521  */
522 static unsigned int find_supported_vector_length(enum vec_type type,
523 						 unsigned int vl)
524 {
525 	struct vl_info *info = &vl_info[type];
526 	int bit;
527 	int max_vl = info->max_vl;
528 
529 	if (WARN_ON(!sve_vl_valid(vl)))
530 		vl = info->min_vl;
531 
532 	if (WARN_ON(!sve_vl_valid(max_vl)))
533 		max_vl = info->min_vl;
534 
535 	if (vl > max_vl)
536 		vl = max_vl;
537 	if (vl < info->min_vl)
538 		vl = info->min_vl;
539 
540 	bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
541 			    __vq_to_bit(sve_vq_from_vl(vl)));
542 	return sve_vl_from_vq(__bit_to_vq(bit));
543 }
544 
545 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
546 
/*
 * sysctl handler for the per-type default vector length.  @table->extra1
 * points at the relevant vl_info entry, from which the vector type is
 * recovered.  Reads report the current default; writes validate and
 * round the value to a supported VL, with -1 meaning "maximum".
 */
static int vec_proc_do_default_vl(const struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	struct vl_info *info = table->extra1;
	enum vec_type type = info->type;
	int ret;
	int vl = get_default_vl(type);
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = info->max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_default_vl(type, find_supported_vector_length(type, vl));
	return 0;
}
573 
/* /proc/sys/abi/sve_default_vector_length */
static const struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SVE],
	},
};
582 
583 static int __init sve_sysctl_init(void)
584 {
585 	if (system_supports_sve())
586 		if (!register_sysctl("abi", sve_default_vl_table))
587 			return -EINVAL;
588 
589 	return 0;
590 }
591 
#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
/* No SVE or no sysctl: nothing to register. */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
595 
#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
/* /proc/sys/abi/sme_default_vector_length */
static const struct ctl_table sme_default_vl_table[] = {
	{
		.procname	= "sme_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SME],
	},
};
605 
606 static int __init sme_sysctl_init(void)
607 {
608 	if (system_supports_sme())
609 		if (!register_sysctl("abi", sme_default_vl_table))
610 			return -EINVAL;
611 
612 	return 0;
613 }
614 
#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
/* No SME or no sysctl: nothing to register. */
static int __init sme_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
618 
/* Address of Zn within an sve_state buffer laid out in signal-frame format. */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

#ifdef CONFIG_CPU_BIG_ENDIAN
/*
 * Convert a native-endian 128-bit quantity to the little-endian layout
 * used for Z-register slices in sve_state: each 64-bit half is
 * byte-swapped and the halves are exchanged.
 */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
/* Little-endian CPU: memory layout already matches, no swap needed. */
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

/* Byte swapping is an involution, so the inverse is the same operation. */
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
638 
639 static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
640 			    unsigned int vq)
641 {
642 	unsigned int i;
643 	__uint128_t *p;
644 
645 	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
646 		p = (__uint128_t *)ZREG(sst, vq, i);
647 		*p = arm64_cpu_to_le128(fst->vregs[i]);
648 	}
649 }
650 
651 /*
652  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
653  * task->thread.sve_state.
654  *
655  * Task can be a non-runnable task, or current.  In the latter case,
656  * the caller must have ownership of the cpu FPSIMD context before calling
657  * this function.
658  * task->thread.sve_state must point to at least sve_state_size(task)
659  * bytes of allocated kernel memory.
660  * task->thread.uw.fpsimd_state must be up to date before calling this
661  * function.
662  */
static inline void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	/* Nothing to convert if neither SVE nor SME exists on this system. */
	if (!system_supports_sve() && !system_supports_sme())
		return;

	/* Use the currently effective VL (SME VL if in streaming mode). */
	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
	__fpsimd_to_sve(sst, fst, vq);
}
675 
676 /*
677  * Transfer the SVE state in task->thread.sve_state to
678  * task->thread.uw.fpsimd_state.
679  *
680  * Task can be a non-runnable task, or current.  In the latter case,
681  * the caller must have ownership of the cpu FPSIMD context before calling
682  * this function.
683  * task->thread.sve_state must point to at least sve_state_size(task)
684  * bytes of allocated kernel memory.
685  * task->thread.sve_state must be up to date before calling this function.
686  */
static inline void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq, vl;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	/* Nothing to convert if neither SVE nor SME exists on this system. */
	if (!system_supports_sve() && !system_supports_sme())
		return;

	/* Use the currently effective VL (SME VL if in streaming mode). */
	vl = thread_get_cur_vl(&task->thread);
	vq = sve_vq_from_vl(vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		/* Vn is the low 128 bits of Zn, stored little-endian. */
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}
705 
706 static inline void __fpsimd_zero_vregs(struct user_fpsimd_state *fpsimd)
707 {
708 	memset(&fpsimd->vregs, 0, sizeof(fpsimd->vregs));
709 }
710 
/*
 * Simulate the effects of an SMSTOP SM instruction: leave streaming
 * mode, zeroing the FPSIMD vector registers and resetting FPSR.
 */
void task_smstop_sm(struct task_struct *task)
{
	if (!thread_sm_enabled(&task->thread))
		return;

	__fpsimd_zero_vregs(&task->thread.uw.fpsimd_state);
	/* Reset value of FPSR on exiting streaming mode — see the Arm ARM. */
	task->thread.uw.fpsimd_state.fpsr = 0x0800009f;
	if (system_supports_fpmr())
		task->thread.uw.fpmr = 0;

	task->thread.svcr &= ~SVCR_SM_MASK;
	/* Only the FPSIMD subset remains meaningful once SM is cleared. */
	task->thread.fp_type = FP_STATE_FPSIMD;
}
727 
/* cpufeature enable hook: allow EL0/EL1 access to FPMR via SCTLR_EL1.EnFPM. */
void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
		       SYS_SCTLR_EL1);
}
733 
734 #ifdef CONFIG_ARM64_SVE
/* Free @task's SVE state buffer and clear the pointer to guard against reuse. */
static void sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}
740 
741 /*
742  * Ensure that task->thread.sve_state is allocated and sufficiently large.
743  *
744  * This function should be used only in preparation for replacing
745  * task->thread.sve_state with new data.  The memory is always zeroed
746  * here to prevent stale data from showing through: this is done in
747  * the interest of testability and predictability: except in the
748  * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
750  */
751 void sve_alloc(struct task_struct *task, bool flush)
752 {
753 	if (task->thread.sve_state) {
754 		if (flush)
755 			memset(task->thread.sve_state, 0,
756 			       sve_state_size(task));
757 		return;
758 	}
759 
760 	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
761 	task->thread.sve_state =
762 		kzalloc(sve_state_size(task), GFP_KERNEL);
763 }
764 
765 /*
766  * Ensure that task->thread.uw.fpsimd_state is up to date with respect to the
767  * task's currently effective FPSIMD/SVE state.
768  *
769  * The task's FPSIMD/SVE/SME state must not be subject to concurrent
770  * manipulation.
771  */
void fpsimd_sync_from_effective_state(struct task_struct *task)
{
	/* In FPSIMD format uw.fpsimd_state is already authoritative. */
	if (task->thread.fp_type == FP_STATE_SVE)
		sve_to_fpsimd(task);
}
777 
778 /*
779  * Ensure that the task's currently effective FPSIMD/SVE state is up to date
780  * with respect to task->thread.uw.fpsimd_state, zeroing any effective
781  * non-FPSIMD (S)SVE state.
782  *
783  * The task's FPSIMD/SVE/SME state must not be subject to concurrent
784  * manipulation.
785  */
void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	/* In FPSIMD format uw.fpsimd_state is already the effective state. */
	if (task->thread.fp_type != FP_STATE_SVE)
		return;

	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));

	/* Zero the whole register image, then fill in the FPSIMD subset. */
	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}
800 
/*
 * Change @task's live vector length for @type to @vl, reallocating the
 * SVE (and, for SME, the SME) state buffers to match.  The effective
 * FPSIMD state and PSTATE.SM are preserved; other SVE state is zeroed.
 * Returns 0 on success or -ENOMEM if the new buffers cannot be allocated,
 * in which case the task is left completely unmodified.
 */
static int change_live_vector_length(struct task_struct *task,
				     enum vec_type type,
				     unsigned long vl)
{
	unsigned int sve_vl = task_get_sve_vl(task);
	unsigned int sme_vl = task_get_sme_vl(task);
	void *sve_state = NULL, *sme_state = NULL;

	if (type == ARM64_VEC_SME)
		sme_vl = vl;
	else
		sve_vl = vl;

	/*
	 * Allocate the new sve_state and sme_state before freeing the old
	 * copies so that allocation failure can be handled without needing to
	 * mutate the task's state in any way.
	 *
	 * Changes to the SVE vector length must not discard live ZA state or
	 * clear PSTATE.ZA, as userspace code which is unaware of the AAPCS64
	 * ZA lazy saving scheme may attempt to change the SVE vector length
	 * while unsaved/dormant ZA state exists.
	 */
	sve_state = kzalloc(__sve_state_size(sve_vl, sme_vl), GFP_KERNEL);
	if (!sve_state)
		goto out_mem;

	if (type == ARM64_VEC_SME) {
		sme_state = kzalloc(__sme_state_size(sme_vl), GFP_KERNEL);
		if (!sme_state)
			goto out_mem;
	}

	/* Get the state out of the registers before rewriting it. */
	if (task == current)
		fpsimd_save_and_flush_current_state();
	else
		fpsimd_flush_task_state(task);

	/*
	 * Always preserve PSTATE.SM and the effective FPSIMD state, zeroing
	 * other SVE state.
	 */
	fpsimd_sync_from_effective_state(task);
	task_set_vl(task, type, vl);
	kfree(task->thread.sve_state);
	task->thread.sve_state = sve_state;
	fpsimd_sync_to_effective_state_zeropad(task);

	if (type == ARM64_VEC_SME) {
		/* A new SME VL invalidates ZA; disable it along with the swap. */
		task->thread.svcr &= ~SVCR_ZA_MASK;
		kfree(task->thread.sme_state);
		task->thread.sme_state = sme_state;
	}

	return 0;

out_mem:
	kfree(sve_state);
	kfree(sme_state);
	return -ENOMEM;
}
862 
/*
 * Set @task's vector length for @type to @vl, honouring the
 * PR_SVE_SET_VL_ONEXEC and PR_SVE_VL_INHERIT flags (shared bit values
 * with the SME prctls).  Returns 0, -EINVAL on a bad VL or flags, or
 * -ENOMEM if new state buffers cannot be allocated.
 */
int vec_set_vector_length(struct task_struct *task, enum vec_type type,
			  unsigned long vl, unsigned long flags)
{
	bool onexec = flags & PR_SVE_SET_VL_ONEXEC;
	bool inherit = flags & PR_SVE_VL_INHERIT;

	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic code
	 * can work with.  A flag may be assigned in the future to
	 * allow setting of larger vector lengths without confusing
	 * older software.
	 */
	if (vl > VL_ARCH_MAX)
		vl = VL_ARCH_MAX;

	vl = find_supported_vector_length(type, vl);

	/* Only touch live state when the VL actually changes now. */
	if (!onexec && vl != task_get_vl(task, type)) {
		if (change_live_vector_length(task, type, vl))
			return -ENOMEM;
	}

	if (onexec || inherit)
		task_set_vl_onexec(task, type, vl);
	else
		/* Reset VL to system default on next exec: */
		task_set_vl_onexec(task, type, 0);

	update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}
903 
904 /*
905  * Encode the current vector length and flags for return.
906  * This is only required for prctl(): ptrace has separate fields.
907  * SVE and SME use the same bits for _ONEXEC and _INHERIT.
908  *
909  * flags are as for vec_set_vector_length().
910  */
911 static int vec_prctl_status(enum vec_type type, unsigned long flags)
912 {
913 	int ret;
914 
915 	if (flags & PR_SVE_SET_VL_ONEXEC)
916 		ret = task_get_vl_onexec(current, type);
917 	else
918 		ret = task_get_vl(current, type);
919 
920 	if (test_thread_flag(vec_vl_inherit_flag(type)))
921 		ret |= PR_SVE_VL_INHERIT;
922 
923 	return ret;
924 }
925 
926 /* PR_SVE_SET_VL */
927 int sve_set_current_vl(unsigned long arg)
928 {
929 	unsigned long vl, flags;
930 	int ret;
931 
932 	vl = arg & PR_SVE_VL_LEN_MASK;
933 	flags = arg & ~vl;
934 
935 	if (!system_supports_sve() || is_compat_task())
936 		return -EINVAL;
937 
938 	ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags);
939 	if (ret)
940 		return ret;
941 
942 	return vec_prctl_status(ARM64_VEC_SVE, flags);
943 }
944 
945 /* PR_SVE_GET_VL */
946 int sve_get_current_vl(void)
947 {
948 	if (!system_supports_sve() || is_compat_task())
949 		return -EINVAL;
950 
951 	return vec_prctl_status(ARM64_VEC_SVE, 0);
952 }
953 
954 #ifdef CONFIG_ARM64_SME
955 /* PR_SME_SET_VL */
956 int sme_set_current_vl(unsigned long arg)
957 {
958 	unsigned long vl, flags;
959 	int ret;
960 
961 	vl = arg & PR_SME_VL_LEN_MASK;
962 	flags = arg & ~vl;
963 
964 	if (!system_supports_sme() || is_compat_task())
965 		return -EINVAL;
966 
967 	ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags);
968 	if (ret)
969 		return ret;
970 
971 	return vec_prctl_status(ARM64_VEC_SME, flags);
972 }
973 
/* PR_SME_GET_VL: return the current SME vector length and status flags. */
int sme_get_current_vl(void)
{
	/* SME prctls are not available to 32-bit (compat) tasks. */
	if (!system_supports_sme() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SME, 0);
}
982 #endif /* CONFIG_ARM64_SME */
983 
/*
 * Probe the set of vector quanta (VQs) supported by this CPU for vector
 * type info->type, setting one bit per supported VQ in @map.
 *
 * Candidate VQs are walked from largest to smallest: writing a LEN
 * value to the relevant vector control register and reading back the
 * resulting vector length reveals which length the hardware actually
 * selected, allowing unimplemented intermediate lengths to be skipped.
 */
static void vec_probe_vqs(struct vl_info *info,
			  DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;

	bitmap_zero(map, SVE_VQ_MAX);

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_vl(info->type, vq - 1); /* self-syncing */

		switch (info->type) {
		case ARM64_VEC_SVE:
			vl = sve_get_vl();
			break;
		case ARM64_VEC_SME:
			vl = sme_get_vl();
			break;
		default:
			vl = 0;
			break;
		}

		/* Minimum VL identified? */
		if (sve_vq_from_vl(vl) > vq)
			break;

		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
1014 
1015 /*
1016  * Initialise the set of known supported VQs for the boot CPU.
1017  * This is called during kernel boot, before secondary CPUs are brought up.
1018  */
1019 void __init vec_init_vq_map(enum vec_type type)
1020 {
1021 	struct vl_info *info = &vl_info[type];
1022 	vec_probe_vqs(info, info->vq_map);
1023 	bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX);
1024 }
1025 
1026 /*
1027  * If we haven't committed to the set of supported VQs yet, filter out
1028  * those not supported by the current CPU.
1029  * This function is called during the bring-up of early secondary CPUs only.
1030  */
1031 void vec_update_vq_map(enum vec_type type)
1032 {
1033 	struct vl_info *info = &vl_info[type];
1034 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
1035 
1036 	vec_probe_vqs(info, tmp_map);
1037 	bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX);
1038 	bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map,
1039 		  SVE_VQ_MAX);
1040 }
1041 
1042 /*
1043  * Check whether the current CPU supports all VQs in the committed set.
1044  * This function is called during the bring-up of late secondary CPUs only.
1045  */
1046 int vec_verify_vq_map(enum vec_type type)
1047 {
1048 	struct vl_info *info = &vl_info[type];
1049 	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
1050 	unsigned long b;
1051 
1052 	vec_probe_vqs(info, tmp_map);
1053 
1054 	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
1055 	if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) {
1056 		pr_warn("%s: cpu%d: Required vector length(s) missing\n",
1057 			info->name, smp_processor_id());
1058 		return -EINVAL;
1059 	}
1060 
1061 	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
1062 		return 0;
1063 
1064 	/*
1065 	 * For KVM, it is necessary to ensure that this CPU doesn't
1066 	 * support any vector length that guests may have probed as
1067 	 * unsupported.
1068 	 */
1069 
1070 	/* Recover the set of supported VQs: */
1071 	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
1072 	/* Find VQs supported that are not globally supported: */
1073 	bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX);
1074 
1075 	/* Find the lowest such VQ, if any: */
1076 	b = find_last_bit(tmp_map, SVE_VQ_MAX);
1077 	if (b >= SVE_VQ_MAX)
1078 		return 0; /* no mismatches */
1079 
1080 	/*
1081 	 * Mismatches above sve_max_virtualisable_vl are fine, since
1082 	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
1083 	 */
1084 	if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) {
1085 		pr_warn("%s: cpu%d: Unsupported vector length(s) present\n",
1086 			info->name, smp_processor_id());
1087 		return -EINVAL;
1088 	}
1089 
1090 	return 0;
1091 }
1092 
/*
 * Per-CPU enablement of SVE: stop CPACR_EL1 trapping EL1 SVE accesses,
 * then put ZCR_EL1 into a known (zero) state.
 */
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	/* Synchronise the CPACR_EL1 write before touching SVE registers: */
	isb();

	write_sysreg_s(0, SYS_ZCR_EL1);
}
1100 
/*
 * Finalise SVE configuration once all early CPUs have been probed:
 * derive the maximum and default vector lengths and the maximum
 * virtualisable VL from the accumulated VQ bitmaps.
 */
void __init sve_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SVE];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;
	int max_bit;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);

	/*
	 * The bitmap is ordered so that the lowest bit index corresponds
	 * to the largest VQ (see __bit_to_vq()), hence find_first_bit()
	 * yields the maximum supported VL:
	 */
	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64));

	/* VLs supported on some but not all CPUs are not virtualisable: */
	bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		info->max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs?  This is architecturally forbidden. */
		info->max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (info->max_virtualisable_vl > info->max_vl)
		info->max_virtualisable_vl = info->max_vl;

	pr_info("%s: maximum available vector length %u bytes per vector\n",
		info->name, info->max_vl);
	pr_info("%s: default vector length %u bytes per vector\n",
		info->name, get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl() < sve_max_vl())
		pr_warn("%s: unvirtualisable vector lengths present\n",
			info->name);
}
1154 
1155 /*
1156  * Called from the put_task_struct() path, which cannot get here
1157  * unless dead_task is really dead and not schedulable.
1158  */
1159 void fpsimd_release_task(struct task_struct *dead_task)
1160 {
1161 	sve_free(dead_task);
1162 	sme_free(dead_task);
1163 }
1164 
1165 #endif /* CONFIG_ARM64_SVE */
1166 
1167 #ifdef CONFIG_ARM64_SME
1168 
1169 /*
1170  * Ensure that task->thread.sme_state is allocated and sufficiently large.
1171  *
1172  * This function should be used only in preparation for replacing
1173  * task->thread.sme_state with new data.  The memory is always zeroed
1174  * here to prevent stale data from showing through: this is done in
1175  * the interest of testability and predictability, the architecture
1176  * guarantees that when ZA is enabled it will be zeroed.
1177  */
1178 void sme_alloc(struct task_struct *task, bool flush)
1179 {
1180 	if (task->thread.sme_state) {
1181 		if (flush)
1182 			memset(task->thread.sme_state, 0,
1183 			       sme_state_size(task));
1184 		return;
1185 	}
1186 
1187 	/* This could potentially be up to 64K. */
1188 	task->thread.sme_state =
1189 		kzalloc(sme_state_size(task), GFP_KERNEL);
1190 }
1191 
/* Free the task's SME state buffer and clear the dangling pointer. */
static void sme_free(struct task_struct *task)
{
	kfree(task->thread.sme_state);
	task->thread.sme_state = NULL;
}
1197 
/*
 * Per-CPU enablement of SME: configure priority, untrap EL1 accesses,
 * reset SMCR_EL1 and allow EL0 access to TPIDR2.
 */
void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* Set priority for all PEs to architecturally defined minimum */
	write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
		       SYS_SMPRI_EL1);

	/* Allow SME in kernel */
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
	/* Synchronise the CPACR_EL1 write before touching SME registers: */
	isb();

	/* Ensure all bits in SMCR are set to known values */
	write_sysreg_s(0, SYS_SMCR_EL1);

	/* Allow EL0 to access TPIDR2 */
	write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
	isb();
}
1215 
/*
 * Per-CPU enablement of SME2: set SMCR_EL1.EZT0 so ZT0 can be used.
 * Relies on cpu_enable_sme() having already initialised SMCR_EL1.
 */
void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);

	/* Allow use of ZT0 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
		       SYS_SMCR_EL1);
}
1225 
/*
 * Per-CPU enablement of FA64: set SMCR_EL1.FA64 so the full A64 ISA is
 * available in streaming mode.  Relies on cpu_enable_sme() having
 * already initialised SMCR_EL1.
 */
void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);

	/* Allow use of FA64 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
		       SYS_SMCR_EL1);
}
1235 
/*
 * Finalise SME configuration once all early CPUs have been probed:
 * derive the minimum, maximum and default streaming vector lengths
 * from the accumulated VQ bitmap.
 */
void __init sme_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SME];
	int min_bit, max_bit;

	if (!system_supports_sme())
		return;

	/*
	 * The bitmap is ordered so that the highest set bit index
	 * corresponds to the smallest VQ (see __bit_to_vq()):
	 */
	min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);

	/*
	 * SME doesn't require any particular vector length be
	 * supported but it does require at least one.  We should have
	 * disabled the feature entirely while bringing up CPUs but
	 * let's double check here.  The bitmap is SVE_VQ_MAX sized for
	 * sharing with SVE.
	 */
	WARN_ON(min_bit >= SVE_VQ_MAX);

	info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));

	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	WARN_ON(info->min_vl > info->max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 32
	 * (256 bits) if there is one since this is guaranteed not to
	 * grow the signal frame when in streaming mode, otherwise the
	 * minimum available VL will be used.
	 */
	set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32));

	pr_info("SME: minimum available vector length %u bytes per vector\n",
		info->min_vl);
	pr_info("SME: maximum available vector length %u bytes per vector\n",
		info->max_vl);
	pr_info("SME: default vector length %u bytes per vector\n",
		get_sme_default_vl());
}
1277 
/*
 * Reinitialise the SME control registers on resume from suspend, since
 * their contents may have been lost: rebuild SMCR_EL1 from the detected
 * features and reset SMPRI_EL1 to the minimum priority.
 */
void sme_suspend_exit(void)
{
	u64 smcr = 0;

	if (!system_supports_sme())
		return;

	if (system_supports_fa64())
		smcr |= SMCR_ELx_FA64;
	if (system_supports_sme2())
		smcr |= SMCR_ELx_EZT0;

	write_sysreg_s(smcr, SYS_SMCR_EL1);
	write_sysreg_s(0, SYS_SMPRI_EL1);
}
1293 
1294 #endif /* CONFIG_ARM64_SME */
1295 
/* Initialise the SVE view of the current task's FP state after an SVE trap. */
static void sve_init_regs(void)
{
	/*
	 * Convert the FPSIMD state to SVE, zeroing all the state that
	 * is not shared with FPSIMD. If (as is likely) the current
	 * state is live in the registers then do this there and
	 * update our metadata for the current task including
	 * disabling the trap, otherwise update our in-memory copy.
	 * We are guaranteed to not be in streaming mode, we can only
	 * take a SVE trap when not in streaming mode and we can't be
	 * in streaming mode when taking a SME trap.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		/* State is live in the registers: flush in place. */
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_set_vq(vq_minus_one);
		sve_flush_live(true, vq_minus_one);
		fpsimd_bind_task_to_cpu();
	} else {
		/* State lives in memory: convert the saved copy instead. */
		fpsimd_to_sve(current);
		current->thread.fp_type = FP_STATE_SVE;
		fpsimd_flush_task_state(current);
	}
}
1320 
1321 /*
1322  * Trapped SVE access
1323  *
1324  * Storage is allocated for the full SVE state, the current FPSIMD
1325  * register contents are migrated across, and the access trap is
1326  * disabled.
1327  *
1328  * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
1329  * would have disabled the SVE access trap for userspace during
1330  * ret_to_user, making an SVE access trap impossible in that case.
1331  */
1332 void do_sve_acc(unsigned long esr, struct pt_regs *regs)
1333 {
1334 	/* Even if we chose not to use SVE, the hardware could still trap: */
1335 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
1336 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
1337 		return;
1338 	}
1339 
1340 	sve_alloc(current, true);
1341 	if (!current->thread.sve_state) {
1342 		force_sig(SIGKILL);
1343 		return;
1344 	}
1345 
1346 	get_cpu_fpsimd_context();
1347 
1348 	if (test_and_set_thread_flag(TIF_SVE))
1349 		WARN_ON(1); /* SVE access shouldn't have trapped */
1350 
1351 	/*
1352 	 * Even if the task can have used streaming mode we can only
1353 	 * generate SVE access traps in normal SVE mode and
1354 	 * transitioning out of streaming mode may discard any
1355 	 * streaming mode state.  Always clear the high bits to avoid
1356 	 * any potential errors tracking what is properly initialised.
1357 	 */
1358 	sve_init_regs();
1359 
1360 	put_cpu_fpsimd_context();
1361 }
1362 
1363 #ifdef CONFIG_ARM64_ERRATUM_4193714
1364 
1365 /*
1366  * SME/CME erratum handling.
1367  */
1368 static cpumask_t sme_dvmsync_cpus;
1369 
1370 /*
1371  * These helpers are only called from non-preemptible contexts, so
1372  * smp_processor_id() is safe here.
1373  */
1374 void sme_set_active(void)
1375 {
1376 	unsigned int cpu = smp_processor_id();
1377 
1378 	if (!cpumask_test_cpu(cpu, &sme_dvmsync_cpus))
1379 		return;
1380 
1381 	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
1382 
1383 	/*
1384 	 * A subsequent (post ERET) SME access may use a stale address
1385 	 * translation. On C1-Pro, a TLBI+DSB on a different CPU will wait for
1386 	 * the completion of cpumask_set_cpu() above as it appears in program
1387 	 * order before the SME access. The post-TLBI+DSB read of mm_cpumask()
1388 	 * will lead to the IPI being issued.
1389 	 *
1390 	 * https://lore.kernel.org/r/ablEXwhfKyJW1i7l@J2N7QTR9R3
1391 	 */
1392 }
1393 
/* Undo sme_set_active(): drop this CPU from the mm's IPI target mask. */
void sme_clear_active(void)
{
	unsigned int cpu = smp_processor_id();

	/* Nothing to do on CPUs not affected by the erratum. */
	if (!cpumask_test_cpu(cpu, &sme_dvmsync_cpus))
		return;

	/*
	 * With SCTLR_EL1.IESB enabled, the SME memory transactions are
	 * completed on entering EL1.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(current->mm));
}
1407 
/* Deliberately empty IPI handler: the exception entry itself does the work. */
static void sme_dvmsync_ipi(void *unused)
{
	/*
	 * With SCTLR_EL1.IESB on, taking an exception is sufficient to ensure
	 * the completion of the SME memory accesses, so no need for an
	 * explicit DSB.
	 */
}
1416 
/*
 * IPI every CPU in @mask to synchronise outstanding SME memory accesses
 * on erratum-affected CPUs after TLB invalidation.
 */
void sme_do_dvmsync(const struct cpumask *mask)
{
	/*
	 * This is called from the TLB maintenance functions after the DSB ISH
	 * to send the hardware DVMSync message. If this CPU sees the mask as
	 * empty, the remote CPU executing sme_set_active() would have seen
	 * the DVMSync and no IPI required.
	 */
	if (cpumask_empty(mask))
		return;

	/* smp_call_function_many() must be called with preemption disabled. */
	preempt_disable();
	smp_call_function_many(mask, sme_dvmsync_ipi, NULL, true);
	preempt_enable();
}
1432 
/* Mark the calling CPU as affected by the erratum workaround. */
void sme_enable_dvmsync(void)
{
	cpumask_set_cpu(smp_processor_id(), &sme_dvmsync_cpus);
}
1437 
1438 #endif /* CONFIG_ARM64_ERRATUM_4193714 */
1439 
1440 /*
1441  * Trapped SME access
1442  *
1443  * Storage is allocated for the full SVE and SME state, the current
1444  * FPSIMD register contents are migrated to SVE if SVE is not already
1445  * active, and the access trap is disabled.
1446  *
1447  * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
1448  * would have disabled the SME access trap for userspace during
1449  * ret_to_user, making an SME access trap impossible in that case.
1450  */
1451 void do_sme_acc(unsigned long esr, struct pt_regs *regs)
1452 {
1453 	/* Even if we chose not to use SME, the hardware could still trap: */
1454 	if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
1455 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
1456 		return;
1457 	}
1458 
1459 	/*
1460 	 * If this not a trap due to SME being disabled then something
1461 	 * is being used in the wrong mode, report as SIGILL.
1462 	 */
1463 	if (ESR_ELx_SME_ISS_SMTC(esr) != ESR_ELx_SME_ISS_SMTC_SME_DISABLED) {
1464 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
1465 		return;
1466 	}
1467 
1468 	sve_alloc(current, false);
1469 	sme_alloc(current, true);
1470 	if (!current->thread.sve_state || !current->thread.sme_state) {
1471 		force_sig(SIGKILL);
1472 		return;
1473 	}
1474 
1475 	get_cpu_fpsimd_context();
1476 
1477 	/* With TIF_SME userspace shouldn't generate any traps */
1478 	if (test_and_set_thread_flag(TIF_SME))
1479 		WARN_ON(1);
1480 
1481 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
1482 		unsigned long vq_minus_one =
1483 			sve_vq_from_vl(task_get_sme_vl(current)) - 1;
1484 		sme_set_vq(vq_minus_one);
1485 
1486 		fpsimd_bind_task_to_cpu();
1487 	} else {
1488 		fpsimd_flush_task_state(current);
1489 	}
1490 
1491 	put_cpu_fpsimd_context();
1492 }
1493 
1494 /*
1495  * Trapped FP/ASIMD access.
1496  */
1497 void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
1498 {
1499 	/* Even if we chose not to use FPSIMD, the hardware could still trap: */
1500 	if (!system_supports_fpsimd()) {
1501 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
1502 		return;
1503 	}
1504 
1505 	/*
1506 	 * When FPSIMD is enabled, we should never take a trap unless something
1507 	 * has gone very wrong.
1508 	 */
1509 	BUG();
1510 }
1511 
1512 /*
1513  * Raise a SIGFPE for the current process.
1514  */
1515 void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
1516 {
1517 	unsigned int si_code = FPE_FLTUNK;
1518 
1519 	if (esr & ESR_ELx_FP_EXC_TFV) {
1520 		if (esr & FPEXC_IOF)
1521 			si_code = FPE_FLTINV;
1522 		else if (esr & FPEXC_DZF)
1523 			si_code = FPE_FLTDIV;
1524 		else if (esr & FPEXC_OFF)
1525 			si_code = FPE_FLTOVF;
1526 		else if (esr & FPEXC_UFF)
1527 			si_code = FPE_FLTUND;
1528 		else if (esr & FPEXC_IXF)
1529 			si_code = FPE_FLTRES;
1530 	}
1531 
1532 	send_sig_fault(SIGFPE, si_code,
1533 		       (void __user *)instruction_pointer(regs),
1534 		       current);
1535 }
1536 
/* Load @task's kernel mode FPSIMD state into the registers, if needed. */
static void fpsimd_load_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	/*
	 * Elide the load if this CPU holds the most recent kernel mode
	 * FPSIMD context of the current task.
	 */
	if (last->st == task->thread.kernel_fpsimd_state &&
	    task->thread.kernel_fpsimd_cpu == smp_processor_id())
		return;

	fpsimd_load_state(task->thread.kernel_fpsimd_state);
}
1551 
/*
 * Save @task's kernel mode FPSIMD state to its caller-provided buffer
 * and record this CPU as the holder of the most recent copy, so that
 * fpsimd_load_kernel_state() can elide a redundant reload later.
 */
static void fpsimd_save_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state cpu_fp_state = {
		.st		= task->thread.kernel_fpsimd_state,
		.to_save	= FP_STATE_FPSIMD,
	};

	/* kernel_neon_begin() must have recorded a buffer for this task. */
	BUG_ON(!cpu_fp_state.st);

	fpsimd_save_state(task->thread.kernel_fpsimd_state);
	fpsimd_bind_state_to_cpu(&cpu_fp_state);

	task->thread.kernel_fpsimd_cpu = smp_processor_id();
}
1566 
1567 /*
1568  * Invalidate any task's FPSIMD state that is present on this cpu.
1569  * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
1570  * before calling this function.
1571  */
1572 static void fpsimd_flush_cpu_state(void)
1573 {
1574 	WARN_ON(!system_supports_fpsimd());
1575 	__this_cpu_write(fpsimd_last_state.st, NULL);
1576 
1577 	/*
1578 	 * Leaving streaming mode enabled will cause issues for any kernel
1579 	 * NEON and leaving streaming mode or ZA enabled may increase power
1580 	 * consumption.
1581 	 */
1582 	if (system_supports_sme())
1583 		sme_smstop();
1584 
1585 	set_thread_flag(TIF_FOREIGN_FPSTATE);
1586 }
1587 
/*
 * Handle the FPSIMD side of a context switch to @next: save the
 * outgoing task's unsaved state and prepare the incoming task's.
 * Runs with IRQs disabled (see the WARN_ON_ONCE below).
 */
void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	WARN_ON_ONCE(!irqs_disabled());

	/* Save unsaved fpsimd state, if any: */
	if (test_thread_flag(TIF_KERNEL_FPSTATE))
		fpsimd_save_kernel_state(current);
	else
		fpsimd_save_user_state();

	if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
		/* next was using kernel mode FPSIMD: reload its state. */
		fpsimd_flush_cpu_state();
		fpsimd_load_kernel_state(next);
	} else {
		/*
		 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
		 * state.  For kernel threads, FPSIMD registers are never
		 * loaded with user mode FPSIMD state and so wrong_task and
		 * wrong_cpu will always be true.
		 */
		wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
			&next->thread.uw.fpsimd_state;
		wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

		update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
				       wrong_task || wrong_cpu);
	}
}
1621 
1622 static void fpsimd_flush_thread_vl(enum vec_type type)
1623 {
1624 	int vl, supported_vl;
1625 
1626 	/*
1627 	 * Reset the task vector length as required.  This is where we
1628 	 * ensure that all user tasks have a valid vector length
1629 	 * configured: no kernel task can become a user task without
1630 	 * an exec and hence a call to this function.  By the time the
1631 	 * first call to this function is made, all early hardware
1632 	 * probing is complete, so __sve_default_vl should be valid.
1633 	 * If a bug causes this to go wrong, we make some noise and
1634 	 * try to fudge thread.sve_vl to a safe value here.
1635 	 */
1636 	vl = task_get_vl_onexec(current, type);
1637 	if (!vl)
1638 		vl = get_default_vl(type);
1639 
1640 	if (WARN_ON(!sve_vl_valid(vl)))
1641 		vl = vl_info[type].min_vl;
1642 
1643 	supported_vl = find_supported_vector_length(type, vl);
1644 	if (WARN_ON(supported_vl != vl))
1645 		vl = supported_vl;
1646 
1647 	task_set_vl(current, type, vl);
1648 
1649 	/*
1650 	 * If the task is not set to inherit, ensure that the vector
1651 	 * length will be reset by a subsequent exec:
1652 	 */
1653 	if (!test_thread_flag(vec_vl_inherit_flag(type)))
1654 		task_set_vl_onexec(current, type, 0);
1655 }
1656 
/* Reset the current task's FP/SIMD, SVE and SME state across exec. */
void fpsimd_flush_thread(void)
{
	void *sve_state = NULL;
	void *sme_state = NULL;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);

		/* Defer kfree() while in atomic context */
		sve_state = current->thread.sve_state;
		current->thread.sve_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
	}

	if (system_supports_sme()) {
		clear_thread_flag(TIF_SME);

		/* Defer kfree() while in atomic context */
		sme_state = current->thread.sme_state;
		current->thread.sme_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SME);
		current->thread.svcr = 0;
	}

	if (system_supports_fpmr())
		current->thread.uw.fpmr = 0;

	current->thread.fp_type = FP_STATE_FPSIMD;

	/* Safe to free the deferred buffers once the context is released: */
	put_cpu_fpsimd_context();
	kfree(sve_state);
	kfree(sme_state);
}
1701 
1702 /*
1703  * Save the userland FPSIMD state of 'current' to memory, but only if the state
1704  * currently held in the registers does in fact belong to 'current'
1705  */
1706 void fpsimd_preserve_current_state(void)
1707 {
1708 	if (!system_supports_fpsimd())
1709 		return;
1710 
1711 	get_cpu_fpsimd_context();
1712 	fpsimd_save_user_state();
1713 	put_cpu_fpsimd_context();
1714 }
1715 
1716 /*
1717  * Associate current's FPSIMD context with this cpu
1718  * The caller must have ownership of the cpu FPSIMD context before calling
1719  * this function.
1720  */
1721 static void fpsimd_bind_task_to_cpu(void)
1722 {
1723 	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);
1724 
1725 	WARN_ON(!system_supports_fpsimd());
1726 	last->st = &current->thread.uw.fpsimd_state;
1727 	last->sve_state = current->thread.sve_state;
1728 	last->sme_state = current->thread.sme_state;
1729 	last->sve_vl = task_get_sve_vl(current);
1730 	last->sme_vl = task_get_sme_vl(current);
1731 	last->svcr = &current->thread.svcr;
1732 	last->fpmr = &current->thread.uw.fpmr;
1733 	last->fp_type = &current->thread.fp_type;
1734 	last->to_save = FP_STATE_CURRENT;
1735 	current->thread.fpsimd_cpu = smp_processor_id();
1736 
1737 	/*
1738 	 * Toggle SVE and SME trapping for userspace if needed, these
1739 	 * are serialsied by ret_to_user().
1740 	 */
1741 	if (system_supports_sme()) {
1742 		if (test_thread_flag(TIF_SME))
1743 			sme_user_enable();
1744 		else
1745 			sme_user_disable();
1746 	}
1747 
1748 	if (system_supports_sve()) {
1749 		if (test_thread_flag(TIF_SVE))
1750 			sve_user_enable();
1751 		else
1752 			sve_user_disable();
1753 	}
1754 }
1755 
/*
 * Record @state as the FP context most recently loaded on this CPU.
 * The caller must hold the cpu FPSIMD context (softirq context or IRQs
 * disabled, per the WARN_ON below).
 */
void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	*last = *state;
}
1765 
1766 /*
1767  * Load the userland FPSIMD state of 'current' from memory, but only if the
1768  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
1769  * state of 'current'.  This is called when we are preparing to return to
1770  * userspace to ensure that userspace sees a good register state.
1771  */
1772 void fpsimd_restore_current_state(void)
1773 {
1774 	/*
1775 	 * TIF_FOREIGN_FPSTATE is set on the init task and copied by
1776 	 * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
1777 	 * Thus user threads can have this set even when FP/SIMD hasn't been
1778 	 * detected.
1779 	 *
1780 	 * When FP/SIMD is detected, begin_new_exec() will set
1781 	 * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
1782 	 * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
1783 	 * switching tasks. We detect FP/SIMD before we exec the first user
1784 	 * process, ensuring this has TIF_FOREIGN_FPSTATE set and
1785 	 * do_notify_resume() will call fpsimd_restore_current_state() to
1786 	 * install the user FP/SIMD context.
1787 	 *
1788 	 * When FP/SIMD is not detected, nothing else will clear or set
1789 	 * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
1790 	 * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
1791 	 * looping forever calling fpsimd_restore_current_state().
1792 	 */
1793 	if (!system_supports_fpsimd()) {
1794 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
1795 		return;
1796 	}
1797 
1798 	get_cpu_fpsimd_context();
1799 
1800 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
1801 		task_fpsimd_load();
1802 		fpsimd_bind_task_to_cpu();
1803 	}
1804 
1805 	put_cpu_fpsimd_context();
1806 }
1807 
/*
 * Replace current's in-memory userland FPSIMD state with *state,
 * propagating the update into the SVE view when the task's saved
 * state is in SVE format.
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	current->thread.uw.fpsimd_state = *state;
	if (current->thread.fp_type == FP_STATE_SVE)
		fpsimd_to_sve(current);
}
1817 
1818 /*
1819  * Invalidate live CPU copies of task t's FPSIMD state
1820  *
1821  * This function may be called with preemption enabled.  The barrier()
1822  * ensures that the assignment to fpsimd_cpu is visible to any
1823  * preemption/softirq that could race with set_tsk_thread_flag(), so
1824  * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
1825  *
1826  * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
1827  * subsequent code.
1828  */
1829 void fpsimd_flush_task_state(struct task_struct *t)
1830 {
1831 	t->thread.fpsimd_cpu = NR_CPUS;
1832 	t->thread.kernel_fpsimd_state = NULL;
1833 	/*
1834 	 * If we don't support fpsimd, bail out after we have
1835 	 * reset the fpsimd_cpu for this task and clear the
1836 	 * FPSTATE.
1837 	 */
1838 	if (!system_supports_fpsimd())
1839 		return;
1840 	barrier();
1841 	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
1842 
1843 	barrier();
1844 }
1845 
/*
 * Save current's userland FPSIMD state to memory (if it is live in the
 * registers) and then invalidate all live CPU copies of it.
 */
void fpsimd_save_and_flush_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save_user_state();
	fpsimd_flush_task_state(current);
	put_cpu_fpsimd_context();
}
1856 
1857 /*
1858  * Save the FPSIMD state to memory and invalidate cpu view.
1859  * This function must be called with preemption disabled.
1860  */
1861 void fpsimd_save_and_flush_cpu_state(void)
1862 {
1863 	unsigned long flags;
1864 
1865 	if (!system_supports_fpsimd())
1866 		return;
1867 	WARN_ON(preemptible());
1868 	local_irq_save(flags);
1869 	fpsimd_save_user_state();
1870 	fpsimd_flush_cpu_state();
1871 	local_irq_restore(flags);
1872 }
1873 
1874 #ifdef CONFIG_KERNEL_MODE_NEON
1875 
1876 /*
1877  * Kernel-side NEON support functions
1878  */
1879 
1880 /*
1881  * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
1882  * context
1883  *
1884  * Must not be called unless may_use_simd() returns true.
1885  * Task context in the FPSIMD registers is saved back to memory as necessary.
1886  *
1887  * A matching call to kernel_neon_end() must be made before returning from the
1888  * calling context.
1889  *
1890  * The caller may freely use the FPSIMD registers until kernel_neon_end() is
1891  * called.
1892  *
1893  * Unless called from non-preemptible task context, @state must point to a
1894  * caller provided buffer that will be used to preserve the task's kernel mode
1895  * FPSIMD context when it is scheduled out, or if it is interrupted by kernel
1896  * mode FPSIMD occurring in softirq context. May be %NULL otherwise.
1897  */
1898 void kernel_neon_begin(struct user_fpsimd_state *state)
1899 {
1900 	if (WARN_ON(!system_supports_fpsimd()))
1901 		return;
1902 
1903 	WARN_ON((preemptible() || in_serving_softirq()) && !state);
1904 
1905 	BUG_ON(!may_use_simd());
1906 
1907 	get_cpu_fpsimd_context();
1908 
1909 	/* Save unsaved fpsimd state, if any: */
1910 	if (test_thread_flag(TIF_KERNEL_FPSTATE)) {
1911 		BUG_ON(IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq());
1912 		fpsimd_save_state(state);
1913 	} else {
1914 		fpsimd_save_user_state();
1915 
1916 		/*
1917 		 * Set the thread flag so that the kernel mode FPSIMD state
1918 		 * will be context switched along with the rest of the task
1919 		 * state.
1920 		 *
1921 		 * On non-PREEMPT_RT, softirqs may interrupt task level kernel
1922 		 * mode FPSIMD, but the task will not be preemptible so setting
1923 		 * TIF_KERNEL_FPSTATE for those would be both wrong (as it
1924 		 * would mark the task context FPSIMD state as requiring a
1925 		 * context switch) and unnecessary.
1926 		 *
1927 		 * On PREEMPT_RT, softirqs are serviced from a separate thread,
1928 		 * which is scheduled as usual, and this guarantees that these
1929 		 * softirqs are not interrupting use of the FPSIMD in kernel
1930 		 * mode in task context. So in this case, setting the flag here
1931 		 * is always appropriate.
1932 		 */
1933 		if (IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq()) {
1934 			/*
1935 			 * Record the caller provided buffer as the kernel mode
1936 			 * FP/SIMD buffer for this task, so that the state can
1937 			 * be preserved and restored on a context switch.
1938 			 */
1939 			WARN_ON(current->thread.kernel_fpsimd_state != NULL);
1940 			current->thread.kernel_fpsimd_state = state;
1941 			set_thread_flag(TIF_KERNEL_FPSTATE);
1942 		}
1943 	}
1944 
1945 	/* Invalidate any task state remaining in the fpsimd regs: */
1946 	fpsimd_flush_cpu_state();
1947 
1948 	put_cpu_fpsimd_context();
1949 }
1950 EXPORT_SYMBOL_GPL(kernel_neon_begin);
1951 
1952 /*
1953  * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
1954  *
1955  * Must be called from a context in which kernel_neon_begin() was previously
1956  * called, with no call to kernel_neon_end() in the meantime.
1957  *
1958  * The caller must not use the FPSIMD registers after this function is called,
1959  * unless kernel_neon_begin() is called again in the meantime.
1960  *
1961  * The value of @state must match the value passed to the preceding call to
1962  * kernel_neon_begin().
1963  */
void kernel_neon_end(struct user_fpsimd_state *state)
{
	if (!system_supports_fpsimd())
		return;

	/*
	 * If TIF_KERNEL_FPSTATE is clear, the matching kernel_neon_begin()
	 * ran from softirq context on top of user task state (it never set
	 * the flag in that case), so there is nothing to undo here: the
	 * task's user FPSIMD state was saved by kernel_neon_begin() and will
	 * be reloaded lazily.
	 */
	if (!test_thread_flag(TIF_KERNEL_FPSTATE))
		return;

	/*
	 * If we are returning from a nested use of kernel mode FPSIMD, restore
	 * the task context kernel mode FPSIMD state. This can only happen when
	 * running in softirq context on non-PREEMPT_RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && in_serving_softirq()) {
		/*
		 * @state holds the interrupted task-level kernel mode FPSIMD
		 * state that the nested kernel_neon_begin() saved into it.
		 */
		fpsimd_load_state(state);
	} else {
		/*
		 * Task-level use is ending: drop the flag and forget the
		 * caller's buffer. @state must be the same buffer that was
		 * recorded by the matching kernel_neon_begin().
		 */
		clear_thread_flag(TIF_KERNEL_FPSTATE);
		WARN_ON(current->thread.kernel_fpsimd_state != state);
		current->thread.kernel_fpsimd_state = NULL;
	}
}
1985 EXPORT_SYMBOL_GPL(kernel_neon_end);
1986 
1987 #ifdef CONFIG_EFI
1988 
/*
 * Scratch buffer used to preserve FP/SIMD register contents around an EFI
 * runtime services call; see __efi_fpsimd_begin()/__efi_fpsimd_end() below.
 */
static struct user_fpsimd_state efi_fpsimd_state;
1990 
1991 /*
1992  * EFI runtime services support functions
1993  *
1994  * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
1995  * This means that for EFI (and only for EFI), we have to assume that FPSIMD
1996  * is always used rather than being an optional accelerator.
1997  *
1998  * These functions provide the necessary support for ensuring FPSIMD
1999  * save/restore in the contexts from which EFI is used.
2000  *
2001  * Do not use them for any other purpose -- if tempted to do so, you are
2002  * either doing something wrong or you need to propose some refactoring.
2003  */
2004 
2005 /*
2006  * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
2007  */
2008 void __efi_fpsimd_begin(void)
2009 {
2010 	if (!system_supports_fpsimd())
2011 		return;
2012 
2013 	if (may_use_simd()) {
2014 		kernel_neon_begin(&efi_fpsimd_state);
2015 	} else {
2016 		/*
2017 		 * We are running in hardirq or NMI context, and the only
2018 		 * legitimate case where this might happen is when EFI pstore
2019 		 * is attempting to record the system's dying gasps into EFI
2020 		 * variables. This could be due to an oops, a panic or a call
2021 		 * to emergency_restart(), and in none of those cases, we can
2022 		 * expect the current task to ever return to user space again,
2023 		 * or for the kernel to resume any normal execution, for that
2024 		 * matter (an oops in hardirq context triggers a panic too).
2025 		 *
2026 		 * Therefore, there is no point in attempting to preserve any
2027 		 * SVE/SME state here. On the off chance that we might have
2028 		 * ended up here for a different reason inadvertently, kill the
2029 		 * task and preserve/restore the base FP/SIMD state, which
2030 		 * might belong to kernel mode FP/SIMD.
2031 		 */
2032 		pr_warn_ratelimited("Calling EFI runtime from %s context\n",
2033 				    in_nmi() ? "NMI" : "hardirq");
2034 		force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
2035 		fpsimd_save_state(&efi_fpsimd_state);
2036 	}
2037 }
2038 
2039 /*
2040  * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
2041  */
2042 void __efi_fpsimd_end(void)
2043 {
2044 	if (!system_supports_fpsimd())
2045 		return;
2046 
2047 	if (may_use_simd()) {
2048 		kernel_neon_end(&efi_fpsimd_state);
2049 	} else {
2050 		fpsimd_load_state(&efi_fpsimd_state);
2051 	}
2052 }
2053 
2054 #endif /* CONFIG_EFI */
2055 
2056 #endif /* CONFIG_KERNEL_MODE_NEON */
2057 
2058 #ifdef CONFIG_CPU_PM
2059 static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
2060 				  unsigned long cmd, void *v)
2061 {
2062 	switch (cmd) {
2063 	case CPU_PM_ENTER:
2064 		fpsimd_save_and_flush_cpu_state();
2065 		break;
2066 	case CPU_PM_EXIT:
2067 		break;
2068 	case CPU_PM_ENTER_FAILED:
2069 	default:
2070 		return NOTIFY_DONE;
2071 	}
2072 	return NOTIFY_OK;
2073 }
2074 
static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	/* Save/invalidate FPSIMD state around CPU low power transitions */
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
/* Without CONFIG_CPU_PM there are no low power transitions to handle */
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
2087 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * A dead CPU cannot hold anybody's FPSIMD state any more: forget which
 * task's state was last loaded on it, so that stale tracking does not
 * suppress a reload after the CPU comes back.
 */
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	/* Run fpsimd_cpu_dead() for each CPU that is torn down */
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
/* No CPU hotplug: per-CPU state tracking never needs invalidating */
static inline void fpsimd_hotplug_init(void) { }
#endif
2104 
2105 void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
2106 {
2107 	unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;
2108 	write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
2109 	isb();
2110 }
2111 
2112 /*
2113  * FP/SIMD support code initialisation.
2114  */
2115 static int __init fpsimd_init(void)
2116 {
2117 	if (cpu_have_named_feature(FP)) {
2118 		fpsimd_pm_init();
2119 		fpsimd_hotplug_init();
2120 	} else {
2121 		pr_notice("Floating-point is not implemented\n");
2122 	}
2123 
2124 	if (!cpu_have_named_feature(ASIMD))
2125 		pr_notice("Advanced SIMD is not implemented\n");
2126 
2127 
2128 	sve_sysctl_init();
2129 	sme_sysctl_init();
2130 
2131 	return 0;
2132 }
2133 core_initcall(fpsimd_init);
2134