// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/irq-entry-common.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/*
		 * No ISB required as we can tolerate spurious Overlay faults -
		 * the fault handler will check again based on the new value
		 * of POR_EL0.
		 */
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after this function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after this function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

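/*
 * Initialise layout tracking for a new signal frame: account for the
 * fixed part of the frame and reserve room in __reserved[] for the
 * extra_context record and the terminator.
 */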
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

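/*
 * Size of the frame laid out so far, padded to 16 bytes and never less
 * than the fixed struct rt_sigframe.
 */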
static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

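/*
 * Allocate space for a record in the signal frame. If the base
 * __reserved[] area is full and extend is true, switch allocations to an
 * extra_context area, keeping space reserved for the extra_context
 * record and terminator that reference it. The offset of the allocated
 * block is written to *offset.
 */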
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

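/* Convert an offset within the signal frame into a user pointer */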
static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

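/* Write the current task's FPSIMD state into the user signal frame */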
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	fpsimd_sync_from_effective_state(current);

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

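/* Copy the FPSIMD registers out of the user signal frame into fpsimd */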
static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
			       struct user_ctxs *user)
{
	int err;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd->vregs));
	__get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);

	return err ? -EFAULT : 0;
}

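/*
 * Restore the FPSIMD state from the signal frame, leaving SVE and
 * streaming mode disabled.
 */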
static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	clear_thread_flag(TIF_SVE);
	current->thread.svcr &= ~SVCR_SM_MASK;
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	fpsimd_update_current_state(&fpsimd);
	return 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		current->thread.uw.fpmr = fpmr;

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

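/*
 * Write the SVE (or streaming SVE) state to the signal frame. Only the
 * header is populated (VQ == 0) when there is no live SVE register
 * payload to save.
 */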
static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

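/*
 * Restore SVE (or streaming SVE) state from the signal frame, falling
 * back to the FPSIMD record when the SVE context carries no register
 * payload.
 */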
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;
	bool sm;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	sm = flags & SVE_SIG_FLAG_SM;
	if (sm) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	/*
	 * Non-streaming SVE state may be preserved without an SVE payload, in
	 * which case the SVE context only has a header with VL==0, and all
	 * state can be restored from the FPSIMD context.
	 *
	 * Streaming SVE state is always preserved with an SVE payload. For
	 * consistency and robustness, reject restoring streaming SVE state
	 * without an SVE payload.
	 */
	if (!sm && user->sve_size == sizeof(*user->sve))
		return restore_fpsimd_context(user);

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	if (sm) {
		sme_alloc(current, false);
		if (!current->thread.sme_state)
			return -ENOMEM;
	}

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	if (sm) {
		current->thread.svcr |= SVCR_SM_MASK;
		set_thread_flag(TIF_SME);
	} else {
		current->thread.svcr &= ~SVCR_SM_MASK;
		set_thread_flag(TIF_SVE);
	}

	current->thread.fp_type = FP_STATE_SVE;

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	/* Merge the FPSIMD registers into the SVE state */
	fpsimd_update_current_state(&fpsimd);

	return 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised-out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
	int err = 0;

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sve_alloc(current, false);
	if (!current->thread.sve_state)
		return -ENOMEM;

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised-out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame, so
	 * include it in the GCSPR_EL0 we report in order to support
	 * stack switching via sigreturn. We do not allow enabling GCS
	 * via sigreturn, so the token is only relevant for threads with
	 * GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here; it will be
	 * validated later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised-out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

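/*
 * Walk the records in the signal frame's __reserved[] area (and in any
 * extra_context data it points to), validating each header and recording
 * the location and size of every recognised context in *user.
 */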
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

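/*
 * Restore the saved register state and extension records from the user
 * signal frame. A restored POE context is reported via *ua_state rather
 * than being written directly to POR_EL0.
 */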
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	fpsimd_save_and_flush_current_state();

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

#ifdef CONFIG_ARM64_GCS
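/*
 * Validate and consume the signal cap token that was pushed onto the
 * Guarded Control Stack at signal delivery, then pop it by advancing
 * GCSPR_EL0.
 */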
static int gcs_restore_signal(void)
{
	u64 gcspr_el0, cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS; read the cap.
	 * We don't enforce that this is in a GCS page: if it is not,
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
			     sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

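/*
 * Populate the signal frame laid out by setup_sigframe_layout() with the
 * interrupted register state, the signal mask and all extension records,
 * finishing with the terminator (and extra_context, if used).
 */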
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

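/*
 * Lay out the signal frame for the current state and carve out space for
 * it, together with a frame record for unwinding, on the signal stack.
 */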
static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

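/*
 * Push a signal cap token and the trampoline return address onto the
 * Guarded Control Stack so that the subsequent sigreturn can be
 * validated by gcs_restore_signal().
 */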
static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	u64 gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp,
		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 16;
	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

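/*
 * Point the saved registers at the signal handler: set up the handler's
 * arguments, stack pointer, frame pointer, return address (the sigreturn
 * trampoline) and PC, and establish the PSTATE and SME state the handler
 * expects on entry.
 */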
static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;
	int err;

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	err = gcs_signal_entry(sigtramp, ksig);
	if (err)
		return err;

	/*
	 * We must not fail from this point onwards. We are going to update
	 * registers, including SP, in order to invoke the signal handler. If
	 * we failed and attempted to deliver a nested SIGSEGV to a handler
	 * after that point, the subsequent sigreturn would end up restoring
	 * the (partial) state for the original signal handler.
	 */

	regs->regs[0] = usig;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->regs[1] = (unsigned long)&user->sigframe->info;
		regs->regs[2] = (unsigned long)&user->sigframe->uc;
	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		task_smstop_sm(current);
		current->thread.svcr &= ~SVCR_ZA_MASK;
		write_sysreg_s(0, SYS_TPIDR2_EL0);
	}

	return 0;
}

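/*
 * Build the rt signal frame on the (possibly alternate) signal stack and
 * arrange entry to the handler. A non-zero return tells the caller that
 * delivery failed.
 */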
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_save_and_flush_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	if (err == 0)
		err = setup_return(regs, ksig, &user, usig);

	/*
	 * We must not fail if setup_return() succeeded - see comment at the
	 * beginning of setup_return().
	 */

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);