// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

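/*
 * The expected value of a GCS signal cap token is derived from the address
 * it is stored at: gcs_signal_entry() writes GCS_SIGNAL_CAP(addr) at addr,
 * and gcs_restore_signal() recomputes the same value from GCSPR_EL0 in
 * order to validate the token before invalidating it.
 */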
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

/*
 * Do a signal return; undo the signal stack. These are 128-bit aligned.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};
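
/*
 * An illustrative sketch of what ends up on the user stack (higher
 * addresses first):
 *
 *	struct frame_record { fp, lr };	<- the handler's FP points here
 *	struct rt_sigframe {
 *		struct siginfo info;
 *		struct ucontext uc;	<- uc_mcontext.__reserved[] holds the
 *	};				   { magic, size } record chain
 *	<- regs->sp (16-byte aligned)
 *
 * The record chain in __reserved[] is terminated by a { 0, 0 } record and
 * may contain an EXTRA_MAGIC record redirecting to further records placed
 * beyond the end of the struct.
 */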

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/*
		 * No ISB required as we can tolerate spurious Overlay faults -
		 * the fault handler will check again based on the new value
		 * of POR_EL0.
		 */
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}
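
/*
 * A worked example of the accounting above, assuming the current UAPI
 * definitions (8-byte struct _aarch64_ctx, 32-byte struct extra_context,
 * 4096-byte __reserved[]): limit ends up at size + 4096 - 16 - 32, i.e.
 * records may be placed directly in __reserved[] until just under 4K is
 * used, with space held back for an extra_context record and the zero
 * terminator.
 */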

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
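
/*
 * A sketch of the extend path above: the first allocation that does not
 * fit in __reserved[] places an extra_context record in the space that
 * init_user_layout() held back, re-reserves TERMINATOR_SIZE for the dummy
 * terminator that must follow that record, and raises the limit to
 * SIGFRAME_MAXSZ so that this and all subsequent records land in the
 * extra space beyond the end of the base frame.
 */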

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

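/*
 * Pointers into the user signal frame, filled in by parse_user_sigframe().
 * Each *_size field holds the size claimed by the corresponding record's
 * header; the restore_*_context() functions validate it against the size
 * expected for that record type before using the data.
 */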
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	fpsimd_sync_from_effective_state(current);

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
			       struct user_ctxs *user)
{
	int err;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd->vregs));
	__get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	clear_thread_flag(TIF_SVE);
	current->thread.svcr &= ~SVCR_SM_MASK;
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	fpsimd_update_current_state(&fpsimd);
	return 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		current->thread.uw.fpmr = fpmr;

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;
	bool sm;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	sm = flags & SVE_SIG_FLAG_SM;
	if (sm) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it can
		 * have an SVE-formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	/*
	 * Non-streaming SVE state may be preserved without an SVE payload, in
	 * which case the SVE context only has a header with VL==0, and all
	 * state can be restored from the FPSIMD context.
	 *
	 * Streaming SVE state is always preserved with an SVE payload. For
	 * consistency and robustness, reject restoring streaming SVE state
	 * without an SVE payload.
	 */
	if (!sm && user->sve_size == sizeof(*user->sve))
		return restore_fpsimd_context(user);

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	/* Merge the FPSIMD registers into the SVE state */
	fpsimd_update_current_state(&fpsimd);

	return 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
	int err = 0;

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame;
	 * include it in the GCSPR_EL0 we report to support stack
	 * switching via sigreturn. We do not allow enabling GCS via
	 * sigreturn, so the token is only relevant for threads that
	 * already have GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here, we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
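
/*
 * Note: on EXTRA_MAGIC the loop above restarts with base and limit
 * redirected to the extra data described by the record; have_extra_context
 * ensures that this redirection happens at most once, so parsing is
 * guaranteed to terminate.
 */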

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	fpsimd_save_and_flush_current_state();

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}
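
/*
 * Note the ordering above: ZA must be restored before ZT since
 * restore_zt_context() checks that ZA is enabled, and the POE record is
 * only captured into ua_state here - it is applied to POR_EL0 by the
 * caller via restore_user_access_state() once all uaccess is done.
 */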

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	u64 gcspr_el0, cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page; if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
			     sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
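
/*
 * A record offset left at zero above means "not allocated": user->size
 * starts at the (nonzero) offset of __reserved[] within the frame, so any
 * record that was allocated has a nonzero offset. setup_sigframe() relies
 * on this when testing the user->*_offset fields before writing each
 * optional record.
 */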

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
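
/*
 * Note that the frame record allocated above sits immediately above the
 * sigframe on the stack: setup_sigframe() fills it with the interrupted
 * context's FP/LR and setup_return() points the handler's FP at it,
 * giving unwinders a chain from the signal handler back into the
 * interrupted code.
 */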

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	u64 gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
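	/*
	 * Resulting layout, sketched for illustration (the GCS grows
	 * down):
	 *
	 *   gcspr_el0 - 16: address of sigtramp, matching the LR set up
	 *		     by setup_return() so that the handler's
	 *		     return to the trampoline passes the GCS check
	 *   gcspr_el0 -  8: signal cap token, validated and invalidated
	 *		     by gcs_restore_signal()
	 */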
	put_user_gcs((unsigned long)sigtramp,
		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 16;
	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;
	int err;

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	err = gcs_signal_entry(sigtramp, ksig);
	if (err)
		return err;

	/*
	 * We must not fail from this point onwards. We are going to update
	 * registers, including SP, in order to invoke the signal handler. If
	 * we failed and attempted to deliver a nested SIGSEGV to a handler
	 * after that point, the subsequent sigreturn would end up restoring
	 * the (partial) state for the original signal handler.
	 */

	regs->regs[0] = usig;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->regs[1] = (unsigned long)&user->sigframe->info;
		regs->regs[2] = (unsigned long)&user->sigframe->uc;
	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		task_smstop_sm(current);
		current->thread.svcr &= ~SVCR_ZA_MASK;
		write_sysreg_s(0, SYS_TPIDR2_EL0);
	}

	return 0;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_save_and_flush_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	if (err == 0)
		err = setup_return(regs, ksig, &user, usig);

	/*
	 * We must not fail if setup_return() succeeded - see comment at the
	 * beginning of setup_return().
	 */

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);
1714