1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Based on arch/arm/kernel/signal.c
4 *
5 * Copyright (C) 1995-2009 Russell King
6 * Copyright (C) 2012 ARM Ltd.
7 */
8
9 #include <linux/cache.h>
10 #include <linux/compat.h>
11 #include <linux/errno.h>
12 #include <linux/irq-entry-common.h>
13 #include <linux/kernel.h>
14 #include <linux/signal.h>
15 #include <linux/freezer.h>
16 #include <linux/stddef.h>
17 #include <linux/uaccess.h>
18 #include <linux/sizes.h>
19 #include <linux/string.h>
20 #include <linux/ratelimit.h>
21 #include <linux/rseq.h>
22 #include <linux/syscalls.h>
23 #include <linux/pkeys.h>
24
25 #include <asm/daifflags.h>
26 #include <asm/debug-monitors.h>
27 #include <asm/elf.h>
28 #include <asm/exception.h>
29 #include <asm/cacheflush.h>
30 #include <asm/gcs.h>
31 #include <asm/ucontext.h>
32 #include <asm/unistd.h>
33 #include <asm/fpsimd.h>
34 #include <asm/ptrace.h>
35 #include <asm/syscall.h>
36 #include <asm/signal32.h>
37 #include <asm/traps.h>
38 #include <asm/vdso.h>
39
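/*
 * Expected value of the GCS signal cap token at a given GCS address:
 * pushed by gcs_signal_entry() and checked (then invalidated) by
 * gcs_restore_signal().
 */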
40 #define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)
41
42 /*
43 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
44 */
45 struct rt_sigframe {
46 struct siginfo info;
47 struct ucontext uc;
48 };
49
50 struct rt_sigframe_user_layout {
51 struct rt_sigframe __user *sigframe;
52 struct frame_record __user *next_frame;
53
54 unsigned long size; /* size of allocated sigframe data */
55 unsigned long limit; /* largest allowed size */
56
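/* Offsets of the optional records from the sigframe base; 0 if unused */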
57 unsigned long fpsimd_offset;
58 unsigned long esr_offset;
59 unsigned long gcs_offset;
60 unsigned long sve_offset;
61 unsigned long tpidr2_offset;
62 unsigned long za_offset;
63 unsigned long zt_offset;
64 unsigned long fpmr_offset;
65 unsigned long poe_offset;
66 unsigned long extra_offset;
67 unsigned long end_offset;
68 };
69
70 /*
71 * Holds any EL0-controlled state that influences unprivileged memory accesses.
72 * This includes both accesses done in userspace and uaccess done in the kernel.
73 *
74 * This state needs to be carefully managed to ensure that it doesn't cause
75 * uaccess to fail when setting up the signal frame, and the signal handler
76 * itself also expects a well-defined state when entered.
77 */
78 struct user_access_state {
79 u64 por_el0;
80 };
81
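/* Space reserved in the frame for the terminator and extra_context records */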
82 #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
83 #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
84
85 /*
86 * Save the user access state into ua_state and reset it to disable any
87 * restrictions.
88 */
89 static void save_reset_user_access_state(struct user_access_state *ua_state)
90 {
91 if (system_supports_poe()) {
92 u64 por_enable_all = 0;
93
94 for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
95 por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);
96
97 ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
98 write_sysreg_s(por_enable_all, SYS_POR_EL0);
99 /*
100 * No ISB required as we can tolerate spurious Overlay faults -
101 * the fault handler will check again based on the new value
102 * of POR_EL0.
103 */
104 }
105 }
106
107 /*
108 * Set the user access state for invoking the signal handler.
109 *
110 * No uaccess should be done after this function is called.
111 */
112 static void set_handler_user_access_state(void)
113 {
114 if (system_supports_poe())
115 write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
116 }
117
118 /*
119 * Restore the user access state to the values saved in ua_state.
120 *
121 * No uaccess should be done after this function is called.
122 */
123 static void restore_user_access_state(const struct user_access_state *ua_state)
124 {
125 if (system_supports_poe())
126 write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
127 }
128
129 static void init_user_layout(struct rt_sigframe_user_layout *user)
130 {
131 const size_t reserved_size =
132 sizeof(user->sigframe->uc.uc_mcontext.__reserved);
133
134 memset(user, 0, sizeof(*user));
135 user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
136
137 user->limit = user->size + reserved_size;
138
139 user->limit -= TERMINATOR_SIZE;
140 user->limit -= EXTRA_CONTEXT_SIZE;
141 /* Reserve space for extension and terminator ^ */
142 }
143
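/* Total size to allocate for the sigframe, 16-byte aligned */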
144 static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
145 {
146 return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
147 }
148
149 /*
150 * Sanity limit on the approximate maximum size of signal frame we'll
151 * try to generate. Stack alignment padding and the frame record are
152 * not taken into account. This limit is not a guarantee and is
153 * NOT ABI.
154 */
155 #define SIGFRAME_MAXSZ SZ_256K
156
157 static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
158 unsigned long *offset, size_t size, bool extend)
159 {
160 size_t padded_size = round_up(size, 16);
161
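/*
 * If the record does not fit in the standard frame, try (once) to
 * switch future allocations to an extra_context area.
 */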
162 if (padded_size > user->limit - user->size &&
163 !user->extra_offset &&
164 extend) {
165 int ret;
166
167 user->limit += EXTRA_CONTEXT_SIZE;
168 ret = __sigframe_alloc(user, &user->extra_offset,
169 sizeof(struct extra_context), false);
170 if (ret) {
171 user->limit -= EXTRA_CONTEXT_SIZE;
172 return ret;
173 }
174
175 /* Reserve space for the __reserved[] terminator */
176 user->size += TERMINATOR_SIZE;
177
178 /*
179 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
180 * the terminator:
181 */
182 user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
183 }
184
185 /* Still not enough space? Bad luck! */
186 if (padded_size > user->limit - user->size)
187 return -ENOMEM;
188
189 *offset = user->size;
190 user->size += padded_size;
191
192 return 0;
193 }
194
195 /*
196 * Allocate space for an optional record of <size> bytes in the user
197 * signal frame. The offset from the signal frame base address to the
198 * allocated block is assigned to *offset.
199 */
200 static int sigframe_alloc(struct rt_sigframe_user_layout *user,
201 unsigned long *offset, size_t size)
202 {
203 return __sigframe_alloc(user, offset, size, true);
204 }
205
206 /* Allocate the null terminator record and prevent further allocations */
207 static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
208 {
209 int ret;
210
211 /* Un-reserve the space reserved for the terminator: */
212 user->limit += TERMINATOR_SIZE;
213
214 ret = sigframe_alloc(user, &user->end_offset,
215 sizeof(struct _aarch64_ctx));
216 if (ret)
217 return ret;
218
219 /* Prevent further allocation: */
220 user->limit = user->size;
221 return 0;
222 }
223
224 static void __user *apply_user_offset(
225 struct rt_sigframe_user_layout const *user, unsigned long offset)
226 {
227 char __user *base = (char __user *)user->sigframe;
228
229 return base + offset;
230 }
231
232 struct user_ctxs {
233 struct fpsimd_context __user *fpsimd;
234 u32 fpsimd_size;
235 struct sve_context __user *sve;
236 u32 sve_size;
237 struct tpidr2_context __user *tpidr2;
238 u32 tpidr2_size;
239 struct za_context __user *za;
240 u32 za_size;
241 struct zt_context __user *zt;
242 u32 zt_size;
243 struct fpmr_context __user *fpmr;
244 u32 fpmr_size;
245 struct poe_context __user *poe;
246 u32 poe_size;
247 struct gcs_context __user *gcs;
248 u32 gcs_size;
249 };
250
251 static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
252 {
253 struct user_fpsimd_state const *fpsimd =
254 &current->thread.uw.fpsimd_state;
255 int err;
256
257 fpsimd_sync_from_effective_state(current);
258
259 /* copy the FP and status/control registers */
260 err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
261 __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
262 __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
263
264 /* copy the magic/size information */
265 __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
266 __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
267
268 return err ? -EFAULT : 0;
269 }
270
271 static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
272 struct user_ctxs *user)
273 {
274 int err;
275
276 /* check the size information */
277 if (user->fpsimd_size != sizeof(struct fpsimd_context))
278 return -EINVAL;
279
280 /* copy the FP and status/control registers */
281 err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
282 sizeof(fpsimd->vregs));
283 __get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
284 __get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);
285
286 return err ? -EFAULT : 0;
287 }
288
289 static int restore_fpsimd_context(struct user_ctxs *user)
290 {
291 struct user_fpsimd_state fpsimd;
292 int err;
293
294 err = read_fpsimd_context(&fpsimd, user);
295 if (err)
296 return err;
297
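/* Restoring plain FPSIMD state discards any SVE/streaming mode state */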
298 clear_thread_flag(TIF_SVE);
299 current->thread.svcr &= ~SVCR_SM_MASK;
300 current->thread.fp_type = FP_STATE_FPSIMD;
301
302 /* load the hardware registers from the fpsimd_state structure */
303 fpsimd_update_current_state(&fpsimd);
304 return 0;
305 }
306
307 static int preserve_fpmr_context(struct fpmr_context __user *ctx)
308 {
309 int err = 0;
310
311 __put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
312 __put_user_error(sizeof(*ctx), &ctx->head.size, err);
313 __put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);
314
315 return err;
316 }
317
318 static int restore_fpmr_context(struct user_ctxs *user)
319 {
320 u64 fpmr;
321 int err = 0;
322
323 if (user->fpmr_size != sizeof(*user->fpmr))
324 return -EINVAL;
325
326 __get_user_error(fpmr, &user->fpmr->fpmr, err);
327 if (!err)
328 current->thread.uw.fpmr = fpmr;
329
330 return err;
331 }
332
333 static int preserve_poe_context(struct poe_context __user *ctx,
334 const struct user_access_state *ua_state)
335 {
336 int err = 0;
337
338 __put_user_error(POE_MAGIC, &ctx->head.magic, err);
339 __put_user_error(sizeof(*ctx), &ctx->head.size, err);
340 __put_user_error(ua_state->por_el0, &ctx->por_el0, err);
341
342 return err;
343 }
344
345 static int restore_poe_context(struct user_ctxs *user,
346 struct user_access_state *ua_state)
347 {
348 u64 por_el0;
349 int err = 0;
350
351 if (user->poe_size != sizeof(*user->poe))
352 return -EINVAL;
353
354 __get_user_error(por_el0, &(user->poe->por_el0), err);
355 if (!err)
356 ua_state->por_el0 = por_el0;
357
358 return err;
359 }
360
361 #ifdef CONFIG_ARM64_SVE
362
363 static int preserve_sve_context(struct sve_context __user *ctx)
364 {
365 int err = 0;
366 u16 reserved[ARRAY_SIZE(ctx->__reserved)];
367 u16 flags = 0;
368 unsigned int vl = task_get_sve_vl(current);
369 unsigned int vq = 0;
370
371 if (thread_sm_enabled(&current->thread)) {
372 vl = task_get_sme_vl(current);
373 vq = sve_vq_from_vl(vl);
374 flags |= SVE_SIG_FLAG_SM;
375 } else if (current->thread.fp_type == FP_STATE_SVE) {
376 vq = sve_vq_from_vl(vl);
377 }
378
379 memset(reserved, 0, sizeof(reserved));
380
381 __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
382 __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
383 &ctx->head.size, err);
384 __put_user_error(vl, &ctx->vl, err);
385 __put_user_error(flags, &ctx->flags, err);
386 BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
387 err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
388
389 if (vq) {
390 err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
391 current->thread.sve_state,
392 SVE_SIG_REGS_SIZE(vq));
393 }
394
395 return err ? -EFAULT : 0;
396 }
397
398 static int restore_sve_fpsimd_context(struct user_ctxs *user)
399 {
400 int err = 0;
401 unsigned int vl, vq;
402 struct user_fpsimd_state fpsimd;
403 u16 user_vl, flags;
404 bool sm;
405
406 if (user->sve_size < sizeof(*user->sve))
407 return -EINVAL;
408
409 __get_user_error(user_vl, &(user->sve->vl), err);
410 __get_user_error(flags, &(user->sve->flags), err);
411 if (err)
412 return err;
413
414 sm = flags & SVE_SIG_FLAG_SM;
415 if (sm) {
416 if (!system_supports_sme())
417 return -EINVAL;
418
419 vl = task_get_sme_vl(current);
420 } else {
421 /*
422 * An SME-only system uses the SVE format for streaming mode, so it
423 * can have an SVE-formatted context with a zero VL and no
424 * payload data.
425 */
426 if (!system_supports_sve() && !system_supports_sme())
427 return -EINVAL;
428
429 vl = task_get_sve_vl(current);
430 }
431
432 if (user_vl != vl)
433 return -EINVAL;
434
435 /*
436 * Non-streaming SVE state may be preserved without an SVE payload, in
437 * which case the SVE context only has a header with VL==0, and all
438 * state can be restored from the FPSIMD context.
439 *
440 * Streaming SVE state is always preserved with an SVE payload. For
441 * consistency and robustness, reject restoring streaming SVE state
442 * without an SVE payload.
443 */
444 if (!sm && user->sve_size == sizeof(*user->sve))
445 return restore_fpsimd_context(user);
446
447 vq = sve_vq_from_vl(vl);
448
449 if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
450 return -EINVAL;
451
452 sve_alloc(current, true);
453 if (!current->thread.sve_state) {
454 clear_thread_flag(TIF_SVE);
455 return -ENOMEM;
456 }
457
458 err = __copy_from_user(current->thread.sve_state,
459 (char __user const *)user->sve +
460 SVE_SIG_REGS_OFFSET,
461 SVE_SIG_REGS_SIZE(vq));
462 if (err)
463 return -EFAULT;
464
465 if (flags & SVE_SIG_FLAG_SM)
466 current->thread.svcr |= SVCR_SM_MASK;
467 else
468 set_thread_flag(TIF_SVE);
469 current->thread.fp_type = FP_STATE_SVE;
470
471 err = read_fpsimd_context(&fpsimd, user);
472 if (err)
473 return err;
474
475 /* Merge the FPSIMD registers into the SVE state */
476 fpsimd_update_current_state(&fpsimd);
477
478 return 0;
479 }
480
481 #else /* ! CONFIG_ARM64_SVE */
482
483 static int restore_sve_fpsimd_context(struct user_ctxs *user)
484 {
485 WARN_ON_ONCE(1);
486 return -EINVAL;
487 }
488
489 /* Turn any non-optimised out attempts to use this into a link error: */
490 extern int preserve_sve_context(void __user *ctx);
491
492 #endif /* ! CONFIG_ARM64_SVE */
493
494 #ifdef CONFIG_ARM64_SME
495
496 static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
497 {
498 u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
499 int err = 0;
500
501 __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
502 __put_user_error(sizeof(*ctx), &ctx->head.size, err);
503 __put_user_error(tpidr2_el0, &ctx->tpidr2, err);
504
505 return err;
506 }
507
508 static int restore_tpidr2_context(struct user_ctxs *user)
509 {
510 u64 tpidr2_el0;
511 int err = 0;
512
513 if (user->tpidr2_size != sizeof(*user->tpidr2))
514 return -EINVAL;
515
516 __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
517 if (!err)
518 write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);
519
520 return err;
521 }
522
523 static int preserve_za_context(struct za_context __user *ctx)
524 {
525 int err = 0;
526 u16 reserved[ARRAY_SIZE(ctx->__reserved)];
527 unsigned int vl = task_get_sme_vl(current);
528 unsigned int vq;
529
530 if (thread_za_enabled(&current->thread))
531 vq = sve_vq_from_vl(vl);
532 else
533 vq = 0;
534
535 memset(reserved, 0, sizeof(reserved));
536
537 __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
538 __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
539 &ctx->head.size, err);
540 __put_user_error(vl, &ctx->vl, err);
541 BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
542 err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
543
544 if (vq) {
545 err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
546 current->thread.sme_state,
547 ZA_SIG_REGS_SIZE(vq));
548 }
549
550 return err ? -EFAULT : 0;
551 }
552
553 static int restore_za_context(struct user_ctxs *user)
554 {
555 int err = 0;
556 unsigned int vq;
557 u16 user_vl;
558
559 if (user->za_size < sizeof(*user->za))
560 return -EINVAL;
561
562 __get_user_error(user_vl, &(user->za->vl), err);
563 if (err)
564 return err;
565
566 if (user_vl != task_get_sme_vl(current))
567 return -EINVAL;
568
569 if (user->za_size == sizeof(*user->za)) {
570 current->thread.svcr &= ~SVCR_ZA_MASK;
571 return 0;
572 }
573
574 vq = sve_vq_from_vl(user_vl);
575
576 if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
577 return -EINVAL;
578
579 sme_alloc(current, true);
580 if (!current->thread.sme_state) {
581 current->thread.svcr &= ~SVCR_ZA_MASK;
582 clear_thread_flag(TIF_SME);
583 return -ENOMEM;
584 }
585
586 err = __copy_from_user(current->thread.sme_state,
587 (char __user const *)user->za +
588 ZA_SIG_REGS_OFFSET,
589 ZA_SIG_REGS_SIZE(vq));
590 if (err)
591 return -EFAULT;
592
593 set_thread_flag(TIF_SME);
594 current->thread.svcr |= SVCR_ZA_MASK;
595
596 return 0;
597 }
598
599 static int preserve_zt_context(struct zt_context __user *ctx)
600 {
601 int err = 0;
602 u16 reserved[ARRAY_SIZE(ctx->__reserved)];
603
604 if (WARN_ON(!thread_za_enabled(&current->thread)))
605 return -EINVAL;
606
607 memset(reserved, 0, sizeof(reserved));
608
609 __put_user_error(ZT_MAGIC, &ctx->head.magic, err);
610 __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
611 &ctx->head.size, err);
612 __put_user_error(1, &ctx->nregs, err);
613 BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
614 err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
615
616 err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
617 thread_zt_state(&current->thread),
618 ZT_SIG_REGS_SIZE(1));
619
620 return err ? -EFAULT : 0;
621 }
622
623 static int restore_zt_context(struct user_ctxs *user)
624 {
625 int err;
626 u16 nregs;
627
628 /* ZA must be restored first for this check to be valid */
629 if (!thread_za_enabled(&current->thread))
630 return -EINVAL;
631
632 if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
633 return -EINVAL;
634
635 if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
636 return -EFAULT;
637
638 if (nregs != 1)
639 return -EINVAL;
640
641 err = __copy_from_user(thread_zt_state(&current->thread),
642 (char __user const *)user->zt +
643 ZT_SIG_REGS_OFFSET,
644 ZT_SIG_REGS_SIZE(1));
645 if (err)
646 return -EFAULT;
647
648 return 0;
649 }
650
651 #else /* ! CONFIG_ARM64_SME */
652
653 /* Turn any non-optimised out attempts to use these into a link error: */
654 extern int preserve_tpidr2_context(void __user *ctx);
655 extern int restore_tpidr2_context(struct user_ctxs *user);
656 extern int preserve_za_context(void __user *ctx);
657 extern int restore_za_context(struct user_ctxs *user);
658 extern int preserve_zt_context(void __user *ctx);
659 extern int restore_zt_context(struct user_ctxs *user);
660
661 #endif /* ! CONFIG_ARM64_SME */
662
663 #ifdef CONFIG_ARM64_GCS
664
665 static int preserve_gcs_context(struct gcs_context __user *ctx)
666 {
667 int err = 0;
668 u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
669
670 /*
671 * If GCS is enabled we will add a cap token to the GCS when
672 * delivering the signal, so account for it in the GCSPR_EL0 we
673 * report in order to support stack switching via sigreturn. We do
674 * not allow enabling GCS via sigreturn, so the token is only
675 * relevant for threads that already have GCS enabled.
676 */
677 if (task_gcs_el0_enabled(current))
678 gcspr -= 8;
679
680 __put_user_error(GCS_MAGIC, &ctx->head.magic, err);
681 __put_user_error(sizeof(*ctx), &ctx->head.size, err);
682 __put_user_error(gcspr, &ctx->gcspr, err);
683 __put_user_error(0, &ctx->reserved, err);
684 __put_user_error(current->thread.gcs_el0_mode,
685 &ctx->features_enabled, err);
686
687 return err;
688 }
689
690 static int restore_gcs_context(struct user_ctxs *user)
691 {
692 u64 gcspr, enabled;
693 int err = 0;
694
695 if (user->gcs_size != sizeof(*user->gcs))
696 return -EINVAL;
697
698 __get_user_error(gcspr, &user->gcs->gcspr, err);
699 __get_user_error(enabled, &user->gcs->features_enabled, err);
700 if (err)
701 return err;
702
703 /* Don't allow unknown modes */
704 if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
705 return -EINVAL;
706
707 err = gcs_check_locked(current, enabled);
708 if (err != 0)
709 return err;
710
711 /* Don't allow enabling */
712 if (!task_gcs_el0_enabled(current) &&
713 (enabled & PR_SHADOW_STACK_ENABLE))
714 return -EINVAL;
715
716 /* If we are disabling disable everything */
717 if (!(enabled & PR_SHADOW_STACK_ENABLE))
718 enabled = 0;
719
720 current->thread.gcs_el0_mode = enabled;
721
722 /*
723 * We let userspace set GCSPR_EL0 to anything here, we will
724 * validate later in gcs_restore_signal().
725 */
726 write_sysreg_s(gcspr, SYS_GCSPR_EL0);
727
728 return 0;
729 }
730
731 #else /* ! CONFIG_ARM64_GCS */
732
733 /* Turn any non-optimised out attempts to use these into a link error: */
734 extern int preserve_gcs_context(void __user *ctx);
735 extern int restore_gcs_context(struct user_ctxs *user);
736
737 #endif /* ! CONFIG_ARM64_GCS */
738
739 static int parse_user_sigframe(struct user_ctxs *user,
740 struct rt_sigframe __user *sf)
741 {
742 struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
743 struct _aarch64_ctx __user *head;
744 char __user *base = (char __user *)&sc->__reserved;
745 size_t offset = 0;
746 size_t limit = sizeof(sc->__reserved);
747 bool have_extra_context = false;
748 char const __user *const sfp = (char const __user *)sf;
749
750 user->fpsimd = NULL;
751 user->sve = NULL;
752 user->tpidr2 = NULL;
753 user->za = NULL;
754 user->zt = NULL;
755 user->fpmr = NULL;
756 user->poe = NULL;
757 user->gcs = NULL;
758
759 if (!IS_ALIGNED((unsigned long)base, 16))
760 goto invalid;
761
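/*
 * Walk the records in __reserved[], switching to the extra_context
 * data if an EXTRA_MAGIC record is encountered.
 */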
762 while (1) {
763 int err = 0;
764 u32 magic, size;
765 char const __user *userp;
766 struct extra_context const __user *extra;
767 u64 extra_datap;
768 u32 extra_size;
769 struct _aarch64_ctx const __user *end;
770 u32 end_magic, end_size;
771
772 if (limit - offset < sizeof(*head))
773 goto invalid;
774
775 if (!IS_ALIGNED(offset, 16))
776 goto invalid;
777
778 head = (struct _aarch64_ctx __user *)(base + offset);
779 __get_user_error(magic, &head->magic, err);
780 __get_user_error(size, &head->size, err);
781 if (err)
782 return err;
783
784 if (limit - offset < size)
785 goto invalid;
786
787 switch (magic) {
788 case 0:
789 if (size)
790 goto invalid;
791
792 goto done;
793
794 case FPSIMD_MAGIC:
795 if (!system_supports_fpsimd())
796 goto invalid;
797 if (user->fpsimd)
798 goto invalid;
799
800 user->fpsimd = (struct fpsimd_context __user *)head;
801 user->fpsimd_size = size;
802 break;
803
804 case ESR_MAGIC:
805 /* ignore */
806 break;
807
808 case POE_MAGIC:
809 if (!system_supports_poe())
810 goto invalid;
811
812 if (user->poe)
813 goto invalid;
814
815 user->poe = (struct poe_context __user *)head;
816 user->poe_size = size;
817 break;
818
819 case SVE_MAGIC:
820 if (!system_supports_sve() && !system_supports_sme())
821 goto invalid;
822
823 if (user->sve)
824 goto invalid;
825
826 user->sve = (struct sve_context __user *)head;
827 user->sve_size = size;
828 break;
829
830 case TPIDR2_MAGIC:
831 if (!system_supports_tpidr2())
832 goto invalid;
833
834 if (user->tpidr2)
835 goto invalid;
836
837 user->tpidr2 = (struct tpidr2_context __user *)head;
838 user->tpidr2_size = size;
839 break;
840
841 case ZA_MAGIC:
842 if (!system_supports_sme())
843 goto invalid;
844
845 if (user->za)
846 goto invalid;
847
848 user->za = (struct za_context __user *)head;
849 user->za_size = size;
850 break;
851
852 case ZT_MAGIC:
853 if (!system_supports_sme2())
854 goto invalid;
855
856 if (user->zt)
857 goto invalid;
858
859 user->zt = (struct zt_context __user *)head;
860 user->zt_size = size;
861 break;
862
863 case FPMR_MAGIC:
864 if (!system_supports_fpmr())
865 goto invalid;
866
867 if (user->fpmr)
868 goto invalid;
869
870 user->fpmr = (struct fpmr_context __user *)head;
871 user->fpmr_size = size;
872 break;
873
874 case GCS_MAGIC:
875 if (!system_supports_gcs())
876 goto invalid;
877
878 if (user->gcs)
879 goto invalid;
880
881 user->gcs = (struct gcs_context __user *)head;
882 user->gcs_size = size;
883 break;
884
885 case EXTRA_MAGIC:
886 if (have_extra_context)
887 goto invalid;
888
889 if (size < sizeof(*extra))
890 goto invalid;
891
892 userp = (char const __user *)head;
893
894 extra = (struct extra_context const __user *)userp;
895 userp += size;
896
897 __get_user_error(extra_datap, &extra->datap, err);
898 __get_user_error(extra_size, &extra->size, err);
899 if (err)
900 return err;
901
902 /* Check for the dummy terminator in __reserved[]: */
903
904 if (limit - offset - size < TERMINATOR_SIZE)
905 goto invalid;
906
907 end = (struct _aarch64_ctx const __user *)userp;
908 userp += TERMINATOR_SIZE;
909
910 __get_user_error(end_magic, &end->magic, err);
911 __get_user_error(end_size, &end->size, err);
912 if (err)
913 return err;
914
915 if (end_magic || end_size)
916 goto invalid;
917
918 /* Prevent looping/repeated parsing of extra_context */
919 have_extra_context = true;
920
921 base = (__force void __user *)extra_datap;
922 if (!IS_ALIGNED((unsigned long)base, 16))
923 goto invalid;
924
925 if (!IS_ALIGNED(extra_size, 16))
926 goto invalid;
927
928 if (base != userp)
929 goto invalid;
930
931 /* Reject "unreasonably large" frames: */
932 if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
933 goto invalid;
934
935 /*
936 * Ignore trailing terminator in __reserved[]
937 * and start parsing extra data:
938 */
939 offset = 0;
940 limit = extra_size;
941
942 if (!access_ok(base, limit))
943 goto invalid;
944
945 continue;
946
947 default:
948 goto invalid;
949 }
950
951 if (size < sizeof(*head))
952 goto invalid;
953
954 if (limit - offset < size)
955 goto invalid;
956
957 offset += size;
958 }
959
960 done:
961 return 0;
962
963 invalid:
964 return -EINVAL;
965 }
966
967 static int restore_sigframe(struct pt_regs *regs,
968 struct rt_sigframe __user *sf,
969 struct user_access_state *ua_state)
970 {
971 sigset_t set;
972 int i, err;
973 struct user_ctxs user;
974
975 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
976 if (err == 0)
977 set_current_blocked(&set);
978
979 for (i = 0; i < 31; i++)
980 __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
981 err);
982 __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
983 __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
984 __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
985
986 /*
987 * Avoid sys_rt_sigreturn() restarting.
988 */
989 forget_syscall(regs);
990
991 fpsimd_save_and_flush_current_state();
992
993 err |= !valid_user_regs(&regs->user_regs, current);
994 if (err == 0)
995 err = parse_user_sigframe(&user, sf);
996
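/* Restore the optional records located by parse_user_sigframe() */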
997 if (err == 0 && system_supports_fpsimd()) {
998 if (!user.fpsimd)
999 return -EINVAL;
1000
1001 if (user.sve)
1002 err = restore_sve_fpsimd_context(&user);
1003 else
1004 err = restore_fpsimd_context(&user);
1005 }
1006
1007 if (err == 0 && system_supports_gcs() && user.gcs)
1008 err = restore_gcs_context(&user);
1009
1010 if (err == 0 && system_supports_tpidr2() && user.tpidr2)
1011 err = restore_tpidr2_context(&user);
1012
1013 if (err == 0 && system_supports_fpmr() && user.fpmr)
1014 err = restore_fpmr_context(&user);
1015
1016 if (err == 0 && system_supports_sme() && user.za)
1017 err = restore_za_context(&user);
1018
1019 if (err == 0 && system_supports_sme2() && user.zt)
1020 err = restore_zt_context(&user);
1021
1022 if (err == 0 && system_supports_poe() && user.poe)
1023 err = restore_poe_context(&user, ua_state);
1024
1025 return err;
1026 }
1027
1028 #ifdef CONFIG_ARM64_GCS
1029 static int gcs_restore_signal(void)
1030 {
1031 u64 gcspr_el0, cap;
1032 int ret;
1033
1034 if (!system_supports_gcs())
1035 return 0;
1036
1037 if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
1038 return 0;
1039
1040 gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
1041
1042 /*
1043 * Ensure that any changes to the GCS done via GCS operations
1044 * are visible to the normal reads we do to validate the
1045 * token.
1046 */
1047 gcsb_dsync();
1048
1049 /*
1050 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
1051 * We don't enforce that this is in a GCS page, if it is not
1052 * then faults will be generated on GCS operations - the main
1053 * concern is to protect GCS pages.
1054 */
1055 ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
1056 sizeof(cap));
1057 if (ret)
1058 return -EFAULT;
1059
1060 /*
1061 * Check that the cap is the actual GCS before replacing it.
1062 */
1063 if (cap != GCS_SIGNAL_CAP(gcspr_el0))
1064 return -EINVAL;
1065
1066 /* Invalidate the token to prevent reuse */
1067 put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
1068 if (ret != 0)
1069 return -EFAULT;
1070
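/* Pop the consumed cap token from the GCS */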
1071 write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);
1072
1073 return 0;
1074 }
1075
1076 #else
1077 static int gcs_restore_signal(void) { return 0; }
1078 #endif
1079
1080 SYSCALL_DEFINE0(rt_sigreturn)
1081 {
1082 struct pt_regs *regs = current_pt_regs();
1083 struct rt_sigframe __user *frame;
1084 struct user_access_state ua_state;
1085
1086 /* Always make any pending restarted system calls return -EINTR */
1087 current->restart_block.fn = do_no_restart_syscall;
1088
1089 /*
1090 * Since we stacked the signal frame on a 128-bit boundary, 'sp' should
1091 * be 128-bit aligned here.
1092 */
1093 if (regs->sp & 15)
1094 goto badframe;
1095
1096 frame = (struct rt_sigframe __user *)regs->sp;
1097
1098 if (!access_ok(frame, sizeof (*frame)))
1099 goto badframe;
1100
1101 if (restore_sigframe(regs, frame, &ua_state))
1102 goto badframe;
1103
1104 if (gcs_restore_signal())
1105 goto badframe;
1106
1107 if (restore_altstack(&frame->uc.uc_stack))
1108 goto badframe;
1109
1110 restore_user_access_state(&ua_state);
1111
1112 return regs->regs[0];
1113
1114 badframe:
1115 arm64_notify_segfault(regs->sp);
1116 return 0;
1117 }
1118
1119 /*
1120 * Determine the layout of optional records in the signal frame
1121 *
1122 * add_all: if true, lays out the biggest possible signal frame for
1123 * this task; otherwise, generates a layout for the current state
1124 * of the task.
1125 */
1126 static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
1127 bool add_all)
1128 {
1129 int err;
1130
1131 if (system_supports_fpsimd()) {
1132 err = sigframe_alloc(user, &user->fpsimd_offset,
1133 sizeof(struct fpsimd_context));
1134 if (err)
1135 return err;
1136 }
1137
1138 /* fault information, if valid */
1139 if (add_all || current->thread.fault_code) {
1140 err = sigframe_alloc(user, &user->esr_offset,
1141 sizeof(struct esr_context));
1142 if (err)
1143 return err;
1144 }
1145
1146 #ifdef CONFIG_ARM64_GCS
1147 if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
1148 err = sigframe_alloc(user, &user->gcs_offset,
1149 sizeof(struct gcs_context));
1150 if (err)
1151 return err;
1152 }
1153 #endif
1154
1155 if (system_supports_sve() || system_supports_sme()) {
1156 unsigned int vq = 0;
1157
1158 if (add_all || current->thread.fp_type == FP_STATE_SVE ||
1159 thread_sm_enabled(&current->thread)) {
1160 int vl = max(sve_max_vl(), sme_max_vl());
1161
1162 if (!add_all)
1163 vl = thread_get_cur_vl(&current->thread);
1164
1165 vq = sve_vq_from_vl(vl);
1166 }
1167
1168 err = sigframe_alloc(user, &user->sve_offset,
1169 SVE_SIG_CONTEXT_SIZE(vq));
1170 if (err)
1171 return err;
1172 }
1173
1174 if (system_supports_tpidr2()) {
1175 err = sigframe_alloc(user, &user->tpidr2_offset,
1176 sizeof(struct tpidr2_context));
1177 if (err)
1178 return err;
1179 }
1180
1181 if (system_supports_sme()) {
1182 unsigned int vl;
1183 unsigned int vq = 0;
1184
1185 if (add_all)
1186 vl = sme_max_vl();
1187 else
1188 vl = task_get_sme_vl(current);
1189
1190 if (thread_za_enabled(&current->thread))
1191 vq = sve_vq_from_vl(vl);
1192
1193 err = sigframe_alloc(user, &user->za_offset,
1194 ZA_SIG_CONTEXT_SIZE(vq));
1195 if (err)
1196 return err;
1197 }
1198
1199 if (system_supports_sme2()) {
1200 if (add_all || thread_za_enabled(&current->thread)) {
1201 err = sigframe_alloc(user, &user->zt_offset,
1202 ZT_SIG_CONTEXT_SIZE(1));
1203 if (err)
1204 return err;
1205 }
1206 }
1207
1208 if (system_supports_fpmr()) {
1209 err = sigframe_alloc(user, &user->fpmr_offset,
1210 sizeof(struct fpmr_context));
1211 if (err)
1212 return err;
1213 }
1214
1215 if (system_supports_poe()) {
1216 err = sigframe_alloc(user, &user->poe_offset,
1217 sizeof(struct poe_context));
1218 if (err)
1219 return err;
1220 }
1221
1222 return sigframe_alloc_end(user);
1223 }
1224
1225 static int setup_sigframe(struct rt_sigframe_user_layout *user,
1226 struct pt_regs *regs, sigset_t *set,
1227 const struct user_access_state *ua_state)
1228 {
1229 int i, err = 0;
1230 struct rt_sigframe __user *sf = user->sigframe;
1231
1232 /* set up the stack frame for unwinding */
1233 __put_user_error(regs->regs[29], &user->next_frame->fp, err);
1234 __put_user_error(regs->regs[30], &user->next_frame->lr, err);
1235
1236 for (i = 0; i < 31; i++)
1237 __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
1238 err);
1239 __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
1240 __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
1241 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
1242
1243 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
1244
1245 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
1246
1247 if (err == 0 && system_supports_fpsimd()) {
1248 struct fpsimd_context __user *fpsimd_ctx =
1249 apply_user_offset(user, user->fpsimd_offset);
1250 err |= preserve_fpsimd_context(fpsimd_ctx);
1251 }
1252
1253 /* fault information, if valid */
1254 if (err == 0 && user->esr_offset) {
1255 struct esr_context __user *esr_ctx =
1256 apply_user_offset(user, user->esr_offset);
1257
1258 __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
1259 __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
1260 __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
1261 }
1262
1263 if (system_supports_gcs() && err == 0 && user->gcs_offset) {
1264 struct gcs_context __user *gcs_ctx =
1265 apply_user_offset(user, user->gcs_offset);
1266 err |= preserve_gcs_context(gcs_ctx);
1267 }
1268
1269 /* Scalable Vector Extension state (including streaming), if present */
1270 if ((system_supports_sve() || system_supports_sme()) &&
1271 err == 0 && user->sve_offset) {
1272 struct sve_context __user *sve_ctx =
1273 apply_user_offset(user, user->sve_offset);
1274 err |= preserve_sve_context(sve_ctx);
1275 }
1276
1277 /* TPIDR2 if supported */
1278 if (system_supports_tpidr2() && err == 0) {
1279 struct tpidr2_context __user *tpidr2_ctx =
1280 apply_user_offset(user, user->tpidr2_offset);
1281 err |= preserve_tpidr2_context(tpidr2_ctx);
1282 }
1283
1284 /* FPMR if supported */
1285 if (system_supports_fpmr() && err == 0) {
1286 struct fpmr_context __user *fpmr_ctx =
1287 apply_user_offset(user, user->fpmr_offset);
1288 err |= preserve_fpmr_context(fpmr_ctx);
1289 }
1290
1291 if (system_supports_poe() && err == 0) {
1292 struct poe_context __user *poe_ctx =
1293 apply_user_offset(user, user->poe_offset);
1294
1295 err |= preserve_poe_context(poe_ctx, ua_state);
1296 }
1297
1298 /* ZA state if present */
1299 if (system_supports_sme() && err == 0 && user->za_offset) {
1300 struct za_context __user *za_ctx =
1301 apply_user_offset(user, user->za_offset);
1302 err |= preserve_za_context(za_ctx);
1303 }
1304
1305 /* ZT state if present */
1306 if (system_supports_sme2() && err == 0 && user->zt_offset) {
1307 struct zt_context __user *zt_ctx =
1308 apply_user_offset(user, user->zt_offset);
1309 err |= preserve_zt_context(zt_ctx);
1310 }
1311
1312 if (err == 0 && user->extra_offset) {
1313 char __user *sfp = (char __user *)user->sigframe;
1314 char __user *userp =
1315 apply_user_offset(user, user->extra_offset);
1316
1317 struct extra_context __user *extra;
1318 struct _aarch64_ctx __user *end;
1319 u64 extra_datap;
1320 u32 extra_size;
1321
1322 extra = (struct extra_context __user *)userp;
1323 userp += EXTRA_CONTEXT_SIZE;
1324
1325 end = (struct _aarch64_ctx __user *)userp;
1326 userp += TERMINATOR_SIZE;
1327
1328 /*
1329 * extra_datap is just written to the signal frame.
1330 * The value gets cast back to a void __user *
1331 * during sigreturn.
1332 */
1333 extra_datap = (__force u64)userp;
1334 extra_size = sfp + round_up(user->size, 16) - userp;
1335
1336 __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
1337 __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
1338 __put_user_error(extra_datap, &extra->datap, err);
1339 __put_user_error(extra_size, &extra->size, err);
1340
1341 /* Add the terminator */
1342 __put_user_error(0, &end->magic, err);
1343 __put_user_error(0, &end->size, err);
1344 }
1345
1346 /* set the "end" magic */
1347 if (err == 0) {
1348 struct _aarch64_ctx __user *end =
1349 apply_user_offset(user, user->end_offset);
1350
1351 __put_user_error(0, &end->magic, err);
1352 __put_user_error(0, &end->size, err);
1353 }
1354
1355 return err;
1356 }
1357
1358 static int get_sigframe(struct rt_sigframe_user_layout *user,
1359 struct ksignal *ksig, struct pt_regs *regs)
1360 {
1361 unsigned long sp, sp_top;
1362 int err;
1363
1364 init_user_layout(user);
1365 err = setup_sigframe_layout(user, false);
1366 if (err)
1367 return err;
1368
1369 sp = sp_top = sigsp(regs->sp, ksig);
1370
1371 sp = round_down(sp - sizeof(struct frame_record), 16);
1372 user->next_frame = (struct frame_record __user *)sp;
1373
1374 sp = round_down(sp, 16) - sigframe_size(user);
1375 user->sigframe = (struct rt_sigframe __user *)sp;
1376
1377 /*
1378 * Check that we can actually write to the signal frame.
1379 */
1380 if (!access_ok(user->sigframe, sp_top - sp))
1381 return -EFAULT;
1382
1383 return 0;
1384 }
1385
1386 #ifdef CONFIG_ARM64_GCS
1387
1388 static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
1389 {
1390 u64 gcspr_el0;
1391 int ret = 0;
1392
1393 if (!system_supports_gcs())
1394 return 0;
1395
1396 if (!task_gcs_el0_enabled(current))
1397 return 0;
1398
1399 /*
1400 * We are entering a signal handler, current register state is
1401 * active.
1402 */
1403 gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
1404
1405 /*
1406 * Push a cap and the GCS entry for the trampoline onto the GCS.
1407 */
1408 put_user_gcs((unsigned long)sigtramp,
1409 (unsigned long __user *)(gcspr_el0 - 16), &ret);
1410 put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
1411 (unsigned long __user *)(gcspr_el0 - 8), &ret);
1412 if (ret != 0)
1413 return ret;
1414
1415 gcspr_el0 -= 16;
1416 write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);
1417
1418 return 0;
1419 }
1420 #else
1421
1422 static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
1423 {
1424 return 0;
1425 }
1426
1427 #endif
1428
1429 static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
1430 struct rt_sigframe_user_layout *user, int usig)
1431 {
1432 __sigrestore_t sigtramp;
1433 int err;
1434
1435 if (ksig->ka.sa.sa_flags & SA_RESTORER)
1436 sigtramp = ksig->ka.sa.sa_restorer;
1437 else
1438 sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
1439
1440 err = gcs_signal_entry(sigtramp, ksig);
1441 if (err)
1442 return err;
1443
1444 /*
1445 * We must not fail from this point onwards. We are going to update
1446 * registers, including SP, in order to invoke the signal handler. If
1447 * we failed and attempted to deliver a nested SIGSEGV to a handler
1448 * after that point, the subsequent sigreturn would end up restoring
1449 * the (partial) state for the original signal handler.
1450 */
1451
1452 regs->regs[0] = usig;
1453 if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
1454 regs->regs[1] = (unsigned long)&user->sigframe->info;
1455 regs->regs[2] = (unsigned long)&user->sigframe->uc;
1456 }
1457 regs->sp = (unsigned long)user->sigframe;
1458 regs->regs[29] = (unsigned long)&user->next_frame->fp;
1459 regs->regs[30] = (unsigned long)sigtramp;
1460 regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
1461
1462 /*
1463 * Signal delivery is a (wacky) indirect function call in
1464 * userspace, so simulate the same setting of BTYPE as a BLR
1465 * <register containing the signal handler entry point>.
1466 * Signal delivery to a location in a PROT_BTI guarded page
1467 * that is not a function entry point will now trigger a
1468 * SIGILL in userspace.
1469 *
1470 * If the signal handler entry point is not in a PROT_BTI
1471 * guarded page, this is harmless.
1472 */
1473 if (system_supports_bti()) {
1474 regs->pstate &= ~PSR_BTYPE_MASK;
1475 regs->pstate |= PSR_BTYPE_C;
1476 }
1477
1478 /* TCO (Tag Check Override) always cleared for signal handlers */
1479 regs->pstate &= ~PSR_TCO_BIT;
1480
1481 /* Signal handlers are invoked with ZA and streaming mode disabled */
1482 if (system_supports_sme()) {
1483 task_smstop_sm(current);
1484 current->thread.svcr &= ~SVCR_ZA_MASK;
1485 write_sysreg_s(0, SYS_TPIDR2_EL0);
1486 }
1487
1488 return 0;
1489 }
1490
1491 static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
1492 struct pt_regs *regs)
1493 {
1494 struct rt_sigframe_user_layout user;
1495 struct rt_sigframe __user *frame;
1496 struct user_access_state ua_state;
1497 int err = 0;
1498
1499 fpsimd_save_and_flush_current_state();
1500
1501 if (get_sigframe(&user, ksig, regs))
1502 return 1;
1503
1504 save_reset_user_access_state(&ua_state);
1505 frame = user.sigframe;
1506
1507 __put_user_error(0, &frame->uc.uc_flags, err);
1508 __put_user_error(NULL, &frame->uc.uc_link, err);
1509
1510 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1511 err |= setup_sigframe(&user, regs, set, &ua_state);
1512 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1513 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
1514
1515 if (err == 0)
1516 err = setup_return(regs, ksig, &user, usig);
1517
1518 /*
1519 * We must not fail if setup_return() succeeded - see comment at the
1520 * beginning of setup_return().
1521 */
1522
1523 if (err == 0)
1524 set_handler_user_access_state();
1525 else
1526 restore_user_access_state(&ua_state);
1527
1528 return err;
1529 }
1530
1531 static void setup_restart_syscall(struct pt_regs *regs)
1532 {
1533 if (is_compat_task())
1534 compat_setup_restart_syscall(regs);
1535 else
1536 regs->regs[8] = __NR_restart_syscall;
1537 }
1538
1539 /*
1540 * OK, we're invoking a handler
1541 */
1542 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1543 {
1544 sigset_t *oldset = sigmask_to_save();
1545 int usig = ksig->sig;
1546 int ret;
1547
1548 rseq_signal_deliver(ksig, regs);
1549
1550 /*
1551 * Set up the stack frame
1552 */
1553 if (is_compat_task()) {
1554 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1555 ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
1556 else
1557 ret = compat_setup_frame(usig, ksig, oldset, regs);
1558 } else {
1559 ret = setup_rt_frame(usig, ksig, oldset, regs);
1560 }
1561
1562 /*
1563 * Check that the resulting registers are actually sane.
1564 */
1565 ret |= !valid_user_regs(&regs->user_regs, current);
1566
1567 /* Step into the signal handler if we are stepping */
1568 signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
1569 }
1570
1571 /*
1572 * Note that 'init' is a special process: it doesn't get signals it doesn't
1573 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1574 * mistake.
1575 *
1576 * Note that we go through the signals twice: once to check the signals that
1577 * the kernel can handle, and then we build all the user-level signal handling
1578 * stack-frames in one go after that.
1579 */
1580 void arch_do_signal_or_restart(struct pt_regs *regs)
1581 {
1582 unsigned long continue_addr = 0, restart_addr = 0;
1583 int retval = 0;
1584 struct ksignal ksig;
1585 bool syscall = in_syscall(regs);
1586
1587 /*
1588 * If we were from a system call, check for system call restarting...
1589 */
1590 if (syscall) {
1591 continue_addr = regs->pc;
1592 restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
1593 retval = regs->regs[0];
1594
1595 /*
1596 * Avoid additional syscall restarting via ret_to_user.
1597 */
1598 forget_syscall(regs);
1599
1600 /*
1601 * Prepare for system call restart. We do this here so that a
1602 * debugger will see the already changed PC.
1603 */
1604 switch (retval) {
1605 case -ERESTARTNOHAND:
1606 case -ERESTARTSYS:
1607 case -ERESTARTNOINTR:
1608 case -ERESTART_RESTARTBLOCK:
1609 regs->regs[0] = regs->orig_x0;
1610 regs->pc = restart_addr;
1611 break;
1612 }
1613 }
1614
1615 /*
1616 * Get the signal to deliver. When running under ptrace, at this point
1617 * the debugger may change all of our registers.
1618 */
1619 if (get_signal(&ksig)) {
1620 /*
1621 * Depending on the signal settings, we may need to revert the
1622 * decision to restart the system call, but skip this if a
1623 * debugger has chosen to restart at a different PC.
1624 */
1625 if (regs->pc == restart_addr &&
1626 (retval == -ERESTARTNOHAND ||
1627 retval == -ERESTART_RESTARTBLOCK ||
1628 (retval == -ERESTARTSYS &&
1629 !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
1630 syscall_set_return_value(current, regs, -EINTR, 0);
1631 regs->pc = continue_addr;
1632 }
1633
1634 handle_signal(&ksig, regs);
1635 return;
1636 }
1637
1638 /*
1639 * Handle restarting a different system call. As above, if a debugger
1640 * has chosen to restart at a different PC, ignore the restart.
1641 */
1642 if (syscall && regs->pc == restart_addr) {
1643 if (retval == -ERESTART_RESTARTBLOCK)
1644 setup_restart_syscall(regs);
1645 user_rewind_single_step(current);
1646 }
1647
1648 restore_saved_sigmask();
1649 }
1650
1651 unsigned long __ro_after_init signal_minsigstksz;
1652
1653 /*
1654 * Determine the stack space required for guaranteed signal delivery.
1655 * This function is used to populate AT_MINSIGSTKSZ at process startup.
1656 * cpufeatures setup is assumed to be complete.
1657 */
1658 void __init minsigstksz_setup(void)
1659 {
1660 struct rt_sigframe_user_layout user;
1661
1662 init_user_layout(&user);
1663
1664 /*
1665 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
1666 * be big enough, but it's our best guess:
1667 */
1668 if (WARN_ON(setup_sigframe_layout(&user, true)))
1669 return;
1670
1671 signal_minsigstksz = sigframe_size(&user) +
1672 round_up(sizeof(struct frame_record), 16) +
1673 16; /* max alignment padding */
1674 }
1675
1676 /*
1677 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
1678 * changes likely come with new fields that should be added below.
1679 */
1680 static_assert(NSIGILL == 11);
1681 static_assert(NSIGFPE == 15);
1682 static_assert(NSIGSEGV == 10);
1683 static_assert(NSIGBUS == 5);
1684 static_assert(NSIGTRAP == 6);
1685 static_assert(NSIGCHLD == 6);
1686 static_assert(NSIGSYS == 2);
1687 static_assert(sizeof(siginfo_t) == 128);
1688 static_assert(__alignof__(siginfo_t) == 8);
1689 static_assert(offsetof(siginfo_t, si_signo) == 0x00);
1690 static_assert(offsetof(siginfo_t, si_errno) == 0x04);
1691 static_assert(offsetof(siginfo_t, si_code) == 0x08);
1692 static_assert(offsetof(siginfo_t, si_pid) == 0x10);
1693 static_assert(offsetof(siginfo_t, si_uid) == 0x14);
1694 static_assert(offsetof(siginfo_t, si_tid) == 0x10);
1695 static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
1696 static_assert(offsetof(siginfo_t, si_status) == 0x18);
1697 static_assert(offsetof(siginfo_t, si_utime) == 0x20);
1698 static_assert(offsetof(siginfo_t, si_stime) == 0x28);
1699 static_assert(offsetof(siginfo_t, si_value) == 0x18);
1700 static_assert(offsetof(siginfo_t, si_int) == 0x18);
1701 static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
1702 static_assert(offsetof(siginfo_t, si_addr) == 0x10);
1703 static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
1704 static_assert(offsetof(siginfo_t, si_lower) == 0x20);
1705 static_assert(offsetof(siginfo_t, si_upper) == 0x28);
1706 static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
1707 static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
1708 static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
1709 static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
1710 static_assert(offsetof(siginfo_t, si_band) == 0x10);
1711 static_assert(offsetof(siginfo_t, si_fd) == 0x18);
1712 static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
1713 static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
1714 static_assert(offsetof(siginfo_t, si_arch) == 0x1c);
1715