xref: /linux/arch/x86/kernel/signal.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1991, 1992  Linus Torvalds
4  *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
5  *
6  *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
7  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
8  *  2000-2002   x86-64 support by Andi Kleen
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/sched.h>
14 #include <linux/sched/task_stack.h>
15 #include <linux/mm.h>
16 #include <linux/smp.h>
17 #include <linux/kernel.h>
18 #include <linux/kstrtox.h>
19 #include <linux/errno.h>
20 #include <linux/wait.h>
21 #include <linux/unistd.h>
22 #include <linux/stddef.h>
23 #include <linux/personality.h>
24 #include <linux/uaccess.h>
25 #include <linux/user-return-notifier.h>
26 #include <linux/uprobes.h>
27 #include <linux/context_tracking.h>
28 #include <linux/entry-common.h>
29 #include <linux/syscalls.h>
30 
31 #include <asm/processor.h>
32 #include <asm/ucontext.h>
33 #include <asm/fpu/signal.h>
34 #include <asm/fpu/xstate.h>
35 #include <asm/vdso.h>
36 #include <asm/mce.h>
37 #include <asm/sighandling.h>
38 #include <asm/vm86.h>
39 
40 #ifdef CONFIG_X86_64
41 #include <linux/compat.h>
42 #include <asm/proto.h>
43 #include <asm/ia32_unistd.h>
44 #include <asm/fpu/xstate.h>
45 #endif /* CONFIG_X86_64 */
46 
47 #include <asm/syscall.h>
48 #include <asm/sigframe.h>
49 #include <asm/signal.h>
50 
51 #ifdef CONFIG_X86_64
52 /*
53  * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
54  * alone.  Using this generally makes no sense unless
55  * user_64bit_mode(regs) would return true.
56  */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;

	/*
	 * LAR loads the access-rights bytes of the descriptor selected
	 * by regs->ss and sets ZF on success.  If the selector is not
	 * loadable, ZF is clear and we substitute ar = 0 so the check
	 * below fails and SS gets replaced.
	 */
	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
	 */
	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
		regs->ss = __USER_DS;
}
78 # define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
79 #else
80 # define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
81 #endif
82 
/*
 * Restore user register state from the sigcontext written at signal
 * delivery time.  Runs on the sigreturn paths with @regs pointing at the
 * kernel-entry register frame that will be returned through.  Returns
 * true on success, false if the sigcontext could not be read or the FPU
 * state failed to restore (callers then raise SIGSEGV).
 */
static bool restore_sigcontext(struct pt_regs *regs,
			       struct sigcontext __user *usc,
			       unsigned long uc_flags)
{
	struct sigcontext sc;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * CONTEXT_COPY_SIZE stops at sigcontext::reserved1 on 64-bit;
	 * on 32-bit it is the whole structure.
	 */
	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
		return false;

#ifdef CONFIG_X86_32
	set_user_gs(regs, sc.gs);
	regs->fs = sc.fs;
	regs->es = sc.es;
	regs->ds = sc.ds;
#endif /* CONFIG_X86_32 */

	regs->bx = sc.bx;
	regs->cx = sc.cx;
	regs->dx = sc.dx;
	regs->si = sc.si;
	regs->di = sc.di;
	regs->bp = sc.bp;
	regs->ax = sc.ax;
	regs->sp = sc.sp;
	regs->ip = sc.ip;

#ifdef CONFIG_X86_64
	regs->r8 = sc.r8;
	regs->r9 = sc.r9;
	regs->r10 = sc.r10;
	regs->r11 = sc.r11;
	regs->r12 = sc.r12;
	regs->r13 = sc.r13;
	regs->r14 = sc.r14;
	regs->r15 = sc.r15;
#endif /* CONFIG_X86_64 */

	/* Get CS/SS and force CPL3 so userspace cannot resume at ring 0 */
	regs->cs = sc.cs | 0x03;
	regs->ss = sc.ss | 0x03;

	/* Only the FIX_EFLAGS subset of EFLAGS may come from userspace */
	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
	/* disable syscall checks */
	regs->orig_ax = -1;

#ifdef CONFIG_X86_64
	/*
	 * Fix up SS if needed for the benefit of old DOSEMU and
	 * CRIU.
	 */
	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
		force_valid_ss(regs);
#endif

	return fpu__restore_sig((void __user *)sc.fpstate,
			       IS_ENABLED(CONFIG_X86_32));
}
143 
/*
 * Write the register state in @regs (plus trap number, error code, CR2,
 * the old signal @mask and the FPU save-area pointer @fpstate) into the
 * user-space sigcontext @sc.  Must be called inside a
 * user_access_begin() section; jumps to the local Efault label and
 * returns -EFAULT on any fault.
 */
static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
		     struct pt_regs *regs, unsigned long mask)
{
#ifdef CONFIG_X86_32
	unsafe_put_user(get_user_gs(regs),
				  (unsigned int __user *)&sc->gs, Efault);
	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(regs->di, &sc->di, Efault);
	unsafe_put_user(regs->si, &sc->si, Efault);
	unsafe_put_user(regs->bp, &sc->bp, Efault);
	unsafe_put_user(regs->sp, &sc->sp, Efault);
	unsafe_put_user(regs->bx, &sc->bx, Efault);
	unsafe_put_user(regs->dx, &sc->dx, Efault);
	unsafe_put_user(regs->cx, &sc->cx, Efault);
	unsafe_put_user(regs->ax, &sc->ax, Efault);
#ifdef CONFIG_X86_64
	unsafe_put_user(regs->r8, &sc->r8, Efault);
	unsafe_put_user(regs->r9, &sc->r9, Efault);
	unsafe_put_user(regs->r10, &sc->r10, Efault);
	unsafe_put_user(regs->r11, &sc->r11, Efault);
	unsafe_put_user(regs->r12, &sc->r12, Efault);
	unsafe_put_user(regs->r13, &sc->r13, Efault);
	unsafe_put_user(regs->r14, &sc->r14, Efault);
	unsafe_put_user(regs->r15, &sc->r15, Efault);
#endif /* CONFIG_X86_64 */

	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
	unsafe_put_user(regs->ip, &sc->ip, Efault);
#ifdef CONFIG_X86_32
	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
#else /* !CONFIG_X86_32 */
	unsafe_put_user(regs->flags, &sc->flags, Efault);
	unsafe_put_user(regs->cs, &sc->cs, Efault);
	/* gs/fs are not part of pt_regs on 64-bit; report 0 */
	unsafe_put_user(0, &sc->gs, Efault);
	unsafe_put_user(0, &sc->fs, Efault);
	unsafe_put_user(regs->ss, &sc->ss, Efault);
#endif /* CONFIG_X86_32 */

	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

	/* non-iBCS2 extensions.. */
	unsafe_put_user(mask, &sc->oldmask, Efault);
	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
	return 0;
Efault:
	return -EFAULT;
}
200 
/*
 * Write a sigcontext into a frame being built inside a
 * user_access_begin() section; jump to @label on fault.
 *
 * Note: no trailing semicolon after while (0) -- the caller supplies
 * it.  A semicolon here would expand to an extra empty statement and
 * break use of the macro in an unbraced if/else.
 */
#define unsafe_put_sigcontext(sc, fp, regs, set, label)			\
do {									\
	if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))	\
		goto label;						\
} while (0)
206 
/*
 * Store the first 64 bits of sigset @set into the frame's uc_sigmask.
 * Must be inside a user_access_begin() section; jumps to @label on
 * fault.
 */
#define unsafe_put_sigmask(set, frame, label) \
	unsafe_put_user(*(__u64 *)(set), \
			(__u64 __user *)&(frame)->uc.uc_sigmask, \
			label)
211 
212 /*
213  * Set up a signal frame.
214  */
215 
216 /* x86 ABI requires 16-byte alignment */
217 #define FRAME_ALIGNMENT	16UL
218 
219 #define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)
220 
221 /*
222  * Determine which stack to use..
223  */
224 static unsigned long align_sigframe(unsigned long sp)
225 {
226 #ifdef CONFIG_X86_32
227 	/*
228 	 * Align the stack pointer according to the i386 ABI,
229 	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
230 	 */
231 	sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
232 #else /* !CONFIG_X86_32 */
233 	sp = round_down(sp, FRAME_ALIGNMENT) - 8;
234 #endif
235 	return sp;
236 }
237 
/*
 * Compute the user-stack address for a new signal frame of @frame_size
 * bytes, switching to the alternate signal stack or the legacy
 * restorer-based stack when applicable, and save the FPU state into the
 * frame.  On success *fpstate points at the FPU area and the frame base
 * is returned; on failure an always-bogus address (-1) is returned so
 * the subsequent user access faults and the task dies with SIGSEGV.
 */
static void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
	     void __user **fpstate)
{
	/* Default to using normal stack */
	bool nested_altstack = on_sig_stack(regs->sp);
	bool entering_altstack = false;
	unsigned long math_size = 0;
	unsigned long sp = regs->sp;
	unsigned long buf_fx = 0;

	/* Skip the 128-byte redzone below sp (x86-64 ABI) */
	if (IS_ENABLED(CONFIG_X86_64))
		sp -= 128;

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/*
		 * This checks nested_altstack via sas_ss_flags(). Sensible
		 * programs use SS_AUTODISARM, which disables that check, and
		 * programs that don't use SS_AUTODISARM get compatible.
		 */
		if (sas_ss_flags(sp) == 0) {
			sp = current->sas_ss_sp + current->sas_ss_size;
			entering_altstack = true;
		}
	} else if (IS_ENABLED(CONFIG_X86_32) &&
		   !nested_altstack &&
		   regs->ss != __USER_DS &&
		   !(ka->sa.sa_flags & SA_RESTORER) &&
		   ka->sa.sa_restorer) {
		/* This is the legacy signal stack switching. */
		sp = (unsigned long) ka->sa.sa_restorer;
		entering_altstack = true;
	}

	/* Reserve (and align) room for the FPU state below sp */
	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
				  &buf_fx, &math_size);
	*fpstate = (void __user *)sp;

	sp = align_sigframe(sp - frame_size);

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (unlikely((nested_altstack || entering_altstack) &&
		     !__on_sig_stack(sp))) {

		if (show_unhandled_signals && printk_ratelimit())
			pr_info("%s[%d] overflowed sigaltstack\n",
				current->comm, task_pid_nr(current));

		return (void __user *)-1L;
	}

	/* save i387 and extended state */
	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
		return (void __user *)-1L;

	return (void __user *)sp;
}
300 
301 #ifdef CONFIG_X86_32
302 static const struct {
303 	u16 poplmovl;
304 	u32 val;
305 	u16 int80;
306 } __attribute__((packed)) retcode = {
307 	0xb858,		/* popl %eax; movl $..., %eax */
308 	__NR_sigreturn,
309 	0x80cd,		/* int $0x80 */
310 };
311 
/*
 * Historical rt_sigreturn trampoline for 32-bit rt frames:
 * movl $__NR_rt_sigreturn, %eax; int $0x80.  Kept only so gdb can
 * recognize signal frames; padded to 8 bytes so it can be stored with
 * a single 64-bit put.
 */
static const struct {
	u8  movl;
	u32 val;
	u16 int80;
	u8  pad;
} __attribute__((packed)) rt_retcode = {
	0xb8,		/* movl $..., %eax */
	__NR_rt_sigreturn,
	0x80cd,		/* int $0x80 */
	0
};
323 
/*
 * Build a legacy (non-RT) 32-bit signal frame on the user stack and
 * redirect the register state so that the task enters the handler on
 * return to user mode.  Returns 0 or -EFAULT.
 */
static int
__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
	      struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	/* May return a bogus (-1) address; user_access_begin() then fails. */
	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
	/* Prefer the vdso sigreturn trampoline as the return path... */
	if (current->mm->context.vdso)
		restorer = current->mm->context.vdso +
			vdso_image_32.sym___kernel_sigreturn;
	else
		restorer = &frame->retcode;
	/* ...unless the handler registered its own restorer. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;

	/* Set up to return from userspace.  */
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
	user_access_end();

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	/* Enter the handler on the standard flat user segments. */
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
379 
/*
 * Build a 32-bit rt signal frame (siginfo + ucontext) on the user stack
 * and redirect the register state for handler entry.  Returns 0 or
 * -EFAULT.
 */
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	void __user *fp = NULL;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(sig, &frame->sig, Efault);
	/* Handler arguments 2 and 3 are passed as pointers on the stack */
	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
	unsafe_put_user(&frame->uc, &frame->puc, Efault);

	/* Create the ucontext.  */
	if (static_cpu_has(X86_FEATURE_XSAVE))
		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
	else
		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  */
	restorer = current->mm->context.vdso +
		vdso_image_32.sym___kernel_rt_sigreturn;
	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, &frame->pretcode, Efault);

	/*
	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	/* copy_siginfo_to_user() does its own user access handling */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	/* Enter the handler on the standard flat user segments. */
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
443 #else /* !CONFIG_X86_32 */
444 static unsigned long frame_uc_flags(struct pt_regs *regs)
445 {
446 	unsigned long flags;
447 
448 	if (boot_cpu_has(X86_FEATURE_XSAVE))
449 		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
450 	else
451 		flags = UC_SIGCONTEXT_SS;
452 
453 	if (likely(user_64bit_mode(regs)))
454 		flags |= UC_STRICT_RESTORE_SS;
455 
456 	return flags;
457 }
458 
/*
 * Build the native 64-bit rt signal frame.  x86-64 has no in-frame
 * trampoline, so SA_RESTORER is mandatory; its absence is an error.
 * Returns 0 or -EFAULT.
 */
static int __setup_rt_frame(int sig, struct ksignal *ksig,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	unsigned long uc_flags;

	/* x86-64 should always use SA_RESTORER. */
	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/*
	 * Set up the CS and SS registers to run signal handlers in
	 * 64-bit mode, even if the handler happens to be interrupting
	 * 32-bit or 16-bit code.
	 *
	 * SS is subtle.  In 64-bit mode, we don't need any particular
	 * SS descriptor, but we do need SS to be valid.  It's possible
	 * that the old SS is entirely bogus -- this can happen if the
	 * signal we're trying to deliver is #GP or #SS caused by a bad
	 * SS value.  We also have a compatibility issue here: DOSEMU
	 * relies on the contents of the SS register indicating the
	 * SS value at the time of the signal, even though that code in
	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
	 * avoids relying on sigreturn to restore SS; instead it uses
	 * a trampoline.)  So we do our best: if the old SS was valid,
	 * we keep it.  Otherwise we replace it.
	 */
	regs->cs = __USER_CS;

	if (unlikely(regs->ss != __USER_DS))
		force_valid_ss(regs);

	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
534 #endif /* CONFIG_X86_32 */
535 
536 #ifdef CONFIG_X86_X32_ABI
/*
 * Copy siginfo to an x32 user buffer.  The layout matches the generic
 * compat conversion except that SIGCHLD carries its times in the
 * dedicated _sigchld_x32 fields, fixed up here after the conversion.
 * Returns 0 or -EFAULT.
 */
static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
		const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (from->si_signo == SIGCHLD) {
		new._sifields._sigchld_x32._utime = from->si_utime;
		new._sifields._sigchld_x32._stime = from->si_stime;
	}
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}
551 
552 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
553 			   const struct kernel_siginfo *from)
554 {
555 	if (in_x32_syscall())
556 		return x32_copy_siginfo_to_user(to, from);
557 	return __copy_siginfo_to_user32(to, from);
558 }
559 #endif /* CONFIG_X86_X32_ABI */
560 
/*
 * Build an x32 rt signal frame.  Without CONFIG_X86_X32_ABI the body
 * compiles away and the function degenerates to "return 0"; it is never
 * reached in that case because is_x32_frame() is compile-time false.
 * Like the native 64-bit path, SA_RESTORER is mandatory.
 */
static int x32_setup_rt_frame(struct ksignal *ksig,
			      compat_sigset_t *set,
			      struct pt_regs *regs)
{
#ifdef CONFIG_X86_X32_ABI
	struct rt_sigframe_x32 __user *frame;
	unsigned long uc_flags;
	void __user *restorer;
	void __user *fp = NULL;

	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
		return -EFAULT;

	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);

	uc_flags = frame_uc_flags(regs);

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	/* Create the ucontext.  */
	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
	unsafe_put_user(0, &frame->uc.uc_link, Efault);
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
	restorer = ksig->ka.sa.sa_restorer;
	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
	unsafe_put_sigmask(set, frame, Efault);
	user_access_end();

	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
			return -EFAULT;
	}

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* We use the x32 calling convention here... */
	regs->di = ksig->sig;
	regs->si = (unsigned long) &frame->info;
	regs->dx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
#endif	/* CONFIG_X86_X32_ABI */

	return 0;
#ifdef CONFIG_X86_X32_ABI
Efault:
	user_access_end();
	return -EFAULT;
#endif
}
620 
621 /*
622  * Do a signal return; undo the signal stack.
623  */
624 #ifdef CONFIG_X86_32
SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *frame;
	sigset_t set;

	/*
	 * By the time int $0x80 runs, the 4-byte pretcode and the 4-byte
	 * sig argument have been consumed (see the retcode trampoline /
	 * __kernel_sigreturn), so the frame sits 8 bytes below sp.
	 */
	frame = (struct sigframe __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	/* Both 64-bit halves of the legacy frame's signal mask */
	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
	    __get_user(set.sig[1], &frame->extramask[0]))
		goto badframe;

	set_current_blocked(&set);

	/*
	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
	 * Save a few cycles by skipping the __get_user.
	 */
	if (!restore_sigcontext(regs, &frame->sc, 0))
		goto badframe;
	return regs->ax;

badframe:
	signal_fault(regs, frame, "sigreturn");

	return 0;
}
654 #endif /* CONFIG_X86_32 */
655 
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	/*
	 * The return into the restorer consumed the pretcode slot, so
	 * the frame sits one word below the current sp.
	 */
	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	/* Only the first 64 bits of the mask are stored in the frame */
	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}
685 
686 /*
687  * There are four different struct types for signal frame: sigframe_ia32,
688  * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
689  * -- the largest size. It means the size for 64-bit apps is a bit more
690  * than needed, but this keeps the code simple.
691  */
692 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
693 # define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
694 #else
695 # define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
696 #endif
697 
698 /*
699  * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
700  * If a signal frame starts at an unaligned address, extra space is required.
701  * This is the max alignment padding, conservatively.
702  */
703 #define MAX_XSAVE_PADDING	63UL
704 
705 /*
706  * The frame data is composed of the following areas and laid out as:
707  *
708  * -------------------------
709  * | alignment padding     |
710  * -------------------------
711  * | (f)xsave frame        |
712  * -------------------------
713  * | fsave header          |
714  * -------------------------
715  * | alignment padding     |
716  * -------------------------
717  * | siginfo + ucontext    |
718  * -------------------------
719  */
720 
721 /* max_frame_size tells userspace the worst case signal stack size. */
722 static unsigned long __ro_after_init max_frame_size;
723 static unsigned int __ro_after_init fpu_default_state_size;
724 
/*
 * Compute the worst-case signal frame size once at boot, based on the
 * boot-time FPU state size.  Consumed by get_sigframe_size() and by
 * sigaltstack_size_valid().
 */
void __init init_sigframe_size(void)
{
	fpu_default_state_size = fpu__get_fpstate_size();

	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;

	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;

	/* Userspace expects an aligned size. */
	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);

	pr_info("max sigframe size: %lu\n", max_frame_size);
}
738 
/* Report the worst-case signal frame size computed at boot. */
unsigned long get_sigframe_size(void)
{
	return max_frame_size;
}
743 
744 static inline int is_ia32_compat_frame(struct ksignal *ksig)
745 {
746 	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
747 		ksig->ka.sa.sa_flags & SA_IA32_ABI;
748 }
749 
750 static inline int is_ia32_frame(struct ksignal *ksig)
751 {
752 	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
753 }
754 
755 static inline int is_x32_frame(struct ksignal *ksig)
756 {
757 	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
758 		ksig->ka.sa.sa_flags & SA_X32_ABI;
759 }
760 
/*
 * Build the signal frame in the format matching the ABI the handler was
 * registered under (ia32 legacy/rt, x32, or native 64-bit rt).
 */
static int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	int usig = ksig->sig;
	sigset_t *set = sigmask_to_save();
	/* Same storage reinterpreted for the compat paths */
	compat_sigset_t *cset = (compat_sigset_t *) set;

	/* Perform fixup for the pre-signal frame. */
	rseq_signal_deliver(ksig, regs);

	/* Set up the stack frame */
	if (is_ia32_frame(ksig)) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			return ia32_setup_rt_frame(usig, ksig, cset, regs);
		else
			return ia32_setup_frame(usig, ksig, cset, regs);
	} else if (is_x32_frame(ksig)) {
		return x32_setup_rt_frame(ksig, cset, regs);
	} else {
		return __setup_rt_frame(ksig->sig, ksig, set, regs);
	}
}
783 
/*
 * Deliver one signal: fix up syscall-restart state, build the frame via
 * setup_rt_frame(), and adjust EFLAGS for handler entry.
 */
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	bool stepping, failed;
	struct fpu *fpu = &current->thread.fpu;

	if (v8086_mode(regs))
		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			/*
			 * Rewind over the 2-byte syscall instruction
			 * (int $0x80, syscall and sysenter are all two
			 * bytes) so it re-executes after the handler.
			 */
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
	 * so that register information in the sigcontext is correct and
	 * then notify the tracer before entering the signal handler.
	 */
	stepping = test_thread_flag(TIF_SINGLESTEP);
	if (stepping)
		user_disable_single_step(current);

	failed = (setup_rt_frame(ksig, regs) < 0);
	if (!failed) {
		/*
		 * Clear the direction flag as per the ABI for function entry.
		 *
		 * Clear RF when entering the signal handler, because
		 * it might disable possible debug exception from the
		 * signal handler.
		 *
		 * Clear TF for the case when it wasn't set by debugger to
		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
		 */
		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
		/*
		 * Ensure the signal handler starts with the new fpu state.
		 */
		fpu__clear_user_states(fpu);
	}
	signal_setup_done(failed, ksig, stepping);
}
844 
/*
 * Pick the restart_syscall number matching the ABI of the interrupted
 * syscall: ia32 tasks get __NR_ia32_restart_syscall, and x32 callers
 * keep the __X32_SYSCALL_BIT from the original orig_ax so the restart
 * goes back through the x32 entry.
 */
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
#ifdef CONFIG_IA32_EMULATION
	if (current->restart_block.arch_data & TS_COMPAT)
		return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
	return __NR_restart_syscall;
#endif
}
857 
858 /*
859  * Note that 'init' is a special process: it doesn't get signals it doesn't
860  * want to handle. Thus you cannot kill init even with a SIGKILL even by
861  * mistake.
862  */
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) != -1) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Re-execute the 2-byte syscall instruction */
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			/* Restart via the ABI-appropriate restart_syscall */
			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}
897 
/*
 * Called when a signal frame could not be set up or torn down:
 * rate-limitedly log the bad frame (@where names the failing path) and
 * force SIGSEGV on the task.
 */
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk("%s"
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig(SIGSEGV);
}
914 
#ifdef CONFIG_DYNAMIC_SIGFRAME
#ifdef CONFIG_STRICT_SIGALTSTACK_SIZE
/* Compile-time default for the sigaltstack size sanity check. */
static bool strict_sigaltstack_size __ro_after_init = true;
#else
static bool strict_sigaltstack_size __ro_after_init = false;
#endif

/* "strict_sas_size=" boot parameter overrides the compile-time default. */
static int __init strict_sas_size(char *arg)
{
	return kstrtobool(arg, &strict_sigaltstack_size);
}
__setup("strict_sas_size", strict_sas_size);
927 
928 /*
929  * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512
930  * exceeds that size already. As such programs might never use the
931  * sigaltstack they just continued to work. While always checking against
932  * the real size would be correct, this might be considered a regression.
933  *
934  * Therefore avoid the sanity check, unless enforced by kernel
935  * configuration or command line option.
936  *
937  * When dynamic FPU features are supported, the check is also enforced when
938  * the task has permissions to use dynamic features. Tasks which have no
939  * permission are checked against the size of the non-dynamic feature set
940  * if strict checking is enabled. This avoids forcing all tasks on the
941  * system to allocate large sigaltstacks even if they are never going
942  * to use a dynamic feature. As this is serialized via sighand::siglock
943  * any permission request for a dynamic feature either happened already
944  * or will see the newly install sigaltstack size in the permission checks.
945  */
/*
 * Validate a requested sigaltstack size against the worst-case frame
 * size; see the block comment above for the compatibility rationale.
 * Returns true if @ss_size is acceptable.
 */
bool sigaltstack_size_valid(size_t ss_size)
{
	/* Frame overhead excluding the boot-time default FPU area */
	unsigned long fsize = max_frame_size - fpu_default_state_size;
	u64 mask;

	lockdep_assert_held(&current->sighand->siglock);

	if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
		return true;

	/* Account for the FPU size this task group is permitted to use */
	fsize += current->group_leader->thread.fpu.perm.__user_state_size;
	if (likely(ss_size > fsize))
		return true;

	if (strict_sigaltstack_size)
		return ss_size > fsize;

	/* Only enforce for tasks permitted to use dynamic xfeatures */
	mask = current->group_leader->thread.fpu.perm.__state_perm;
	if (mask & XFEATURE_MASK_USER_DYNAMIC)
		return ss_size > fsize;

	return true;
}
969 #endif /* CONFIG_DYNAMIC_SIGFRAME */
970 
971 #ifdef CONFIG_X86_X32_ABI
COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe_x32 __user *frame;
	sigset_t set;
	unsigned long uc_flags;

	/* The pretcode slot has been consumed; frame is 8 bytes below sp */
	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	/* Only the first 64 bits of the mask are stored in the frame */
	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
		goto badframe;
	if (__get_user(uc_flags, &frame->uc.uc_flags))
		goto badframe;

	set_current_blocked(&set);

	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
		goto badframe;

	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->ax;

badframe:
	signal_fault(regs, frame, "x32 rt_sigreturn");
	return 0;
}
1002 #endif
1003