// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>

#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/sighandling.h>

#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>

/*
 * If regs->ss will cause an IRET fault, change it. Otherwise leave it
 * alone. Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
        u32 ar;
        asm volatile ("lar %[old_ss], %[ar]\n\t"
                      "jz 1f\n\t"               /* If invalid: */
                      "xorl %[ar], %[ar]\n\t"   /* set ar = 0 */
                      "1:"
                      : [ar] "=r" (ar)
                      : [old_ss] "rm" ((u16)regs->ss));

        /*
         * For a valid 64-bit user context, we need DPL 3, type
         * read-write data or read-write exp-down data, and S and P
         * set. We can't use VERW because VERW doesn't check the
         * P bit.
         */
        ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
        if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
            ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
                regs->ss = __USER_DS;
}

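/*
 * Copy the saved register state out of a 64-bit sigcontext back into
 * pt_regs. Returns true on success, false if the user frame could not
 * be read or the FPU state failed to restore.
 */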
static bool restore_sigcontext(struct pt_regs *regs,
                               struct sigcontext __user *usc,
                               unsigned long uc_flags)
{
        struct sigcontext sc;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        if (copy_from_user(&sc, usc, offsetof(struct sigcontext, reserved1)))
                return false;

        regs->bx = sc.bx;
        regs->cx = sc.cx;
        regs->dx = sc.dx;
        regs->si = sc.si;
        regs->di = sc.di;
        regs->bp = sc.bp;
        regs->ax = sc.ax;
        regs->sp = sc.sp;
        regs->ip = sc.ip;
        regs->r8 = sc.r8;
        regs->r9 = sc.r9;
        regs->r10 = sc.r10;
        regs->r11 = sc.r11;
        regs->r12 = sc.r12;
        regs->r13 = sc.r13;
        regs->r14 = sc.r14;
        regs->r15 = sc.r15;

        /* Get CS/SS and force CPL3 */
        regs->cs = sc.cs | 0x03;
        regs->ss = sc.ss | 0x03;

        regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
        /* disable syscall checks */
        regs->orig_ax = -1;

        /*
         * Fix up SS if needed for the benefit of old DOSEMU and
         * CRIU.
         */
        if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
                force_valid_ss(regs);

        return fpu__restore_sig((void __user *)sc.fpstate, 0);
}

static __always_inline int
__unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
                          struct pt_regs *regs, unsigned long mask)
{
        unsafe_put_user(regs->di, &sc->di, Efault);
        unsafe_put_user(regs->si, &sc->si, Efault);
        unsafe_put_user(regs->bp, &sc->bp, Efault);
        unsafe_put_user(regs->sp, &sc->sp, Efault);
        unsafe_put_user(regs->bx, &sc->bx, Efault);
        unsafe_put_user(regs->dx, &sc->dx, Efault);
        unsafe_put_user(regs->cx, &sc->cx, Efault);
        unsafe_put_user(regs->ax, &sc->ax, Efault);
        unsafe_put_user(regs->r8, &sc->r8, Efault);
        unsafe_put_user(regs->r9, &sc->r9, Efault);
        unsafe_put_user(regs->r10, &sc->r10, Efault);
        unsafe_put_user(regs->r11, &sc->r11, Efault);
        unsafe_put_user(regs->r12, &sc->r12, Efault);
        unsafe_put_user(regs->r13, &sc->r13, Efault);
        unsafe_put_user(regs->r14, &sc->r14, Efault);
        unsafe_put_user(regs->r15, &sc->r15, Efault);

        unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
        unsafe_put_user(current->thread.error_code, &sc->err, Efault);
        unsafe_put_user(regs->ip, &sc->ip, Efault);
        unsafe_put_user(regs->flags, &sc->flags, Efault);
        unsafe_put_user(regs->cs, &sc->cs, Efault);
        unsafe_put_user(0, &sc->gs, Efault);
        unsafe_put_user(0, &sc->fs, Efault);
        unsafe_put_user(regs->ss, &sc->ss, Efault);

        unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);

        /* non-iBCS2 extensions.. */
        unsafe_put_user(mask, &sc->oldmask, Efault);
        unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
        return 0;
Efault:
        return -EFAULT;
}

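/* Helpers for use inside a user_access_begin()/user_access_end() section. */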
#define unsafe_put_sigcontext(sc, fp, regs, set, label)                 \
do {                                                                    \
        if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))      \
                goto label;                                             \
} while(0);

#define unsafe_put_sigmask(set, frame, label) \
        unsafe_put_user(*(__u64 *)(set),        \
                        (__u64 __user *)&(frame)->uc.uc_sigmask, \
                        label)

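/*
 * Compute the uc_flags for a new signal frame: advertise XSAVE-format
 * FP state when the CPU supports it, and request strict SS restore
 * only for genuine 64-bit contexts.
 */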
static unsigned long frame_uc_flags(struct pt_regs *regs)
{
        unsigned long flags;

        if (boot_cpu_has(X86_FEATURE_XSAVE))
                flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
        else
                flags = UC_SIGCONTEXT_SS;

        if (likely(user_64bit_mode(regs)))
                flags |= UC_STRICT_RESTORE_SS;

        return flags;
}

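/*
 * Build the 64-bit rt signal frame on the user stack and point the
 * registers at the handler.
 */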
int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *set = sigmask_to_save();
        struct rt_sigframe __user *frame;
        void __user *fp = NULL;
        unsigned long uc_flags;

        /* x86-64 should always use SA_RESTORER. */
        if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
                return -EFAULT;

        frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
        uc_flags = frame_uc_flags(regs);

        if (!user_access_begin(frame, sizeof(*frame)))
                return -EFAULT;

        /* Create the ucontext. */
        unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
        unsafe_put_user(0, &frame->uc.uc_link, Efault);
        unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);

        /* Set up to return from userspace. If provided, use a stub
           already in userspace. */
        unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
        unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
        unsafe_put_sigmask(set, frame, Efault);
        user_access_end();

        if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                if (copy_siginfo_to_user(&frame->info, &ksig->info))
                        return -EFAULT;
        }

        if (setup_signal_shadow_stack(ksig))
                return -EFAULT;

        /* Set up registers for signal handler */
        regs->di = ksig->sig;
        /* In case the signal handler was declared without prototypes */
        regs->ax = 0;

        /* This also works for non SA_SIGINFO handlers because they expect the
           next argument after the signal number on the stack. */
        regs->si = (unsigned long)&frame->info;
        regs->dx = (unsigned long)&frame->uc;
        regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

        regs->sp = (unsigned long)frame;

        /*
         * Set up the CS and SS registers to run signal handlers in
         * 64-bit mode, even if the handler happens to be interrupting
         * 32-bit or 16-bit code.
         *
         * SS is subtle. In 64-bit mode, we don't need any particular
         * SS descriptor, but we do need SS to be valid. It's possible
         * that the old SS is entirely bogus -- this can happen if the
         * signal we're trying to deliver is #GP or #SS caused by a bad
         * SS value. We also have a compatibility issue here: DOSEMU
         * relies on the contents of the SS register indicating the
         * SS value at the time of the signal, even though that code in
         * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU
         * avoids relying on sigreturn to restore SS; instead it uses
         * a trampoline.) So we do our best: if the old SS was valid,
         * we keep it. Otherwise we replace it.
         */
        regs->cs = __USER_CS;

        if (unlikely(regs->ss != __USER_DS))
                force_valid_ss(regs);

        return 0;

Efault:
        user_access_end();
        return -EFAULT;
}

/*
 * Do a signal return; undo the signal stack.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct rt_sigframe __user *frame;
        sigset_t set;
        unsigned long uc_flags;

        prevent_single_step_upon_eretu(regs);

        frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
                goto badframe;
        if (__get_user(uc_flags, &frame->uc.uc_flags))
                goto badframe;

        set_current_blocked(&set);

        if (restore_altstack(&frame->uc.uc_stack))
                goto badframe;

        if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
                goto badframe;

        if (restore_signal_shadow_stack())
                goto badframe;

        return regs->ax;

badframe:
        signal_fault(regs, frame, "rt_sigreturn");
        return 0;
}

#ifdef CONFIG_X86_X32_ABI
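/*
 * x32 uses the compat siginfo layout, except that SIGCHLD carries
 * 64-bit utime/stime fields, which are filled in here.
 */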
static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
                                    const struct kernel_siginfo *from)
{
        struct compat_siginfo new;

        copy_siginfo_to_external32(&new, from);
        if (from->si_signo == SIGCHLD) {
                new._sifields._sigchld_x32._utime = from->si_utime;
                new._sifields._sigchld_x32._stime = from->si_stime;
        }
        if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
                return -EFAULT;
        return 0;
}

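/* Pick the x32 or ia32 siginfo layout based on the current syscall ABI. */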
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
                           const struct kernel_siginfo *from)
{
        if (in_x32_syscall())
                return x32_copy_siginfo_to_user(to, from);
        return __copy_siginfo_to_user32(to, from);
}

int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
        compat_sigset_t *set = (compat_sigset_t *) sigmask_to_save();
        struct rt_sigframe_x32 __user *frame;
        unsigned long uc_flags;
        void __user *restorer;
        void __user *fp = NULL;

        if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
                return -EFAULT;

        frame = get_sigframe(ksig, regs, sizeof(*frame), &fp);

        uc_flags = frame_uc_flags(regs);

        if (setup_signal_shadow_stack(ksig))
                return -EFAULT;

        if (!user_access_begin(frame, sizeof(*frame)))
                return -EFAULT;

        /* Create the ucontext. */
        unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
        unsafe_put_user(0, &frame->uc.uc_link, Efault);
        unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
        unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
        restorer = ksig->ka.sa.sa_restorer;
        unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
        unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
        unsafe_put_sigmask(set, frame, Efault);
        user_access_end();

        if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
                        return -EFAULT;
        }

        /* Set up registers for signal handler */
        regs->sp = (unsigned long) frame;
        regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

        /* We use the x32 calling convention here... */
        regs->di = ksig->sig;
        regs->si = (unsigned long) &frame->info;
        regs->dx = (unsigned long) &frame->uc;

        loadsegment(ds, __USER_DS);
        loadsegment(es, __USER_DS);

        regs->cs = __USER_CS;
        regs->ss = __USER_DS;

        return 0;

Efault:
        user_access_end();
        return -EFAULT;
}

COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct rt_sigframe_x32 __user *frame;
        sigset_t set;
        unsigned long uc_flags;

        prevent_single_step_upon_eretu(regs);

        frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
                goto badframe;
        if (__get_user(uc_flags, &frame->uc.uc_flags))
                goto badframe;

        set_current_blocked(&set);

        if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
                goto badframe;

        if (restore_signal_shadow_stack())
                goto badframe;

        if (compat_restore_altstack(&frame->uc.uc_stack))
                goto badframe;

        return regs->ax;

badframe:
        signal_fault(regs, frame, "x32 rt_sigreturn");
        return 0;
}
#endif /* CONFIG_X86_X32_ABI */

#ifdef CONFIG_COMPAT
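/*
 * Tag a freshly installed sigaction with the ABI it was registered
 * from, so that signal delivery later builds a matching frame.
 */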
void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
{
        if (!act)
                return;

        if (in_ia32_syscall())
                act->sa.sa_flags |= SA_IA32_ABI;
        if (in_x32_syscall())
                act->sa.sa_flags |= SA_X32_ABI;
}
#endif /* CONFIG_COMPAT */

/*
 * If adding a new si_code, there is probably new data in
 * the siginfo. Make sure folks bumping the si_code
 * limits also have to look at this code. Make sure any
 * new fields are handled in copy_siginfo_to_user32()!
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);

/* This is part of the ABI and can never change in size: */
static_assert(sizeof(siginfo_t) == 128);

/* This is a part of the ABI and can never change in alignment */
static_assert(__alignof__(siginfo_t) == 8);

/*
 * The offsets of all the (unioned) si_fields are fixed
 * in the ABI, of course. Make sure none of them ever
 * move and are always at the beginning:
 */
static_assert(offsetof(siginfo_t, si_signo) == 0);
static_assert(offsetof(siginfo_t, si_errno) == 4);
static_assert(offsetof(siginfo_t, si_code) == 8);

/*
 * Ensure that the size of each si_field never changes.
 * If it does, it is a sign that the
 * copy_siginfo_to_user32() code below needs to be updated
 * along with the size in the CHECK_SI_SIZE().
 *
 * We repeat this check for both the generic and compat
 * siginfos.
 *
 * Note: it is OK for these to grow as long as the whole
 * structure stays within the padding size (checked
 * above).
 */

#define CHECK_SI_OFFSET(name)                                           \
        static_assert(offsetof(siginfo_t, _sifields) ==                 \
                      offsetof(siginfo_t, _sifields.name))
#define CHECK_SI_SIZE(name, size)                                       \
        static_assert(sizeof_field(siginfo_t, _sifields.name) == size)

CHECK_SI_OFFSET(_kill);
CHECK_SI_SIZE  (_kill, 2*sizeof(int));
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);

CHECK_SI_OFFSET(_timer);
CHECK_SI_SIZE  (_timer, 6*sizeof(int));
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_value) == 0x18);

CHECK_SI_OFFSET(_rt);
CHECK_SI_SIZE  (_rt, 4*sizeof(int));
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_value) == 0x18);

CHECK_SI_OFFSET(_sigchld);
CHECK_SI_SIZE  (_sigchld, 8*sizeof(int));
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);

#ifdef CONFIG_X86_X32_ABI
/* no _sigchld_x32 in the generic siginfo_t */
static_assert(sizeof_field(compat_siginfo_t, _sifields._sigchld_x32) ==
              7*sizeof(int));
static_assert(offsetof(compat_siginfo_t, _sifields) ==
              offsetof(compat_siginfo_t, _sifields._sigchld_x32));
static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) == 0x18);
static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) == 0x20);
#endif

CHECK_SI_OFFSET(_sigfault);
CHECK_SI_SIZE  (_sigfault, 8*sizeof(int));
static_assert(offsetof(siginfo_t, si_addr) == 0x10);

static_assert(offsetof(siginfo_t, si_trapno) == 0x18);

static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);

static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);

static_assert(offsetof(siginfo_t, si_pkey) == 0x20);

static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);

CHECK_SI_OFFSET(_sigpoll);
CHECK_SI_SIZE  (_sigpoll, 4*sizeof(int));
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);

CHECK_SI_OFFSET(_sigsys);
CHECK_SI_SIZE  (_sigsys, 4*sizeof(int));
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1C);

/* any new si_fields should be added here */