1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 2018 The FreeBSD Foundation
5 * Copyright (c) 1992 Terrence R. Lambert.
6 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * William Jolitz.
11 *
12 * Portions of this software were developed by A. Joseph Koshy under
13 * sponsorship from the FreeBSD Foundation and Google, Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 */
43
44 #include <sys/cdefs.h>
45 #include "opt_cpu.h"
46 #include "opt_ddb.h"
47 #include "opt_kstack_pages.h"
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/systm.h>
52 #include <sys/exec.h>
53 #include <sys/imgact.h>
54 #include <sys/kdb.h>
55 #include <sys/kernel.h>
56 #include <sys/ktr.h>
57 #include <sys/linker.h>
58 #include <sys/lock.h>
59 #include <sys/malloc.h>
60 #include <sys/mutex.h>
61 #include <sys/pcpu.h>
62 #include <sys/ptrace.h>
63 #include <sys/reg.h>
64 #include <sys/rwlock.h>
65 #include <sys/signalvar.h>
66 #include <sys/syscallsubr.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysent.h>
69 #include <sys/sysproto.h>
70 #include <sys/ucontext.h>
71 #include <sys/vmmeter.h>
72
73 #include <vm/vm.h>
74 #include <vm/vm_param.h>
75 #include <vm/vm_extern.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80
81 #ifdef DDB
82 #ifndef KDB
83 #error KDB must be enabled in order for DDB to work!
84 #endif
85 #include <ddb/ddb.h>
86 #include <ddb/db_sym.h>
87 #endif
88
89 #include <machine/cpu.h>
90 #include <machine/cputypes.h>
91 #include <machine/md_var.h>
92 #include <machine/pcb.h>
93 #include <machine/pcb_ext.h>
94 #include <machine/proc.h>
95 #include <machine/sigframe.h>
96 #include <machine/specialreg.h>
97 #include <machine/sysarch.h>
98 #include <machine/trap.h>
99
100 static void fpstate_drop(struct thread *td);
101 static void get_fpcontext(struct thread *td, mcontext_t *mcp,
102 char *xfpusave, size_t xfpusave_len);
103 static int set_fpcontext(struct thread *td, mcontext_t *mcp,
104 char *xfpustate, size_t xfpustate_len);
105 #ifdef COMPAT_43
106 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
107 #endif
108 #ifdef COMPAT_FREEBSD4
109 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
110 #endif
111
112 extern struct sysentvec elf32_freebsd_sysvec;
113
114 _Static_assert(sizeof(mcontext_t) == 640, "mcontext_t size incorrect");
115 _Static_assert(sizeof(ucontext_t) == 704, "ucontext_t size incorrect");
116 _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect");
117
118 /*
119 * Send an interrupt to process.
120 *
121 * Stack is set up to allow sigcode stored at top to call routine,
122 * followed by call to sigreturn routine below. After sigreturn
123 * resets the signal mask, the stack, and the frame pointer, it
124 * returns to the user specified pc, psl.
125 */
#ifdef COMPAT_43
/*
 * 4.3BSD-compatible signal delivery: build a 'struct osigframe'
 * (old-style sigcontext plus handler arguments) on the user stack and
 * point the trapframe at the old signal trampoline, for processes whose
 * handlers were installed through the historical signal interfaces.
 *
 * Called with the proc lock and ps_mtx held; both are dropped around
 * the copyout and reacquired before returning, as the caller expects.
 */
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/*
	 * Allocate space for the signal handler context: at the top of
	 * the alternate stack if one is requested and not yet in use,
	 * otherwise just below the interrupted stack pointer.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	/* sf_scp points at the sigcontext within the user-stack frame. */
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	/* Drop both locks around the user-space copyout below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	/* %gs is not in the trapframe; read it from the hardware. */
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		/* Without VME, merge the software-emulated VIF/VIP bits. */
		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.  If that fails the
	 * process has an unusable stack, so terminate it via SIGILL.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Redirect the return to user mode into the signal trampoline. */
	regs->tf_esp = (int)fp;
	if (PROC_HAS_SHP(p)) {
		/* Old trampoline lives after the new one in the shared page. */
		regs->tf_eip = PROC_SIGCODE(p) + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = PROC_PS_STRINGS(p) - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	/* Reacquire the locks the caller expects to still hold. */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */
254
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x-compatible signal delivery: like sendsig(), but using the
 * 'struct freebsd4_sigframe'/ucontext layout and the FreeBSD 4 signal
 * trampoline.  No extended FPU state is saved in this layout.
 *
 * Called with the proc lock and ps_mtx held; both are dropped around
 * the copyout and reacquired before returning.
 */
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct freebsd4_sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	/* %gs is not in the trapframe; read it from the hardware. */
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	/* mc_fs..mc_ss mirror the trapframe layout; copy it wholesale. */
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct freebsd4_sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct freebsd4_sigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct freebsd4_sigframe *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	/* Drop both locks around the user-space copyout below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		/* Without VME, merge the software-emulated VIF/VIP bits. */
		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.  If that fails the
	 * process has an unusable stack, so terminate it via SIGILL.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Enter the FreeBSD 4 trampoline within the shared page. */
	regs->tf_esp = (int)sfp;
	regs->tf_eip = PROC_SIGCODE(p) + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	/* Reacquire the locks the caller expects to still hold. */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */
374
/*
 * Native-ABI signal delivery: capture the interrupted machine context
 * (including FPU and, when XSAVE is in use, the extended FPU state)
 * into a 'struct sigframe', copy it out onto the user stack (normal or
 * alternate), and rewrite the trapframe so the thread resumes in the
 * signal trampoline, which invokes the handler and then sigreturn(2).
 *
 * Called with the proc lock and ps_mtx held; both are dropped around
 * the copyouts and reacquired before returning.  Dispatches to the
 * compat variants when the handler was registered through an older ABI.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	/* Handler registered through the FreeBSD 4 ABI? */
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	/* Handler registered through the old 4.3BSD interfaces? */
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/*
	 * Stage a kernel-stack buffer for the extended (XSAVE) FPU
	 * state that does not fit in 'union savefpu'; it is copied out
	 * to the user stack separately below.
	 */
	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	/* %gs is not in the trapframe; read it from the hardware. */
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	/* mc_fs..mc_ss mirror the trapframe layout; copy it wholesale. */
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		/* Leave a 128-byte gap below the interrupted stack pointer. */
		sp = (char *)regs->tf_esp - 128;
	if (xfpusave != NULL) {
		/* Reserve the extended FPU area, 64-byte aligned. */
		sp -= xfpusave_len;
		sp = (char *)((unsigned int)sp & ~0x3F);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	/* Drop both locks around the user-space copyouts below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		/* Without VME, merge the software-emulated VIF/VIP bits. */
		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe (and the extended FPU state, if any) out to
	 * the user's stack.  If that fails the process has an unusable
	 * stack, so terminate it via SIGILL.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/*
	 * Enter the signal trampoline: in the shared page when present,
	 * otherwise at the sigcode copied out above the ps_strings.
	 */
	regs->tf_esp = (int)sfp;
	regs->tf_eip = PROC_SIGCODE(p);
	if (regs->tf_eip == 0)
		regs->tf_eip = PROC_PS_STRINGS(p) - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	/* Reacquire the locks the caller expects to still hold. */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
536
537 /*
538 * System call to cleanup state after a signal has been taken. Reset
539 * signal mask and stack state from context left by sendsig (above).
540 * Return to previous pc and psl as specified by context left by
541 * sendsig. Check carefully to make sure that the user has not
542 * modified the state to gain improper privileges.
543 */
#ifdef COMPAT_43
/*
 * 4.3BSD-compatible sigreturn: restore the machine context saved by
 * osendsig() from the user-supplied osigcontext, after validating that
 * the user has not smuggled in privileged eflags bits or a privileged
 * %cs selector.  Returns EJUSTRETURN on success so the syscall return
 * path leaves the freshly restored register state untouched.
 */
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	/* Snapshot the user's sigcontext into a kernel-side copy. */
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		/*
		 * Only let the user change the bits allowed for vm86
		 * mode; PSL_VM itself is forced back on.
		 */
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	/* We are no longer in a trap context. */
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	/* Restore the old-style (single-word) signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */
652
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x-compatible sigreturn: restore the machine context from a
 * 'struct freebsd4_ucontext' built by freebsd4_sendsig(), validating
 * eflags and %cs as in sys_sigreturn().  Returns EJUSTRETURN so the
 * syscall return path keeps the restored register state.
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{
	struct freebsd4_ucontext uc;
	struct trapframe *regs;
	struct freebsd4_ucontext *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	/* Snapshot the user's ucontext into a kernel-side copy. */
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		/*
		 * Only let the user change the bits allowed for vm86
		 * mode; PSL_VM itself is forced back on.
		 */
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		/* mc_fs..mc_ss mirror the trapframe layout. */
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf(
			    "pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}
	/* We are no longer in a trap context. */
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */
751
/*
 * Native sigreturn(2): restore the machine context (including FPU and,
 * when present, extended XSAVE state) from the ucontext built by
 * sendsig(), after validating mc_flags, eflags, %cs, and the extended
 * state length against what the kernel is willing to accept.  Returns
 * EJUSTRETURN so the syscall return path keeps the restored registers.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	p = td->td_proc;

	/* Snapshot the user's ucontext into a kernel-side copy. */
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	/* Reject mcontext flag bits we do not understand. */
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		/*
		 * Only let the user change the bits allowed for vm86
		 * mode; PSL_VM itself is forced back on.
		 */
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		/* mc_fs..mc_ss mirror the trapframe layout. */
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		/*
		 * Pull in the extended FPU state, if supplied, after
		 * bounding its length by what the CPU can actually hold.
		 */
		if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
			xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
			if (xfpustate_len > cpu_max_ext_state_size -
			    sizeof(union savefpu)) {
				uprintf(
				    "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
				    p->p_pid, td->td_name, xfpustate_len);
				return (EINVAL);
			}
			xfpustate = __builtin_alloca(xfpustate_len);
			error = copyin(
			    (const void *)uc.uc_mcontext.mc_xfpustate,
			    xfpustate, xfpustate_len);
			if (error != 0) {
				uprintf(
				    "pid %d (%s): sigreturn copying xfpustate failed\n",
				    p->p_pid, td->td_name);
				return (error);
			}
		} else {
			xfpustate = NULL;
			xfpustate_len = 0;
		}
		ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
		    xfpustate_len);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}
	/* We are no longer in a trap context. */
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
886
887 /*
888 * Reset the hardware debug registers if they were in use.
889 * They won't have any meaning for the newly exec'd process.
890 */
891 void
x86_clear_dbregs(struct pcb * pcb)892 x86_clear_dbregs(struct pcb *pcb)
893 {
894 if ((pcb->pcb_flags & PCB_DBREGS) == 0)
895 return;
896
897 pcb->pcb_dr0 = 0;
898 pcb->pcb_dr1 = 0;
899 pcb->pcb_dr2 = 0;
900 pcb->pcb_dr3 = 0;
901 pcb->pcb_dr6 = 0;
902 pcb->pcb_dr7 = 0;
903
904 if (pcb == curpcb) {
905 /*
906 * Clear the debug registers on the running CPU,
907 * otherwise they will end up affecting the next
908 * process we switch to.
909 */
910 reset_dbregs();
911 }
912 pcb->pcb_flags &= ~PCB_DBREGS;
913 }
914
#ifdef COMPAT_43
/*
 * Install LDT descriptor 0: an expand-up, 32-bit, user-privilege,
 * page-granular code segment whose base points at the lcall trampoline
 * placed just below the sysentvec's ps_strings.
 */
static void
setup_priv_lcall_gate(struct proc *p)
{
	struct i386_ldt_args ldt_args;
	union descriptor ldt_desc;
	u_int tramp_addr;

	tramp_addr = p->p_sysent->sv_psstrings - sz_lcall_tramp;

	bzero(&ldt_args, sizeof(ldt_args));
	ldt_args.start = 0;
	ldt_args.num = 1;

	bzero(&ldt_desc, sizeof(ldt_desc));
	ldt_desc.sd.sd_lobase = tramp_addr;
	ldt_desc.sd.sd_hibase = tramp_addr >> 24;
	ldt_desc.sd.sd_lolimit = 0xffff;
	ldt_desc.sd.sd_hilimit = 0xf;
	ldt_desc.sd.sd_type = SDT_MEMERA;
	ldt_desc.sd.sd_dpl = SEL_UPL;
	ldt_desc.sd.sd_p = 1;
	ldt_desc.sd.sd_def32 = 1;
	ldt_desc.sd.sd_gran = 1;

	i386_set_ldt(curthread, &ldt_args, &ldt_desc);
}
#endif
940
941 /*
942 * Reset registers to default values on exec.
943 */
/*
 * Reset registers to default values on exec: tear down the old image's
 * LDT and segment bases, build a fresh trapframe that starts at the new
 * entry point, and drop any debug-register and FPU state left over from
 * the previous program.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *regs;
	struct pcb *pcb;
	register_t saved_eflags;

	regs = td->td_frame;
	pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	/*
	 * Free any per-process LDT.  Note the asymmetric unlock:
	 * user_ldt_free() releases dt_lock itself.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

#ifdef COMPAT_43
	/*
	 * Non-default sysentvecs need LDT slot 0 set up to describe the
	 * lcall syscall trampoline below their ps_strings.
	 */
	if (td->td_proc->p_sysent->sv_psstrings !=
	    elf32_freebsd_sysvec.sv_psstrings)
		setup_priv_lcall_gate(td->td_proc);
#endif

	/*
	 * Reset the fs and gs bases.  The values from the old address
	 * space do not make sense for the new program.  In particular,
	 * gsbase might be the TLS base for the old program but the new
	 * program has no TLS now.
	 */
	set_fsbase(td, 0);
	set_gsbase(td, 0);

	/* Make sure edx is 0x0 on entry. Linux binaries depend on it. */
	/* The bzero clears %edx; only the trace flag (PSL_T) survives. */
	saved_eflags = regs->tf_eflags & PSL_T;
	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | saved_eflags;
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = (register_t)imgp->ps_strings;

	x86_clear_dbregs(pcb);

	/* Start the new image with the default FPU control word. */
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}
1004
1005 int
fill_regs(struct thread * td,struct reg * regs)1006 fill_regs(struct thread *td, struct reg *regs)
1007 {
1008 struct pcb *pcb;
1009 struct trapframe *tp;
1010
1011 tp = td->td_frame;
1012 pcb = td->td_pcb;
1013 regs->r_gs = pcb->pcb_gs;
1014 return (fill_frame_regs(tp, regs));
1015 }
1016
1017 int
fill_frame_regs(struct trapframe * tp,struct reg * regs)1018 fill_frame_regs(struct trapframe *tp, struct reg *regs)
1019 {
1020
1021 regs->r_fs = tp->tf_fs;
1022 regs->r_es = tp->tf_es;
1023 regs->r_ds = tp->tf_ds;
1024 regs->r_edi = tp->tf_edi;
1025 regs->r_esi = tp->tf_esi;
1026 regs->r_ebp = tp->tf_ebp;
1027 regs->r_ebx = tp->tf_ebx;
1028 regs->r_edx = tp->tf_edx;
1029 regs->r_ecx = tp->tf_ecx;
1030 regs->r_eax = tp->tf_eax;
1031 regs->r_eip = tp->tf_eip;
1032 regs->r_cs = tp->tf_cs;
1033 regs->r_eflags = tp->tf_eflags;
1034 regs->r_esp = tp->tf_esp;
1035 regs->r_ss = tp->tf_ss;
1036 regs->r_err = 0;
1037 regs->r_trapno = 0;
1038 return (0);
1039 }
1040
1041 int
set_regs(struct thread * td,struct reg * regs)1042 set_regs(struct thread *td, struct reg *regs)
1043 {
1044 struct pcb *pcb;
1045 struct trapframe *tp;
1046
1047 tp = td->td_frame;
1048 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
1049 !CS_SECURE(regs->r_cs))
1050 return (EINVAL);
1051 pcb = td->td_pcb;
1052 tp->tf_fs = regs->r_fs;
1053 tp->tf_es = regs->r_es;
1054 tp->tf_ds = regs->r_ds;
1055 tp->tf_edi = regs->r_edi;
1056 tp->tf_esi = regs->r_esi;
1057 tp->tf_ebp = regs->r_ebp;
1058 tp->tf_ebx = regs->r_ebx;
1059 tp->tf_edx = regs->r_edx;
1060 tp->tf_ecx = regs->r_ecx;
1061 tp->tf_eax = regs->r_eax;
1062 tp->tf_eip = regs->r_eip;
1063 tp->tf_cs = regs->r_cs;
1064 tp->tf_eflags = regs->r_eflags;
1065 tp->tf_esp = regs->r_esp;
1066 tp->tf_ss = regs->r_ss;
1067 pcb->pcb_gs = regs->r_gs;
1068 return (0);
1069 }
1070
1071 int
fill_fpregs(struct thread * td,struct fpreg * fpregs)1072 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1073 {
1074
1075 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
1076 P_SHOULDSTOP(td->td_proc),
1077 ("not suspended thread %p", td));
1078 npxgetregs(td);
1079 if (cpu_fxsr)
1080 npx_fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
1081 (struct save87 *)fpregs);
1082 else
1083 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
1084 sizeof(*fpregs));
1085 return (0);
1086 }
1087
1088 int
set_fpregs(struct thread * td,struct fpreg * fpregs)1089 set_fpregs(struct thread *td, struct fpreg *fpregs)
1090 {
1091
1092 critical_enter();
1093 if (cpu_fxsr)
1094 npx_set_fpregs_xmm((struct save87 *)fpregs,
1095 &get_pcb_user_save_td(td)->sv_xmm);
1096 else
1097 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
1098 sizeof(*fpregs));
1099 npxuserinited(td);
1100 critical_exit();
1101 return (0);
1102 }
1103
/*
 * Get machine context.
 *
 * Snapshot td's user register state, FPU state and segment bases into
 * *mcp.  With GET_MC_CLEAR_RET, the syscall return registers (%eax,
 * %edx) and the carry flag are reported as cleared, so resuming from
 * the saved context looks like a successful syscall returning 0.
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;

	/* sigonstack() is called under the proc lock. */
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	/* %gs is kept in the PCB; the other selectors live in the frame. */
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		/* Pretend the interrupted syscall returned 0. */
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	/* In-line FPU state only; no extended (xsave) buffer supplied. */
	get_fpcontext(td, mcp, NULL, 0);
	/* Reassemble the 32-bit %fs/%gs bases from the split descriptors. */
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	mcp->mc_flags = 0;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}
1153
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 *
 * Returns EINVAL for a malformed context, an error from copying in the
 * extended FPU state or from set_fpcontext(), or 0 on success.  The
 * trapframe is modified only after every fallible step has succeeded.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tp;
	char *xfpustate;
	int eflags, ret;

	tp = td->td_frame;
	/* Reject contexts of the wrong size or with unknown flag bits. */
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	/* Accept only user-changeable flag bits from the context. */
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		/*
		 * This length check bounds the alloca below to at most
		 * cpu_max_ext_state_size - sizeof(union savefpu) bytes.
		 */
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		/*
		 * NOTE(review): with _MC_HASFPXSTATE clear,
		 * mc_xfpustate_len is still passed to set_fpcontext()
		 * below, unvalidated, alongside a NULL buffer —
		 * presumably npxsetregs() ignores it then; confirm.
		 */
		xfpustate = NULL;
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_fs = mcp->mc_fs;
	tp->tf_es = mcp->mc_es;
	tp->tf_ds = mcp->mc_ds;
	tp->tf_edi = mcp->mc_edi;
	tp->tf_esi = mcp->mc_esi;
	tp->tf_ebp = mcp->mc_ebp;
	tp->tf_ebx = mcp->mc_ebx;
	tp->tf_edx = mcp->mc_edx;
	tp->tf_ecx = mcp->mc_ecx;
	tp->tf_eax = mcp->mc_eax;
	tp->tf_eip = mcp->mc_eip;
	tp->tf_eflags = eflags;
	tp->tf_esp = mcp->mc_esp;
	tp->tf_ss = mcp->mc_ss;
	/* %gs is kept in the PCB, not in the trapframe. */
	td->td_pcb->pcb_gs = mcp->mc_gs;
	return (0);
}
1204
1205 static void
get_fpcontext(struct thread * td,mcontext_t * mcp,char * xfpusave,size_t xfpusave_len)1206 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
1207 size_t xfpusave_len)
1208 {
1209 size_t max_len, len;
1210
1211 mcp->mc_ownedfp = npxgetregs(td);
1212 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
1213 sizeof(mcp->mc_fpstate));
1214 mcp->mc_fpformat = npxformat();
1215 if (!use_xsave || xfpusave_len == 0)
1216 return;
1217 max_len = cpu_max_ext_state_size - sizeof(union savefpu);
1218 len = xfpusave_len;
1219 if (len > max_len) {
1220 len = max_len;
1221 bzero(xfpusave + max_len, len - max_len);
1222 }
1223 mcp->mc_flags |= _MC_HASFPXSTATE;
1224 mcp->mc_xfpustate_len = len;
1225 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
1226 }
1227
1228 static int
set_fpcontext(struct thread * td,mcontext_t * mcp,char * xfpustate,size_t xfpustate_len)1229 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
1230 size_t xfpustate_len)
1231 {
1232 int error;
1233
1234 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
1235 return (0);
1236 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
1237 mcp->mc_fpformat != _MC_FPFMT_XMM)
1238 return (EINVAL);
1239 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
1240 /* We don't care what state is left in the FPU or PCB. */
1241 fpstate_drop(td);
1242 error = 0;
1243 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
1244 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
1245 error = npxsetregs(td, (union savefpu *)&mcp->mc_fpstate,
1246 xfpustate, xfpustate_len);
1247 } else
1248 return (EINVAL);
1249 return (error);
1250 }
1251
/*
 * Discard td's FPU state so the next FPU use starts from a clean,
 * freshly-initialized state.  Must be called with a user-owned FPU.
 *
 * NOTE(review): the flag clearing below operates on curthread's PCB,
 * not td's; the visible callers (exec_setregs, set_fpcontext) appear
 * to pass the current thread, but the asymmetry with the npxdrop()
 * check on td looks suspicious — confirm before calling with any
 * other thread.
 */
static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	/* Release the live FPU only if this thread currently owns it. */
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}
1274
1275 int
fill_dbregs(struct thread * td,struct dbreg * dbregs)1276 fill_dbregs(struct thread *td, struct dbreg *dbregs)
1277 {
1278 struct pcb *pcb;
1279
1280 if (td == NULL) {
1281 dbregs->dr[0] = rdr0();
1282 dbregs->dr[1] = rdr1();
1283 dbregs->dr[2] = rdr2();
1284 dbregs->dr[3] = rdr3();
1285 dbregs->dr[6] = rdr6();
1286 dbregs->dr[7] = rdr7();
1287 } else {
1288 pcb = td->td_pcb;
1289 dbregs->dr[0] = pcb->pcb_dr0;
1290 dbregs->dr[1] = pcb->pcb_dr1;
1291 dbregs->dr[2] = pcb->pcb_dr2;
1292 dbregs->dr[3] = pcb->pcb_dr3;
1293 dbregs->dr[6] = pcb->pcb_dr6;
1294 dbregs->dr[7] = pcb->pcb_dr7;
1295 }
1296 dbregs->dr[4] = 0;
1297 dbregs->dr[5] = 0;
1298 return (0);
1299 }
1300
1301 int
set_dbregs(struct thread * td,struct dbreg * dbregs)1302 set_dbregs(struct thread *td, struct dbreg *dbregs)
1303 {
1304 struct pcb *pcb;
1305 int i;
1306
1307 if (td == NULL) {
1308 load_dr0(dbregs->dr[0]);
1309 load_dr1(dbregs->dr[1]);
1310 load_dr2(dbregs->dr[2]);
1311 load_dr3(dbregs->dr[3]);
1312 load_dr6(dbregs->dr[6]);
1313 load_dr7(dbregs->dr[7]);
1314 } else {
1315 /*
1316 * Don't let an illegal value for dr7 get set. Specifically,
1317 * check for undefined settings. Setting these bit patterns
1318 * result in undefined behaviour and can lead to an unexpected
1319 * TRCTRAP.
1320 */
1321 for (i = 0; i < 4; i++) {
1322 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
1323 return (EINVAL);
1324 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
1325 return (EINVAL);
1326 }
1327
1328 pcb = td->td_pcb;
1329
1330 /*
1331 * Don't let a process set a breakpoint that is not within the
1332 * process's address space. If a process could do this, it
1333 * could halt the system by setting a breakpoint in the kernel
1334 * (if ddb was enabled). Thus, we need to check to make sure
1335 * that no breakpoints are being enabled for addresses outside
1336 * process's address space.
1337 *
1338 * XXX - what about when the watched area of the user's
1339 * address space is written into from within the kernel
1340 * ... wouldn't that still cause a breakpoint to be generated
1341 * from within kernel mode?
1342 */
1343
1344 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
1345 /* dr0 is enabled */
1346 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
1347 return (EINVAL);
1348 }
1349
1350 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
1351 /* dr1 is enabled */
1352 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
1353 return (EINVAL);
1354 }
1355
1356 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
1357 /* dr2 is enabled */
1358 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
1359 return (EINVAL);
1360 }
1361
1362 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
1363 /* dr3 is enabled */
1364 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
1365 return (EINVAL);
1366 }
1367
1368 pcb->pcb_dr0 = dbregs->dr[0];
1369 pcb->pcb_dr1 = dbregs->dr[1];
1370 pcb->pcb_dr2 = dbregs->dr[2];
1371 pcb->pcb_dr3 = dbregs->dr[3];
1372 pcb->pcb_dr6 = dbregs->dr[6];
1373 pcb->pcb_dr7 = dbregs->dr[7];
1374
1375 pcb->pcb_flags |= PCB_DBREGS;
1376 }
1377
1378 return (0);
1379 }
1380
1381 /*
1382 * Return > 0 if a hardware breakpoint has been hit, and the
1383 * breakpoint was in user space. Return 0, otherwise.
1384 */
1385 int
user_dbreg_trap(register_t dr6)1386 user_dbreg_trap(register_t dr6)
1387 {
1388 u_int32_t dr7;
1389 u_int32_t bp; /* breakpoint bits extracted from dr6 */
1390 int nbp; /* number of breakpoints that triggered */
1391 caddr_t addr[4]; /* breakpoint addresses */
1392 int i;
1393
1394 bp = dr6 & DBREG_DR6_BMASK;
1395 if (bp == 0) {
1396 /*
1397 * None of the breakpoint bits are set meaning this
1398 * trap was not caused by any of the debug registers
1399 */
1400 return (0);
1401 }
1402
1403 dr7 = rdr7();
1404 if ((dr7 & 0x000000ff) == 0) {
1405 /*
1406 * all GE and LE bits in the dr7 register are zero,
1407 * thus the trap couldn't have been caused by the
1408 * hardware debug registers
1409 */
1410 return (0);
1411 }
1412
1413 nbp = 0;
1414
1415 /*
1416 * at least one of the breakpoints were hit, check to see
1417 * which ones and if any of them are user space addresses
1418 */
1419
1420 if (bp & 0x01) {
1421 addr[nbp++] = (caddr_t)rdr0();
1422 }
1423 if (bp & 0x02) {
1424 addr[nbp++] = (caddr_t)rdr1();
1425 }
1426 if (bp & 0x04) {
1427 addr[nbp++] = (caddr_t)rdr2();
1428 }
1429 if (bp & 0x08) {
1430 addr[nbp++] = (caddr_t)rdr3();
1431 }
1432
1433 for (i = 0; i < nbp; i++) {
1434 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
1435 /*
1436 * addr[i] is in user space
1437 */
1438 return (nbp);
1439 }
1440 }
1441
1442 /*
1443 * None of the breakpoints are in user space.
1444 */
1445 return (0);
1446 }
1447