xref: /freebsd/sys/amd64/amd64/exec_machdep.c (revision fa02551dc8a029a74eb374c418dbb5401d53c2db)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 2003 Peter Wemm.
5  * Copyright (c) 1992 Terrence R. Lambert.
6  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * William Jolitz.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include <sys/cdefs.h>
42 #include "opt_cpu.h"
43 #include "opt_ddb.h"
44 #include "opt_kstack_pages.h"
45 
46 #include <sys/param.h>
47 #include <sys/proc.h>
48 #include <sys/systm.h>
49 #include <sys/exec.h>
50 #include <sys/imgact.h>
51 #include <sys/kdb.h>
52 #include <sys/kernel.h>
53 #include <sys/ktr.h>
54 #include <sys/linker.h>
55 #include <sys/lock.h>
56 #include <sys/malloc.h>
57 #include <sys/mutex.h>
58 #include <sys/pcpu.h>
59 #include <sys/reg.h>
60 #include <sys/rwlock.h>
61 #include <sys/signalvar.h>
62 #include <sys/smp.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
65 #include <sys/sysent.h>
66 #include <sys/sysproto.h>
67 #include <sys/ucontext.h>
68 #include <sys/vmmeter.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_param.h>
72 #include <vm/vm_extern.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 
76 #ifdef DDB
77 #ifndef KDB
78 #error KDB must be enabled in order for DDB to work!
79 #endif
80 #include <ddb/ddb.h>
81 #include <ddb/db_sym.h>
82 #endif
83 
84 #include <machine/vmparam.h>
85 #include <machine/frame.h>
86 #include <machine/md_var.h>
87 #include <machine/pcb.h>
88 #include <machine/proc.h>
89 #include <machine/sigframe.h>
90 #include <machine/specialreg.h>
91 #include <machine/trap.h>
92 
93 _Static_assert(sizeof(mcontext_t) == 800, "mcontext_t size incorrect");
94 _Static_assert(sizeof(ucontext_t) == 880, "ucontext_t size incorrect");
95 _Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");
96 
97 /*
98  * Send an interrupt to process.
99  *
100  * Stack is set up to allow sigcode stored at top to call routine,
101  * followed by call to sigreturn routine below.  After sigreturn
102  * resets the signal mask, the stack, and the frame pointer, it
103  * returns to the user specified pc, psl.
104  */
105 void
106 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
107 {
108 	struct sigframe sf, *sfp;
109 	struct pcb *pcb;
110 	struct proc *p;
111 	struct thread *td;
112 	struct sigacts *psp;
113 	char *sp;
114 	struct trapframe *regs;
115 	char *xfpusave;
116 	size_t xfpusave_len;
117 	int sig;
118 	int oonstack;
119 
120 	td = curthread;
121 	pcb = td->td_pcb;
122 	p = td->td_proc;
123 	PROC_LOCK_ASSERT(p, MA_OWNED);
124 	sig = ksi->ksi_signo;
125 	psp = p->p_sigacts;
126 	mtx_assert(&psp->ps_mtx, MA_OWNED);
127 	regs = td->td_frame;
128 	oonstack = sigonstack(regs->tf_rsp);
129 
130 	/* Save user context. */
131 	bzero(&sf, sizeof(sf));
132 	sf.sf_uc.uc_sigmask = *mask;
133 	sf.sf_uc.uc_stack = td->td_sigstk;
134 	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
135 	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
136 	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
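	/*
	 * The trapframe register block has the same layout as the mcontext
	 * members starting at mc_rdi, so the frame is copied wholesale.
	 */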
137 	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
138 	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
139 	get_fpcontext(td, &sf.sf_uc.uc_mcontext, &xfpusave, &xfpusave_len);
140 	update_pcb_bases(pcb);
141 	sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
142 	sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
143 	bzero(sf.sf_uc.uc_mcontext.mc_spare,
144 	    sizeof(sf.sf_uc.uc_mcontext.mc_spare));
145 
146 	/* Allocate space for the signal handler context. */
147 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
148 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
149 		sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
150 #if defined(COMPAT_43)
151 		td->td_sigstk.ss_flags |= SS_ONSTACK;
152 #endif
153 	} else
154 		sp = (char *)regs->tf_rsp - 128;	/* skip the ABI red zone */
155 	if (xfpusave != NULL) {
156 		sp -= xfpusave_len;
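		/* XSAVE state must be 64-byte aligned. */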
157 		sp = (char *)((unsigned long)sp & ~0x3Ful);
158 		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
159 	}
160 	sp -= sizeof(struct sigframe);
161 	/* Align to 16 bytes. */
162 	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
163 
164 	/* Build the argument list for the signal handler. */
165 	regs->tf_rdi = sig;			/* arg 1 in %rdi */
166 	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
167 	bzero(&sf.sf_si, sizeof(sf.sf_si));
168 	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
169 		/* Signal handler installed with SA_SIGINFO. */
170 		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
171 		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
172 
173 		/* Fill in POSIX parts */
174 		sf.sf_si = ksi->ksi_info;
175 		sf.sf_si.si_signo = sig; /* maybe a translated signal */
176 		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
177 	} else {
178 		/* Old FreeBSD-style arguments. */
179 		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
180 		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
181 		sf.sf_ahu.sf_handler = catcher;
182 	}
183 	mtx_unlock(&psp->ps_mtx);
184 	PROC_UNLOCK(p);
185 
186 	/*
187 	 * Copy the sigframe out to the user's stack.
188 	 */
189 	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
190 	    (xfpusave != NULL && copyout(xfpusave,
191 	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
192 	    != 0)) {
193 		uprintf("pid %d comm %s has trashed its stack, killing\n",
194 		    p->p_pid, p->p_comm);
195 		PROC_LOCK(p);
196 		sigexit(td, SIGILL);
197 	}
198 
199 	fpstate_drop(td);
200 	regs->tf_rsp = (long)sfp;
201 	regs->tf_rip = PROC_SIGCODE(p);
202 	regs->tf_rflags &= ~(PSL_T | PSL_D);
203 	regs->tf_cs = _ucodesel;
204 	regs->tf_ds = _udatasel;
205 	regs->tf_ss = _udatasel;
206 	regs->tf_es = _udatasel;
207 	regs->tf_fs = _ufssel;
208 	regs->tf_gs = _ugssel;
209 	regs->tf_flags = TF_HASSEGS;
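	/* Deliver the signal with the thread's TLS base as the %fs base. */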
210 	if ((pcb->pcb_flags & PCB_TLSBASE) != 0)
211 		pcb->pcb_fsbase = pcb->pcb_tlsbase;
212 	PROC_LOCK(p);
213 	mtx_lock(&psp->ps_mtx);
214 }
215 
216 /*
217  * System call to clean up state after a signal
218  * has been taken.  Reset signal mask and
219  * stack state from context left by sendsig (above).
220  * Return to previous pc and psl as specified by
221  * context left by sendsig. Check carefully to
222  * make sure that the user has not modified the
223  * state to gain improper privileges.
224  */
225 int
226 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
227 {
228 	ucontext_t uc;
229 	struct pcb *pcb;
230 	struct proc *p;
231 	struct trapframe *regs;
232 	ucontext_t *ucp;
233 	char *xfpustate;
234 	size_t xfpustate_len;
235 	long rflags;
236 	int cs, error, ret;
237 	ksiginfo_t ksi;
238 
239 	pcb = td->td_pcb;
240 	p = td->td_proc;
241 
242 	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
243 	if (error != 0) {
244 		uprintf("pid %d (%s): sigreturn copyin failed\n",
245 		    p->p_pid, td->td_name);
246 		return (error);
247 	}
248 	ucp = &uc;
249 	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
250 		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
251 		    td->td_name, ucp->uc_mcontext.mc_flags);
252 		return (EINVAL);
253 	}
254 	regs = td->td_frame;
255 	rflags = ucp->uc_mcontext.mc_rflags;
256 	/*
257 	 * Don't allow users to change privileged or reserved flags.
258 	 */
259 	if (!EFL_SECURE(rflags, regs->tf_rflags)) {
260 		uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
261 		    td->td_name, rflags);
262 		return (EINVAL);
263 	}
264 
265 	/*
266 	 * Don't allow users to load a valid privileged %cs.  Let the
267 	 * hardware check for invalid selectors, excess privilege in
268 	 * other selectors, invalid %eip's and invalid %esp's.
269 	 */
270 	cs = ucp->uc_mcontext.mc_cs;
271 	if (!CS_SECURE(cs)) {
272 		uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
273 		    td->td_name, cs);
274 		ksiginfo_init_trap(&ksi);
275 		ksi.ksi_signo = SIGBUS;
276 		ksi.ksi_code = BUS_OBJERR;
277 		ksi.ksi_trapno = T_PROTFLT;
278 		ksi.ksi_addr = (void *)regs->tf_rip;
279 		trapsignal(td, &ksi);
280 		return (EINVAL);
281 	}
282 
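	/*
	 * Extended FPU state, if present, is staged through a temporary
	 * kernel buffer before set_fpcontext() installs it.
	 */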
283 	if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
284 		xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
285 		if (xfpustate_len > cpu_max_ext_state_size -
286 		    sizeof(struct savefpu)) {
287 			uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
288 			    p->p_pid, td->td_name, xfpustate_len);
289 			return (EINVAL);
290 		}
291 		xfpustate = (char *)fpu_save_area_alloc();
292 		error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
293 		    xfpustate, xfpustate_len);
294 		if (error != 0) {
295 			fpu_save_area_free((struct savefpu *)xfpustate);
296 			uprintf(
297 	"pid %d (%s): sigreturn copying xfpustate failed\n",
298 			    p->p_pid, td->td_name);
299 			return (error);
300 		}
301 	} else {
302 		xfpustate = NULL;
303 		xfpustate_len = 0;
304 	}
305 	ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
306 	fpu_save_area_free((struct savefpu *)xfpustate);
307 	if (ret != 0) {
308 		uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
309 		    p->p_pid, td->td_name, ret);
310 		return (ret);
311 	}
312 	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
313 	update_pcb_bases(pcb);
314 	pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
315 	pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;
316 
317 #if defined(COMPAT_43)
318 	if (ucp->uc_mcontext.mc_onstack & 1)
319 		td->td_sigstk.ss_flags |= SS_ONSTACK;
320 	else
321 		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
322 #endif
323 
324 	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
325 	return (EJUSTRETURN);
326 }
327 
328 #ifdef COMPAT_FREEBSD4
329 int
330 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
331 {
332 
333 	return sys_sigreturn(td, (struct sigreturn_args *)uap);
334 }
335 #endif
336 
337 /*
338  * Reset the hardware debug registers if they were in use.
339  * They won't have any meaning for the newly exec'd process.
340  */
341 void
342 x86_clear_dbregs(struct pcb *pcb)
343 {
344 	if ((pcb->pcb_flags & PCB_DBREGS) == 0)
345 		return;
346 
347 	pcb->pcb_dr0 = 0;
348 	pcb->pcb_dr1 = 0;
349 	pcb->pcb_dr2 = 0;
350 	pcb->pcb_dr3 = 0;
351 	pcb->pcb_dr6 = 0;
352 	pcb->pcb_dr7 = 0;
353 
354 	if (pcb == curpcb) {
355 		/*
356 		 * Clear the debug registers on the running CPU,
357 		 * otherwise they will end up affecting the next
358 		 * process we switch to.
359 		 */
360 		reset_dbregs();
361 	}
362 	clear_pcb_flags(pcb, PCB_DBREGS);
363 }
364 
365 /*
366  * Reset registers to default values on exec.
367  */
368 void
369 exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
370 {
371 	struct trapframe *regs;
372 	struct pcb *pcb;
373 	register_t saved_rflags;
374 
375 	regs = td->td_frame;
376 	pcb = td->td_pcb;
377 
378 	if (td->td_proc->p_md.md_ldt != NULL)
379 		user_ldt_free(td);
380 
381 	update_pcb_bases(pcb);
382 	pcb->pcb_fsbase = pcb->pcb_tlsbase = 0;
383 	pcb->pcb_gsbase = 0;
384 	clear_pcb_flags(pcb, PCB_32BIT | PCB_TLSBASE);
385 	pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
386 
387 	saved_rflags = regs->tf_rflags & PSL_T;
388 	bzero((char *)regs, sizeof(struct trapframe));
389 	regs->tf_rip = imgp->entry_addr;
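	/* Start %rsp at 8 mod 16, the alignment seen at function entry. */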
390 	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
391 	regs->tf_rdi = stack;		/* argv */
392 	regs->tf_rflags = PSL_USER | saved_rflags;
393 	regs->tf_ss = _udatasel;
394 	regs->tf_cs = _ucodesel;
395 	regs->tf_ds = _udatasel;
396 	regs->tf_es = _udatasel;
397 	regs->tf_fs = _ufssel;
398 	regs->tf_gs = _ugssel;
399 	regs->tf_flags = TF_HASSEGS;
400 
401 	x86_clear_dbregs(pcb);
402 
403 	/*
404 	 * Drop the FP state if we hold it, so that the process gets a
405 	 * clean FP state if it uses the FPU again.
406 	 */
407 	fpstate_drop(td);
408 }
409 
410 int
411 fill_regs(struct thread *td, struct reg *regs)
412 {
413 	struct trapframe *tp;
414 
415 	tp = td->td_frame;
416 	return (fill_frame_regs(tp, regs));
417 }
418 
419 int
420 fill_frame_regs(struct trapframe *tp, struct reg *regs)
421 {
422 
423 	regs->r_r15 = tp->tf_r15;
424 	regs->r_r14 = tp->tf_r14;
425 	regs->r_r13 = tp->tf_r13;
426 	regs->r_r12 = tp->tf_r12;
427 	regs->r_r11 = tp->tf_r11;
428 	regs->r_r10 = tp->tf_r10;
429 	regs->r_r9  = tp->tf_r9;
430 	regs->r_r8  = tp->tf_r8;
431 	regs->r_rdi = tp->tf_rdi;
432 	regs->r_rsi = tp->tf_rsi;
433 	regs->r_rbp = tp->tf_rbp;
434 	regs->r_rbx = tp->tf_rbx;
435 	regs->r_rdx = tp->tf_rdx;
436 	regs->r_rcx = tp->tf_rcx;
437 	regs->r_rax = tp->tf_rax;
438 	regs->r_rip = tp->tf_rip;
439 	regs->r_cs = tp->tf_cs;
440 	regs->r_rflags = tp->tf_rflags;
441 	regs->r_rsp = tp->tf_rsp;
442 	regs->r_ss = tp->tf_ss;
443 	if (tp->tf_flags & TF_HASSEGS) {
444 		regs->r_ds = tp->tf_ds;
445 		regs->r_es = tp->tf_es;
446 		regs->r_fs = tp->tf_fs;
447 		regs->r_gs = tp->tf_gs;
448 	} else {
449 		regs->r_ds = 0;
450 		regs->r_es = 0;
451 		regs->r_fs = 0;
452 		regs->r_gs = 0;
453 	}
454 	regs->r_err = 0;
455 	regs->r_trapno = 0;
456 	return (0);
457 }
458 
459 int
460 set_regs(struct thread *td, struct reg *regs)
461 {
462 	struct trapframe *tp;
463 	register_t rflags;
464 
465 	tp = td->td_frame;
466 	rflags = regs->r_rflags & 0xffffffff;
467 	if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
468 		return (EINVAL);
469 	tp->tf_r15 = regs->r_r15;
470 	tp->tf_r14 = regs->r_r14;
471 	tp->tf_r13 = regs->r_r13;
472 	tp->tf_r12 = regs->r_r12;
473 	tp->tf_r11 = regs->r_r11;
474 	tp->tf_r10 = regs->r_r10;
475 	tp->tf_r9  = regs->r_r9;
476 	tp->tf_r8  = regs->r_r8;
477 	tp->tf_rdi = regs->r_rdi;
478 	tp->tf_rsi = regs->r_rsi;
479 	tp->tf_rbp = regs->r_rbp;
480 	tp->tf_rbx = regs->r_rbx;
481 	tp->tf_rdx = regs->r_rdx;
482 	tp->tf_rcx = regs->r_rcx;
483 	tp->tf_rax = regs->r_rax;
484 	tp->tf_rip = regs->r_rip;
485 	tp->tf_cs = regs->r_cs;
486 	tp->tf_rflags = rflags;
487 	tp->tf_rsp = regs->r_rsp;
488 	tp->tf_ss = regs->r_ss;
489 	if (0) {	/* XXXKIB */
490 		tp->tf_ds = regs->r_ds;
491 		tp->tf_es = regs->r_es;
492 		tp->tf_fs = regs->r_fs;
493 		tp->tf_gs = regs->r_gs;
494 		tp->tf_flags = TF_HASSEGS;
495 	}
496 	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
497 	return (0);
498 }
499 
500 /* XXX check all this stuff! */
501 /* externalize from sv_xmm */
502 static void
503 fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
504 {
505 	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
506 	struct envxmm *penv_xmm = &sv_xmm->sv_env;
507 	int i;
508 
509 	/* pcb -> fpregs */
510 	bzero(fpregs, sizeof(*fpregs));
511 
512 	/* FPU control/status */
513 	penv_fpreg->en_cw = penv_xmm->en_cw;
514 	penv_fpreg->en_sw = penv_xmm->en_sw;
515 	penv_fpreg->en_tw = penv_xmm->en_tw;
516 	penv_fpreg->en_opcode = penv_xmm->en_opcode;
517 	penv_fpreg->en_rip = penv_xmm->en_rip;
518 	penv_fpreg->en_rdp = penv_xmm->en_rdp;
519 	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
520 	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
521 
522 	/* FPU registers */
523 	for (i = 0; i < 8; ++i)
524 		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
525 
526 	/* SSE registers */
527 	for (i = 0; i < 16; ++i)
528 		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
529 }
530 
531 /* internalize from fpregs into sv_xmm */
532 static void
533 set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
534 {
535 	struct envxmm *penv_xmm = &sv_xmm->sv_env;
536 	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
537 	int i;
538 
539 	/* fpregs -> pcb */
540 	/* FPU control/status */
541 	penv_xmm->en_cw = penv_fpreg->en_cw;
542 	penv_xmm->en_sw = penv_fpreg->en_sw;
543 	penv_xmm->en_tw = penv_fpreg->en_tw;
544 	penv_xmm->en_opcode = penv_fpreg->en_opcode;
545 	penv_xmm->en_rip = penv_fpreg->en_rip;
546 	penv_xmm->en_rdp = penv_fpreg->en_rdp;
547 	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
548 	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
549 
550 	/* FPU registers */
551 	for (i = 0; i < 8; ++i)
552 		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
553 
554 	/* SSE registers */
555 	for (i = 0; i < 16; ++i)
556 		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
557 }
558 
559 /* externalize from td->pcb */
560 int
561 fill_fpregs(struct thread *td, struct fpreg *fpregs)
562 {
563 
564 	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
565 	    P_SHOULDSTOP(td->td_proc),
566 	    ("not suspended thread %p", td));
567 	fpugetregs(td);
568 	fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
569 	return (0);
570 }
571 
572 /* internalize to td->pcb */
573 int
574 set_fpregs(struct thread *td, struct fpreg *fpregs)
575 {
576 
577 	critical_enter();
578 	set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
579 	fpuuserinited(td);
580 	critical_exit();
581 	return (0);
582 }
583 
584 /*
585  * Get machine context.
586  */
587 int
588 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
589 {
590 	struct pcb *pcb;
591 	struct trapframe *tp;
592 
593 	pcb = td->td_pcb;
594 	tp = td->td_frame;
595 	PROC_LOCK(curthread->td_proc);
596 	mcp->mc_onstack = sigonstack(tp->tf_rsp);
597 	PROC_UNLOCK(curthread->td_proc);
598 	mcp->mc_r15 = tp->tf_r15;
599 	mcp->mc_r14 = tp->tf_r14;
600 	mcp->mc_r13 = tp->tf_r13;
601 	mcp->mc_r12 = tp->tf_r12;
602 	mcp->mc_r11 = tp->tf_r11;
603 	mcp->mc_r10 = tp->tf_r10;
604 	mcp->mc_r9  = tp->tf_r9;
605 	mcp->mc_r8  = tp->tf_r8;
606 	mcp->mc_rdi = tp->tf_rdi;
607 	mcp->mc_rsi = tp->tf_rsi;
608 	mcp->mc_rbp = tp->tf_rbp;
609 	mcp->mc_rbx = tp->tf_rbx;
610 	mcp->mc_rcx = tp->tf_rcx;
611 	mcp->mc_rflags = tp->tf_rflags;
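	/*
	 * GET_MC_CLEAR_RET makes the saved context look like a successful
	 * syscall return: %rax/%rdx are zeroed and the carry flag is cleared.
	 */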
612 	if (flags & GET_MC_CLEAR_RET) {
613 		mcp->mc_rax = 0;
614 		mcp->mc_rdx = 0;
615 		mcp->mc_rflags &= ~PSL_C;
616 	} else {
617 		mcp->mc_rax = tp->tf_rax;
618 		mcp->mc_rdx = tp->tf_rdx;
619 	}
620 	mcp->mc_rip = tp->tf_rip;
621 	mcp->mc_cs = tp->tf_cs;
622 	mcp->mc_rsp = tp->tf_rsp;
623 	mcp->mc_ss = tp->tf_ss;
624 	mcp->mc_ds = tp->tf_ds;
625 	mcp->mc_es = tp->tf_es;
626 	mcp->mc_fs = tp->tf_fs;
627 	mcp->mc_gs = tp->tf_gs;
628 	mcp->mc_flags = tp->tf_flags;
629 	mcp->mc_len = sizeof(*mcp);
630 	get_fpcontext(td, mcp, NULL, NULL);
631 	update_pcb_bases(pcb);
632 	mcp->mc_fsbase = pcb->pcb_fsbase;
633 	mcp->mc_gsbase = pcb->pcb_gsbase;
634 	mcp->mc_xfpustate = 0;
635 	mcp->mc_xfpustate_len = 0;
636 	mcp->mc_tlsbase = (pcb->pcb_flags & PCB_TLSBASE) != 0 ?
637 	    pcb->pcb_tlsbase : 0;
638 	bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
639 	return (0);
640 }
641 
642 /*
643  * Set machine context.
644  *
645  * However, we don't set any but the user modifiable flags, and we won't
646  * touch the cs selector.
647  */
648 int
649 set_mcontext(struct thread *td, mcontext_t *mcp)
650 {
651 	struct pcb *pcb;
652 	struct trapframe *tp;
653 	char *xfpustate;
654 	long rflags;
655 	int ret;
656 
657 	pcb = td->td_pcb;
658 	tp = td->td_frame;
659 	if (mcp->mc_len != sizeof(*mcp) ||
660 	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
661 		return (EINVAL);
662 	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
663 	    (tp->tf_rflags & ~PSL_USERCHANGE);
664 	if (mcp->mc_flags & _MC_HASFPXSTATE) {
665 		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
666 		    sizeof(struct savefpu))
667 			return (EINVAL);
668 		xfpustate = (char *)fpu_save_area_alloc();
669 		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
670 		    mcp->mc_xfpustate_len);
671 		if (ret != 0) {
672 			fpu_save_area_free((struct savefpu *)xfpustate);
673 			return (ret);
674 		}
675 	} else
676 		xfpustate = NULL;
677 	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
678 	fpu_save_area_free((struct savefpu *)xfpustate);
679 	if (ret != 0)
680 		return (ret);
681 	tp->tf_r15 = mcp->mc_r15;
682 	tp->tf_r14 = mcp->mc_r14;
683 	tp->tf_r13 = mcp->mc_r13;
684 	tp->tf_r12 = mcp->mc_r12;
685 	tp->tf_r11 = mcp->mc_r11;
686 	tp->tf_r10 = mcp->mc_r10;
687 	tp->tf_r9  = mcp->mc_r9;
688 	tp->tf_r8  = mcp->mc_r8;
689 	tp->tf_rdi = mcp->mc_rdi;
690 	tp->tf_rsi = mcp->mc_rsi;
691 	tp->tf_rbp = mcp->mc_rbp;
692 	tp->tf_rbx = mcp->mc_rbx;
693 	tp->tf_rdx = mcp->mc_rdx;
694 	tp->tf_rcx = mcp->mc_rcx;
695 	tp->tf_rax = mcp->mc_rax;
696 	tp->tf_rip = mcp->mc_rip;
697 	tp->tf_rflags = rflags;
698 	tp->tf_rsp = mcp->mc_rsp;
699 	tp->tf_ss = mcp->mc_ss;
700 	tp->tf_flags = mcp->mc_flags;
701 	if (tp->tf_flags & TF_HASSEGS) {
702 		tp->tf_ds = mcp->mc_ds;
703 		tp->tf_es = mcp->mc_es;
704 		tp->tf_fs = mcp->mc_fs;
705 		tp->tf_gs = mcp->mc_gs;
706 	}
707 	set_pcb_flags(pcb, PCB_FULL_IRET);
708 	if (mcp->mc_flags & _MC_HASBASES) {
709 		pcb->pcb_fsbase = mcp->mc_fsbase;
710 		pcb->pcb_gsbase = mcp->mc_gsbase;
711 	}
712 	if ((mcp->mc_flags & _MC_HASTLSBASE) != 0) {
713 		pcb->pcb_tlsbase = mcp->mc_tlsbase;
714 		set_pcb_flags(pcb, PCB_TLSBASE);
715 	}
716 	return (0);
717 }
718 
719 void
720 get_fpcontext(struct thread *td, mcontext_t *mcp, char **xfpusave,
721     size_t *xfpusave_len)
722 {
723 	mcp->mc_ownedfp = fpugetregs(td);
724 	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
725 	    sizeof(mcp->mc_fpstate));
726 	mcp->mc_fpformat = fpuformat();
727 	if (xfpusave == NULL)
728 		return;
729 	if (!use_xsave || cpu_max_ext_state_size <= sizeof(struct savefpu)) {
730 		*xfpusave_len = 0;
731 		*xfpusave = NULL;
732 	} else {
733 		mcp->mc_flags |= _MC_HASFPXSTATE;
734 		*xfpusave_len = mcp->mc_xfpustate_len =
735 		    cpu_max_ext_state_size - sizeof(struct savefpu);
736 		*xfpusave = (char *)(get_pcb_user_save_td(td) + 1);
737 	}
738 }
739 
740 int
741 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
742     size_t xfpustate_len)
743 {
744 	int error;
745 
746 	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
747 		return (0);
748 	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
749 		return (EINVAL);
750 	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
751 		/* We don't care what state is left in the FPU or PCB. */
752 		fpstate_drop(td);
753 		error = 0;
754 	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
755 	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
756 		error = fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate,
757 		    xfpustate, xfpustate_len);
758 	} else
759 		return (EINVAL);
760 	return (error);
761 }
762 
763 void
764 fpstate_drop(struct thread *td)
765 {
766 
767 	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
768 	critical_enter();
769 	if (PCPU_GET(fpcurthread) == td)
770 		fpudrop();
771 	/*
772 	 * XXX force a full drop of the fpu.  The above only drops it if we
773 	 * owned it.
774 	 *
775 	 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
776 	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
777 	 * We only need to drop to !PCB_INITDONE in sendsig().  But
778 	 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
779 	 * have too many layers.
780 	 */
781 	clear_pcb_flags(curthread->td_pcb,
782 	    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
783 	critical_exit();
784 }
785 
786 int
787 fill_dbregs(struct thread *td, struct dbreg *dbregs)
788 {
789 	struct pcb *pcb;
790 
791 	if (td == NULL) {
792 		dbregs->dr[0] = rdr0();
793 		dbregs->dr[1] = rdr1();
794 		dbregs->dr[2] = rdr2();
795 		dbregs->dr[3] = rdr3();
796 		dbregs->dr[6] = rdr6();
797 		dbregs->dr[7] = rdr7();
798 	} else {
799 		pcb = td->td_pcb;
800 		dbregs->dr[0] = pcb->pcb_dr0;
801 		dbregs->dr[1] = pcb->pcb_dr1;
802 		dbregs->dr[2] = pcb->pcb_dr2;
803 		dbregs->dr[3] = pcb->pcb_dr3;
804 		dbregs->dr[6] = pcb->pcb_dr6;
805 		dbregs->dr[7] = pcb->pcb_dr7;
806 	}
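	/*
	 * dr4-dr5 are reserved aliases of dr6-dr7, and dr8-dr15 do not
	 * exist on x86-64; report them as zero.
	 */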
807 	dbregs->dr[4] = 0;
808 	dbregs->dr[5] = 0;
809 	dbregs->dr[8] = 0;
810 	dbregs->dr[9] = 0;
811 	dbregs->dr[10] = 0;
812 	dbregs->dr[11] = 0;
813 	dbregs->dr[12] = 0;
814 	dbregs->dr[13] = 0;
815 	dbregs->dr[14] = 0;
816 	dbregs->dr[15] = 0;
817 	return (0);
818 }
819 
820 int
821 set_dbregs(struct thread *td, struct dbreg *dbregs)
822 {
823 	struct pcb *pcb;
824 	int i;
825 
826 	if (td == NULL) {
827 		load_dr0(dbregs->dr[0]);
828 		load_dr1(dbregs->dr[1]);
829 		load_dr2(dbregs->dr[2]);
830 		load_dr3(dbregs->dr[3]);
831 		load_dr6(dbregs->dr[6]);
832 		load_dr7(dbregs->dr[7]);
833 	} else {
834 		/*
835 		 * Don't let an illegal value for dr7 get set.  Specifically,
836 		 * check for undefined settings.  Setting these bit patterns
837 		 * results in undefined behaviour and can lead to an unexpected
838 		 * TRCTRAP or a general protection fault right here.
839 		 * Upper bits of dr6 and dr7 must not be set
840 		 */
841 		for (i = 0; i < 4; i++) {
842 			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
843 				return (EINVAL);
844 			if (td->td_frame->tf_cs == _ucode32sel &&
845 			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
846 				return (EINVAL);
847 		}
848 		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
849 		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
850 			return (EINVAL);
851 
852 		pcb = td->td_pcb;
853 
854 		/*
855 		 * Don't let a process set a breakpoint that is not within the
856 		 * process's address space.  If a process could do this, it
857 		 * could halt the system by setting a breakpoint in the kernel
858 		 * (if ddb was enabled).  Thus, we need to check to make sure
859 		 * that no breakpoints are being enabled for addresses outside
860 		 * the process's address space.
861 		 *
862 		 * XXX - what about when the watched area of the user's
863 		 * address space is written into from within the kernel
864 		 * ... wouldn't that still cause a breakpoint to be generated
865 		 * from within kernel mode?
866 		 */
867 
868 		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
869 			/* dr0 is enabled */
870 			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
871 				return (EINVAL);
872 		}
873 		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
874 			/* dr1 is enabled */
875 			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
876 				return (EINVAL);
877 		}
878 		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
879 			/* dr2 is enabled */
880 			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
881 				return (EINVAL);
882 		}
883 		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
884 			/* dr3 is enabled */
885 			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
886 				return (EINVAL);
887 		}
888 
889 		pcb->pcb_dr0 = dbregs->dr[0];
890 		pcb->pcb_dr1 = dbregs->dr[1];
891 		pcb->pcb_dr2 = dbregs->dr[2];
892 		pcb->pcb_dr3 = dbregs->dr[3];
893 		pcb->pcb_dr6 = dbregs->dr[6];
894 		pcb->pcb_dr7 = dbregs->dr[7];
895 
896 		set_pcb_flags(pcb, PCB_DBREGS);
897 	}
898 
899 	return (0);
900 }
901 
902 void
903 reset_dbregs(void)
904 {
905 
906 	load_dr7(0);	/* Turn off the control bits first */
907 	load_dr0(0);
908 	load_dr1(0);
909 	load_dr2(0);
910 	load_dr3(0);
911 	load_dr6(0);
912 }
913 
914 /*
915  * Return > 0 if a hardware breakpoint has been hit, and the
916  * breakpoint was in user space.  Return 0 otherwise.
917  */
918 int
919 user_dbreg_trap(register_t dr6)
920 {
921         u_int64_t dr7;
922         u_int64_t bp;       /* breakpoint bits extracted from dr6 */
923         int nbp;            /* number of breakpoints that triggered */
924         caddr_t addr[4];    /* breakpoint addresses */
925         int i;
926 
927         bp = dr6 & DBREG_DR6_BMASK;
928         if (bp == 0) {
929                 /*
930                  * None of the breakpoint bits are set, meaning this
931                  * trap was not caused by any of the debug registers
932                  */
933                 return (0);
934         }
935 
936         dr7 = rdr7();
937         if ((dr7 & 0x000000ff) == 0) {
938                 /*
939                  * all GE and LE bits in the dr7 register are zero,
940                  * thus the trap couldn't have been caused by the
941                  * hardware debug registers
942                  */
943 		return (0);
944         }
945 
946         nbp = 0;
947 
948         /*
949          * at least one of the breakpoints was hit, check to see
950          * which ones and if any of them are user space addresses
951          */
952 
953         if (bp & 0x01) {
954                 addr[nbp++] = (caddr_t)rdr0();
955         }
956         if (bp & 0x02) {
957                 addr[nbp++] = (caddr_t)rdr1();
958         }
959         if (bp & 0x04) {
960                 addr[nbp++] = (caddr_t)rdr2();
961         }
962         if (bp & 0x08) {
963                 addr[nbp++] = (caddr_t)rdr3();
964         }
965 
966         for (i = 0; i < nbp; i++) {
967                 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
968                         /*
969                          * addr[i] is in user space
970                          */
971                         return (nbp);
972                 }
973         }
974 
975         /*
976          * None of the breakpoints are in user space.
977          */
978         return (0);
979 }
980