xref: /titanic_50/usr/src/uts/intel/ia32/syscall/getcontext.c (revision 84f7a9b9dca4f23b5f50edef0e59d7eb44301114)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/psw.h>
#include <sys/ucontext.h>
#include <sys/asm_linkage.h>
#include <sys/errno.h>
#include <sys/archsystm.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>

/*
 * Save user context.
 */
void
savecontext(ucontext_t *ucp, k_sigset_t mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	/*
	 * We unconditionally assign to every field through the end
	 * of the gregs, but we need to bzero() everything -after- that
	 * to avoid having any kernel stack garbage escape to userland.
	 */
	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (struct ucontext *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

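		/*
		 * No usable ustack was found; report the alternate signal
		 * stack if we are currently running on it, otherwise the
		 * main process stack.
		 */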
		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

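	/*
	 * Capture the general registers.  Floating point state is copied
	 * only if this lwp has actually used the FPU; otherwise UC_FPU is
	 * cleared so the context does not claim to carry valid FP state.
	 */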
	getgregs(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

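	/*
	 * Convert the kernel signal set we were handed into the
	 * user-level sigset_t representation.
	 */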
	sigktou(&mask, &ucp->uc_sigmask);
	/*
	 * If the trace flag is set, arrange for single-stepping and
	 * turn off the trace flag.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		/*
		 * We always check DEBUG_PENDING before checking for any
		 * pending signal, which at times can lead to DEBUG_PENDING
		 * not being honoured.  (For example, the lwp is stopped by
		 * stop_on_fault() called from trap(); after being awakened
		 * it might see a pending signal and call savecontext(), but
		 * on the way back to userland there is no place the flag
		 * can be detected.)  In anticipation of such an occasion,
		 * we set the AST flag for the thread, which makes the
		 * thread take an excursion through trap(), where we handle
		 * it appropriately.
		 */
		aston(curthread);
	}
}

/*
 * Restore user context.
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

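	/*
	 * If the context carries stack information, update our record of
	 * whether the lwp is executing on its alternate signal stack.
	 */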
	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/*
		 * If the trace flag is set, mark the lwp to take a
		 * single-step trap on return to user level (below).
		 * The x86 lcall and sysenter interfaces have already done
		 * this and turned off the flag, but the amd64 syscall
		 * interface has not.
		 */
		if (lwptoregs(lwp)->r_ps & PS_T)
			lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		setgregs(lwp, ucp->uc_mcontext.gregs);
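		/*
		 * Keep the syscall return path from clobbering the register
		 * values just installed: JUSTRETURN tells the exit path not
		 * to overwrite them with a return value, and setting
		 * t_post_sys plus the AST forces the slow return path so
		 * the new state (and any DEBUG_PENDING single-step) takes
		 * effect.
		 */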
		lwp->lwp_eosys = JUSTRETURN;
		t->t_post_sys = 1;
		aston(curthread);
	}

	if (ucp->uc_flags & UC_FPU)
		setfpregs(lwp, &ucp->uc_mcontext.fpregs);

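	/*
	 * Install the new signal mask under p_lock.  Any signal block
	 * requested via schedctl is committed first so it is not lost,
	 * and sigcheck() tells us whether the new mask leaves a signal
	 * pending that the thread must now notice.
	 */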
	if (ucp->uc_flags & UC_SIGMASK) {
		proc_t *p = ttoproc(t);

		mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		if (sigcheck(p, t))
			t->t_sig_check = 1;
		mutex_exit(&p->p_lock);
	}
}


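/*
 * Common handler for the getcontext(2)/setcontext(2) system call; `flag'
 * selects GETCONTEXT, SETCONTEXT, GETUSTACK or SETUSTACK.
 */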
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to return only the fields
	 * specified in uc_flags.  That way the structure can grow and
	 * still be binary compatible with all .o's, which will only
	 * have the old fields defined in uc_flags.
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
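		/*
		 * If a signal-block request is pending via schedctl, commit
		 * it first so the mask saved in the context reflects it.
		 */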
		if (schedctl_sigblock(curthread)) {
			proc_t *p = ttoproc(curthread);
			mutex_enter(&p->p_lock);
			schedctl_finish_sigblock(curthread);
			mutex_exit(&p->p_lock);
		}
		savecontext(&uc, curthread->t_hold);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
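		/*
		 * A null context pointer means there is nothing to resume;
		 * the process simply exits.
		 */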
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

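		/*
		 * If the new context carries stack bounds and a ustack has
		 * been registered, keep the registered copy up to date.
		 */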
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (uc.uc_stack));
		return (0);

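	/*
	 * GETUSTACK returns the address at which this lwp's stack bounds
	 * are kept; SETUSTACK records a new address after verifying that
	 * a stack_t can be read from it.
	 */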
	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}

#ifdef _SYSCALL32_IMPL

/*
 * Save user context for 32-bit processes.
 */
void
savecontext32(ucontext32_t *ucp, k_sigset_t mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct regs *rp = lwptoregs(lwp);

	bzero(&ucp->uc_mcontext.fpregs, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs));

	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			ucp->uc_stack.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			ucp->uc_stack.ss_sp = (caddr32_t)(uintptr_t)
			    (p->p_usrstack - p->p_stksize);
			ucp->uc_stack.ss_size = (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

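	/*
	 * As in savecontext(): capture the general registers, and copy
	 * floating point state only if this lwp has used the FPU.
	 */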
	getgregs32(lwp, ucp->uc_mcontext.gregs);
	if (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)
		getfpregs32(lwp, &ucp->uc_mcontext.fpregs);
	else
		ucp->uc_flags &= ~UC_FPU;

	sigktou(&mask, &ucp->uc_sigmask);
	/*
	 * If the trace flag is set, arrange for single-stepping and
	 * turn off the trace flag.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		/*
		 * See comments in savecontext().
		 */
		aston(curthread);
	}
}

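/*
 * 32-bit counterpart of getsetcontext(), used for 32-bit processes; the
 * context is converted to the native format before being restored.
 */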
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t ucnat;
	ucontext32_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	caddr32_t ustack32;
	stack32_t dummy_stk32;

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		if (schedctl_sigblock(curthread)) {
			proc_t *p = ttoproc(curthread);
			mutex_enter(&p->p_lock);
			schedctl_finish_sigblock(curthread);
			mutex_exit(&p->p_lock);
		}
		savecontext32(&uc, curthread->t_hold);
		if (copyout(&uc, arg, sizeof (uc)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		if (copyin(ucp, &uc, sizeof (uc) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}
		if ((uc.uc_flags & UC_FPU) &&
		    copyin(&ucp->uc_mcontext.fpregs, &uc.uc_mcontext.fpregs,
		    sizeof (uc.uc_mcontext.fpregs))) {
			return (set_errno(EFAULT));
		}

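		/*
		 * Convert the 32-bit context to the native format before
		 * restoring it.
		 */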
		ucontext_32ton(&uc, &ucnat);
		restorecontext(&ucnat);

		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0))
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (uc.uc_stack));
		return (0);

	case GETUSTACK:
		ustack32 = (caddr32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (ustack32)))
			return (set_errno(EFAULT));
		return (0);

	case SETUSTACK:
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));
		lwp->lwp_ustack = (uintptr_t)arg;
		return (0);
	}
}

#endif	/* _SYSCALL32_IMPL */