xref: /illumos-gate/usr/src/uts/sparc/syscall/getcontext.c (revision 88f8b78a88cbdc6d8c1af5c3e54bc49d25095c98)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2003 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/vmparam.h>
36 #include <sys/systm.h>
37 #include <sys/signal.h>
38 #include <sys/stack.h>
39 #include <sys/frame.h>
40 #include <sys/proc.h>
41 #include <sys/ucontext.h>
42 #include <sys/asm_linkage.h>
43 #include <sys/kmem.h>
44 #include <sys/errno.h>
45 #include <sys/archsystm.h>
46 #include <sys/fpu/fpusystm.h>
47 #include <sys/debug.h>
48 #include <sys/model.h>
49 #include <sys/cmn_err.h>
50 #include <sys/sysmacros.h>
51 #include <sys/privregs.h>
52 #include <sys/schedctl.h>
53 
54 
/*
 * Save user context.
 *
 * Fill in *ucp with the calling LWP's user-visible context: general
 * registers, ancillary state registers, floating point state, the
 * stack bounds, and the signal mask passed in 'mask'.  The result is
 * destined for userland; the caller does the copyout (see
 * getsetcontext()), so no kernel-private data may leak through holes
 * in the structure.
 */
void
savecontext(ucontext_t *ucp, k_sigset_t mask)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext_t) -
	    offsetof(ucontext_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There are unused holes in the ucontext_t structure, zero-fill
	 * them so that we don't expose kernel data to the user.
	 * Each assignment below clears the padding word that follows
	 * the named field.
	 */
	(&ucp->uc_flags)[1] = 0;
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	/* Claim all pieces valid; UC_FPU may be cleared again below. */
	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (ucontext_t *)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* Currently running on the alternate signal stack. */
			ucp->uc_stack = lwp->lwp_sigaltstack;
		} else {
			/* Legacy: report the main process stack bounds. */
			ucp->uc_stack.ss_sp = p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size = p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs(lwp, ucp->uc_mcontext.gregs);
	getasrs(lwp, ucp->uc_mcontext.asrs);

	getfpregs(lwp, &ucp->uc_mcontext.fpregs);
	getfpasrs(lwp, ucp->uc_mcontext.asrs);
	/* If the FPU was never enabled there is no FP state worth restoring. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	/* No saved register windows accompany a kernel-built context. */
	ucp->uc_mcontext.gwins = (gwindows_t *)NULL;

	/*
	 * Save signal mask.
	 */
	sigktou(&mask, &ucp->uc_sigmask);
}
120 
121 
/*
 * Restore user context.
 *
 * Apply the pieces of *ucp selected by uc_flags back to the calling
 * LWP: stack state (UC_STACK), CPU registers and register windows
 * (UC_CPU), floating point state (UC_FPU), and the signal mask
 * (UC_SIGMASK).  The ucontext must already have been copied into
 * kernel space and validated by the caller (see getsetcontext()).
 */
void
restorecontext(ucontext_t *ucp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	mcontext_t *mcp = &ucp->uc_mcontext;
	model_t model = lwp_getdatamodel(lwp);

	/* Push any user register windows cached in the CPU out to memory. */
	(void) flush_user_windows_to_stack(NULL);
	/* Resolve any pending extra-register state before we overwrite it. */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	lwp->lwp_oldcontext = (uintptr_t)ucp->uc_link;

	if (ucp->uc_flags & UC_STACK) {
		if (ucp->uc_stack.ss_flags == SS_ONSTACK)
			lwp->lwp_sigaltstack = ucp->uc_stack;
		else
			lwp->lwp_sigaltstack.ss_flags &= ~SS_ONSTACK;
	}

	if (ucp->uc_flags & UC_CPU) {
		/* Restore any saved register windows before the registers. */
		if (mcp->gwins != 0)
			setgwins(lwp, mcp->gwins);
		setgregs(lwp, mcp->gregs);
		/* 32-bit processes carry their asrs in the xregs instead. */
		if (model == DATAMODEL_LP64)
			setasrs(lwp, mcp->asrs);
		else
			xregs_setgregs(lwp, xregs_getptr(lwp, ucp));
	}

	if (ucp->uc_flags & UC_FPU) {
		fpregset_t *fp = &ucp->uc_mcontext.fpregs;

		setfpregs(lwp, fp);
		if (model == DATAMODEL_LP64)
			setfpasrs(lwp, mcp->asrs);
		else
			xregs_setfpregs(lwp, xregs_getptr(lwp, ucp));
		/* Re-execute any deferred FP queue entries from the context. */
		run_fpq(lwp, fp);
	}

	if (ucp->uc_flags & UC_SIGMASK) {
		proc_t *p = ttoproc(t);

		/* t_hold is protected by p_lock. */
		mutex_enter(&p->p_lock);
		schedctl_finish_sigblock(t);
		sigutok(&ucp->uc_sigmask, &t->t_hold);
		/*
		 * If the new mask leaves a signal deliverable, flag the
		 * thread to take the signal-check slow path.
		 */
		if (sigcheck(p, t))
			t->t_sig_check = 1;
		mutex_exit(&p->p_lock);
	}
}
175 
176 
/*
 * getcontext(2)/setcontext(2) backend for 64-bit (native) processes.
 *
 * 'flag' selects the operation:
 *   GETCONTEXT - save the current context and copy it out to 'arg'.
 *   SETCONTEXT - copy a ucontext_t in from 'arg' and make it current;
 *		  a NULL 'arg' means the caller is exiting.
 *   GETUSTACK  - copy out the address of the registered ustack.
 *   SETUSTACK  - register 'arg' as this LWP's ustack pointer.
 *
 * Returns 0 on success, or sets errno (EINVAL/EFAULT) and returns -1
 * via set_errno().
 */
int
getsetcontext(int flag, void *arg)
{
	ucontext_t uc;
	struct fq fpu_q[MAXFPQ]; /* to hold floating queue */
	fpregset_t *fpp;
	gwindows_t *gwin = NULL;	/* to hold windows */
	/*
	 * NOTE: xregs_size is never set non-zero in this (64-bit)
	 * function -- there is no extra register state for _LP64
	 * programs -- so the kmem_free() of xregs below is a no-op.
	 * Compare getsetcontext32(), where these are actually used.
	 */
	caddr_t xregs = NULL;
	int xregs_size = 0;
	extern int nwindows;
	ucontext_t *ucp;
	klwp_t *lwp = ttolwp(curthread);
	stack_t dummy_stk;	/* validates readability of SETUSTACK arg */

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		/*
		 * Fold any schedctl lazy signal block into t_hold first,
		 * so the mask we save below is accurate.
		 */
		if (schedctl_sigblock(curthread)) {
			proc_t *p = ttoproc(curthread);
			mutex_enter(&p->p_lock);
			schedctl_finish_sigblock(curthread);
			mutex_exit(&p->p_lock);
		}
		savecontext(&uc, curthread->t_hold);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (struct fq *)NULL;

		if (copyout(&uc, arg, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (ucontext_t) -
		    sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.asrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Sanity-check the user-supplied queue
				 * geometry before sizing the copyin into
				 * the on-stack fpu_q buffer.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq))
					return (set_errno(EINVAL));
				if (copyin(fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
				/* point at the kernel copy from here on */
				fpp->fpu_q = fpu_q;
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}
		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out.  We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows_t), KM_SLEEP);
			if (copyin(uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			/* wbcnt comes from userland; bound it strictly */
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow) +
			    SPARC_MAXREGWINDOW * sizeof (int *) + sizeof (long);
			if (gwin_size > sizeof (gwindows_t) ||
			    copyin(uc.uc_mcontext.gwins, gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() will apply the kernel copy */
			uc.uc_mcontext.gwins = gwin;
		}

		/*
		 * get extra register state or asrs if any exists
		 * there is no extra register state for _LP64 user programs
		 */
		xregs_clrptr(lwp, &uc);
		if (copyin(&ucp->uc_mcontext.asrs, &uc.uc_mcontext.asrs,
		    sizeof (asrset_t))) {
			/* Free up gwin structure if used */
			if (gwin)
				kmem_free(gwin, sizeof (gwindows_t));
			return (set_errno(EFAULT));
		}

		restorecontext(&uc);

		/* Reflect any stack-state change back to the registered ustack. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		/*
		 * free extra register state area
		 * (dead code here -- see the note at the declarations)
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows_t));

		return (0);

	case GETUSTACK:
		if (copyout(&lwp->lwp_ustack, arg, sizeof (caddr_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Verify the address is readable before registering it. */
		if (copyin(arg, &dummy_stk, sizeof (dummy_stk)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
349 
350 
351 #ifdef _SYSCALL32_IMPL
352 
/*
 * Save user context for 32-bit processes.
 *
 * ILP32 counterpart of savecontext(): fills in *ucp (a ucontext32_t)
 * with the calling LWP's registers, FP state, stack bounds, and the
 * signal mask in 'mask'.  The native fpregset is converted to its
 * 32-bit form by fpuregset_nto32(); 'dfq' is passed through to that
 * conversion to receive the floating queue (may be NULL -- see
 * getsetcontext32()).
 */
void
savecontext32(ucontext32_t *ucp, k_sigset_t mask, struct fq32 *dfq)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	fpregset_t fpregs;	/* native-width staging for the FP state */

	/*
	 * We assign to every field through uc_mcontext.fpregs.fpu_en,
	 * but we have to bzero() everything after that.
	 */
	bzero(&ucp->uc_mcontext.fpregs.fpu_en, sizeof (ucontext32_t) -
	    offsetof(ucontext32_t, uc_mcontext.fpregs.fpu_en));
	/*
	 * There is an unused hole in the ucontext32_t structure; zero-fill
	 * it so that we don't expose kernel data to the user.
	 */
	(&ucp->uc_stack.ss_flags)[1] = 0;

	/*
	 * Flushing the user windows isn't strictly necessary; we do
	 * it to maintain backward compatibility.
	 */
	(void) flush_user_windows_to_stack(NULL);

	/* Claim all pieces valid; UC_FPU may be cleared again below. */
	ucp->uc_flags = UC_ALL;
	ucp->uc_link = (caddr32_t)lwp->lwp_oldcontext;

	/*
	 * Try to copyin() the ustack if one is registered. If the stack
	 * has zero size, this indicates that stack bounds checking has
	 * been disabled for this LWP. If stack bounds checking is disabled
	 * or the copyin() fails, we fall back to the legacy behavior.
	 */
	if (lwp->lwp_ustack == NULL ||
	    copyin((void *)lwp->lwp_ustack, &ucp->uc_stack,
	    sizeof (ucp->uc_stack)) != 0 ||
	    ucp->uc_stack.ss_size == 0) {

		if (lwp->lwp_sigaltstack.ss_flags == SS_ONSTACK) {
			/* On the alternate signal stack; narrow field-wise. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)lwp->lwp_sigaltstack.ss_sp;
			ucp->uc_stack.ss_size =
			    (size32_t)lwp->lwp_sigaltstack.ss_size;
			ucp->uc_stack.ss_flags = SS_ONSTACK;
		} else {
			/* Legacy: report the main process stack bounds. */
			ucp->uc_stack.ss_sp =
			    (caddr32_t)p->p_usrstack - p->p_stksize;
			ucp->uc_stack.ss_size =
			    (size32_t)p->p_stksize;
			ucp->uc_stack.ss_flags = 0;
		}
	}

	getgregs32(lwp, ucp->uc_mcontext.gregs);
	getfpregs(lwp, &fpregs);
	/* Narrow the native FP state into the 32-bit context (queue to dfq). */
	fpuregset_nto32(&fpregs, &ucp->uc_mcontext.fpregs, dfq);

	/* If the FPU was never enabled there is no FP state worth restoring. */
	if (ucp->uc_mcontext.fpregs.fpu_en == 0)
		ucp->uc_flags &= ~UC_FPU;
	/* No saved register windows accompany a kernel-built context. */
	ucp->uc_mcontext.gwins = (caddr32_t)NULL;

	/*
	 * Save signal mask (the 32- and 64-bit sigset_t structures are
	 * identical).
	 */
	sigktou(&mask, (sigset_t *)&ucp->uc_sigmask);
}
424 
/*
 * getcontext(2)/setcontext(2) backend for 32-bit (ILP32) processes.
 *
 * Mirrors getsetcontext(): 'flag' selects GETCONTEXT/SETCONTEXT/
 * GETUSTACK/SETUSTACK on 'arg'.  SETCONTEXT copies in a ucontext32_t,
 * widens it to a native ucontext_t with ucontext_32ton(), pulls in any
 * extra register state (xregs), and hands the result to
 * restorecontext().  Returns 0 or sets errno via set_errno().
 */
int
getsetcontext32(int flag, void *arg)
{
	ucontext32_t uc;
	ucontext_t   ucnat;	/* native-width copy passed to restorecontext() */
	struct fq fpu_qnat[MAXFPQ]; /* to hold "native" floating queue */
	struct fq32 fpu_q[MAXFPQ]; /* to hold 32 bit floating queue */
	fpregset32_t *fpp;
	gwindows32_t *gwin = NULL;	/* to hold windows */
	caddr_t xregs;		/* extra register state; set only when used */
	int xregs_size = 0;
	extern int nwindows;
	klwp_t *lwp = ttolwp(curthread);
	ucontext32_t *ucp;
	uint32_t ustack32;
	stack32_t dummy_stk32;	/* validates readability of SETUSTACK arg */

	/*
	 * In future releases, when the ucontext structure grows,
	 * getcontext should be modified to only return the fields
	 * specified in the uc_flags.  That way, the structure can grow
	 * and still be binary compatible will all .o's which will only
	 * have old fields defined in uc_flags
	 */

	switch (flag) {
	default:
		return (set_errno(EINVAL));

	case GETCONTEXT:
		/*
		 * Fold any schedctl lazy signal block into t_hold first,
		 * so the mask we save below is accurate.
		 */
		if (schedctl_sigblock(curthread)) {
			proc_t *p = ttoproc(curthread);
			mutex_enter(&p->p_lock);
			schedctl_finish_sigblock(curthread);
			mutex_exit(&p->p_lock);
		}
		savecontext32(&uc, curthread->t_hold, NULL);
		/*
		 * When using floating point it should not be possible to
		 * get here with a fpu_qcnt other than zero since we go
		 * to great pains to handle all outstanding FP exceptions
		 * before any system call code gets executed. However we
		 * clear fpu_q and fpu_qcnt here before copyout anyway -
		 * this will prevent us from interpreting the garbage we
		 * get back (when FP is not enabled) as valid queue data on
		 * a later setcontext(2).
		 */
		uc.uc_mcontext.fpregs.fpu_qcnt = 0;
		uc.uc_mcontext.fpregs.fpu_q = (caddr32_t)NULL;

		if (copyout(&uc, arg, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		return (0);

	case SETCONTEXT:
		ucp = arg;
		if (ucp == NULL)
			exit(CLD_EXITED, 0);
		/*
		 * Don't copyin filler or floating state unless we need it.
		 * The ucontext_t struct and fields are specified in the ABI.
		 */
		if (copyin(ucp, &uc, sizeof (uc) - sizeof (uc.uc_filler) -
		    sizeof (uc.uc_mcontext.fpregs) -
		    sizeof (uc.uc_mcontext.xrs) -
		    sizeof (uc.uc_mcontext.filler))) {
			return (set_errno(EFAULT));
		}
		if (copyin(&ucp->uc_mcontext.xrs, &uc.uc_mcontext.xrs,
		    sizeof (uc.uc_mcontext.xrs))) {
			return (set_errno(EFAULT));
		}
		fpp = &uc.uc_mcontext.fpregs;
		if (uc.uc_flags & UC_FPU) {
			/*
			 * Need to copyin floating point state
			 */
			if (copyin(&ucp->uc_mcontext.fpregs,
			    &uc.uc_mcontext.fpregs,
			    sizeof (uc.uc_mcontext.fpregs)))
				return (set_errno(EFAULT));
			/* if floating queue not empty */
			if ((fpp->fpu_q) && (fpp->fpu_qcnt)) {
				/*
				 * Sanity-check the user-supplied queue
				 * geometry before sizing the copyin into
				 * the on-stack fpu_q buffer.  The kernel
				 * copy is handed to ucontext_32ton() below.
				 */
				if (fpp->fpu_qcnt > MAXFPQ ||
				    fpp->fpu_q_entrysize <= 0 ||
				    fpp->fpu_q_entrysize > sizeof (struct fq32))
					return (set_errno(EINVAL));
				if (copyin((void *)fpp->fpu_q, fpu_q,
				    fpp->fpu_qcnt * fpp->fpu_q_entrysize))
					return (set_errno(EFAULT));
			} else {
				fpp->fpu_qcnt = 0; /* avoid confusion later */
			}
		} else {
			fpp->fpu_qcnt = 0;
		}

		if (uc.uc_mcontext.gwins) {	/* if windows in context */
			size_t gwin_size;

			/*
			 * We do the same computation here to determine
			 * how many bytes of gwindows_t to copy in that
			 * is also done in sendsig() to decide how many
			 * bytes to copy out.  We just *know* that wbcnt
			 * is the first element of the structure.
			 */
			gwin = kmem_zalloc(sizeof (gwindows32_t),
							KM_SLEEP);
			if (copyin((void *)uc.uc_mcontext.gwins,
			    &gwin->wbcnt, sizeof (gwin->wbcnt))) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* wbcnt comes from userland; bound it strictly */
			if (gwin->wbcnt < 0 || gwin->wbcnt > nwindows) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EINVAL));
			}
			gwin_size = gwin->wbcnt * sizeof (struct rwindow32) +
			    SPARC_MAXREGWINDOW * sizeof (caddr32_t) +
			    sizeof (int32_t);
			if (gwin_size > sizeof (gwindows32_t) ||
			    copyin((void *)uc.uc_mcontext.gwins,
			    gwin, gwin_size)) {
				kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			/* restorecontext() should ignore this */
			uc.uc_mcontext.gwins = (caddr32_t)0;
		}

		/* Widen the 32-bit context (and FP queue) to native form. */
		ucontext_32ton(&uc, &ucnat, fpu_q, fpu_qnat);

		/*
		 * get extra register state if any exists
		 */
		if (xregs_hasptr32(lwp, &uc) &&
		    ((xregs_size = xregs_getsize(curproc)) > 0)) {
			xregs = kmem_zalloc(xregs_size, KM_SLEEP);
			if (copyin((void *)
			    xregs_getptr32(lwp, &uc),
			    xregs, xregs_size)) {
				kmem_free(xregs, xregs_size);
				if (gwin)
					kmem_free(gwin, sizeof (gwindows32_t));
				return (set_errno(EFAULT));
			}
			xregs_setptr(lwp, &ucnat, xregs);
		} else {
			xregs_clrptr(lwp, &ucnat);
		}

		restorecontext(&ucnat);

		/* Reflect any stack-state change back to the registered ustack. */
		if ((uc.uc_flags & UC_STACK) && (lwp->lwp_ustack != 0)) {
			(void) copyout(&uc.uc_stack,
			    (stack32_t *)lwp->lwp_ustack, sizeof (stack32_t));
		}

		/* Apply saved register windows after the context proper. */
		if (gwin)
			setgwins32(lwp, gwin);

		/*
		 * free extra register state area
		 */
		if (xregs_size)
			kmem_free(xregs, xregs_size);

		if (gwin)
			kmem_free(gwin, sizeof (gwindows32_t));

		return (0);

	case GETUSTACK:
		ustack32 = (uint32_t)lwp->lwp_ustack;
		if (copyout(&ustack32, arg, sizeof (caddr32_t)))
			return (set_errno(EFAULT));

		return (0);

	case SETUSTACK:
		/* Verify the address is readable before registering it. */
		if (copyin(arg, &dummy_stk32, sizeof (dummy_stk32)))
			return (set_errno(EFAULT));

		lwp->lwp_ustack = (uintptr_t)arg;

		return (0);
	}
}
614 
615 #endif	/* _SYSCALL32_IMPL */
616