xref: /titanic_51/usr/src/uts/sparc/v9/os/v9dep.c (revision 35551380472894a564e057962b701af78f719377)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
23 /*	  All Rights Reserved  	*/
24 
25 
26 /*
27  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/vmparam.h>
36 #include <sys/systm.h>
37 #include <sys/stack.h>
38 #include <sys/frame.h>
39 #include <sys/proc.h>
40 #include <sys/ucontext.h>
41 #include <sys/cpuvar.h>
42 #include <sys/asm_linkage.h>
43 #include <sys/kmem.h>
44 #include <sys/errno.h>
45 #include <sys/bootconf.h>
46 #include <sys/archsystm.h>
47 #include <sys/fpu/fpusystm.h>
48 #include <sys/debug.h>
49 #include <sys/privregs.h>
50 #include <sys/machpcb.h>
51 #include <sys/psr_compat.h>
52 #include <sys/cmn_err.h>
53 #include <sys/asi.h>
54 #include <sys/copyops.h>
55 #include <sys/model.h>
56 #include <sys/panic.h>
57 #include <sys/exec.h>
58 
59 /*
60  * modify the lower 32bits of a uint64_t
61  */
62 #define	SET_LOWER_32(all, lower)	\
63 	(((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))
64 
65 #define	MEMCPY_FPU_EN		2	/* fprs on and fpu_en == 0 */
66 
67 static uint_t mkpsr(uint64_t tstate, uint32_t fprs);
68 
69 #ifdef _SYSCALL32_IMPL
70 static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
71     const struct fq32 *sfq, struct fq *dfq);
72 #endif /* _SYSCALL32_IMPL */
73 
74 /*
75  * Set floating-point registers.
76  * NOTE:  'lwp' might not correspond to 'curthread' since this is
77  * called from code in /proc to set the registers of another lwp.
78  */
void
setfpregs(klwp_t *lwp, fpregset_t *fp)
{
	struct machpcb *mpcb;
	kfpu_t *pfp;			/* lwp's saved fp context */
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	model_t model = lwp_getdatamodel(lwp);

	mpcb = lwptompcb(lwp);
	pfp = lwptofpu(lwp);

	/*
	 * fp->fpu_en is non-zero for both "real" fp programs and memcpy
	 * fp programs, because getfpregs forces fpu_en to MEMCPY_FPU_EN
	 * for the memcpy/threads case where (fpu_en == 0) &&
	 * (fpu_fprs & FPRS_FEF) — so a getfpregs/setfpregs round trip
	 * re-enters this branch.
	 */
	if (fp->fpu_en) {
		/* no preemption while we touch the live FPU state */
		kpreempt_disable();

		if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
		    fpu_exists) {
			/*
			 * He's not currently using the FPU but wants to in his
			 * new context - arrange for this on return to userland.
			 */
			pfp->fpu_fprs = (uint32_t)fprs;
		}
		/*
		 * Translate the MEMCPY_FPU_EN sentinel back to fpu_en == 0
		 * for the memcpy/threads case (where pfp->fpu_en == 0 &&
		 * (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF), so the saved
		 * context keeps its original meaning.
		 */
		if (fp->fpu_en == MEMCPY_FPU_EN)
			fp->fpu_en = 0;

		/*
		 * Load up a user's floating point context.
		 */
		if (fp->fpu_qcnt > MAXFPQ) 	/* plug security holes */
			fp->fpu_qcnt = MAXFPQ;
		fp->fpu_q_entrysize = sizeof (struct fq);

		/*
		 * For v9 kernel, copy all of the fp regs.
		 * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
		 * Restore entire fsr for v9, only lower half for v8.
		 */
		(void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
		if (model == DATAMODEL_LP64)
			pfp->fpu_fsr = fp->fpu_fsr;
		else
			pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
		pfp->fpu_qcnt = fp->fpu_qcnt;
		pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
		pfp->fpu_en = fp->fpu_en;
		pfp->fpu_q = mpcb->mpcb_fpu_q;
		if (fp->fpu_qcnt)
			(void) kcopy(fp->fpu_q, pfp->fpu_q,
			    fp->fpu_qcnt * fp->fpu_q_entrysize);
		/* FSR ignores these bits on load, so they can not be set */
		pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);

		/*
		 * If not the current process then resume() will handle it.
		 */
		if (lwp != ttolwp(curthread)) {
			/* force resume to reload fp regs */
			pfp->fpu_fprs |= FPRS_FEF;
			kpreempt_enable();
			return;
		}

		/*
		 * Load up FPU with new floating point context.
		 */
		if (fpu_exists) {
			pfp->fpu_fprs = _fp_read_fprs();
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* enable the FPU before loading registers */
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "setfpregs with fp disabled!\n");
#endif
			}
			/*
			 * Load all fp regs for v9 user programs, but only
			 * load the lower half for v8[plus] programs.
			 */
			if (model == DATAMODEL_LP64)
				fp_restore(pfp);
			else
				fp_v8_load(pfp);
		}

		kpreempt_enable();
	} else {
		if ((pfp->fpu_en) ||	/* normal fp case */
		    (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
			/*
			 * Currently the lwp has floating point enabled.
			 * Turn off FPRS_FEF in user's fprs, saved and
			 * real copies thereof.
			 */
			pfp->fpu_en = 0;
			if (fpu_exists) {
				fprs = 0;
				/* only touch live %fprs if it's ours */
				if (lwp == ttolwp(curthread))
					_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
			}
		}
	}
}
195 
196 #ifdef	_SYSCALL32_IMPL
197 void
198 setfpregs32(klwp_t *lwp, fpregset32_t *fp)
199 {
200 	fpregset_t fpregs;
201 
202 	fpuregset_32ton(fp, &fpregs, NULL, NULL);
203 	setfpregs(lwp, &fpregs);
204 }
205 #endif	/* _SYSCALL32_IMPL */
206 
207 /*
208  * NOTE:  'lwp' might not correspond to 'curthread' since this is
209  * called from code in /proc to set the registers of another lwp.
210  */
211 void
212 run_fpq(klwp_t *lwp, fpregset_t *fp)
213 {
214 	/*
215 	 * If the context being loaded up includes a floating queue,
216 	 * we need to simulate those instructions (since we can't reload
217 	 * the fpu) and pass the process any appropriate signals
218 	 */
219 
220 	if (lwp == ttolwp(curthread)) {
221 		if (fpu_exists) {
222 			if (fp->fpu_qcnt)
223 				fp_runq(lwp->lwp_regs);
224 		}
225 	}
226 }
227 
228 /*
229  * Get floating-point registers.
230  * NOTE:  'lwp' might not correspond to 'curthread' since this is
231  * called from code in /proc to set the registers of another lwp.
232  */
void
getfpregs(klwp_t *lwp, fpregset_t *fp)
{
	kfpu_t *pfp;			/* lwp's saved fp context */
	model_t model = lwp_getdatamodel(lwp);

	pfp = lwptofpu(lwp);
	kpreempt_disable();
	/* refresh the saved %fprs from hardware if this lwp owns the FPU */
	if (fpu_exists && ttolwp(curthread) == lwp)
		pfp->fpu_fprs = _fp_read_fprs();

	/*
	 * First check the fpu_en case, for normal fp programs.
	 * Next check the fprs case, for fp use by memcpy/threads.
	 */
	if (((fp->fpu_en = pfp->fpu_en) != 0) ||
	    (pfp->fpu_fprs & FPRS_FEF)) {
		/*
		 * Force setfpregs to restore the fp context in
		 * setfpregs for the memcpy and threads cases (where
		 * pfp->fpu_en == 0 && (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 * MEMCPY_FPU_EN is a sentinel that setfpregs maps back to 0.
		 */
		if (pfp->fpu_en == 0)
			fp->fpu_en = MEMCPY_FPU_EN;
		/*
		 * If we have an fpu and the current thread owns the fp
		 * context, flush fp * registers into the pcb. Save all
		 * the fp regs for v9, xregs_getfpregs saves the upper half
		 * for v8plus. Save entire fsr for v9, only lower half for v8.
		 */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

				/* enable the FPU so we can flush it */
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "getfpregs with fp disabled!\n");
#endif
			}
			if (model == DATAMODEL_LP64)
				fp_fksave(pfp);
			else
				fp_v8_fksave(pfp);
		}
		(void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
		fp->fpu_q = pfp->fpu_q;
		if (model == DATAMODEL_LP64)
			fp->fpu_fsr = pfp->fpu_fsr;
		else
			/* v8 callers only see the lower 32 bits of %fsr */
			fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
		fp->fpu_qcnt = pfp->fpu_qcnt;
		fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
	} else {
		/* FPU not in use: report all registers as NaN, empty queue */
		int i;
		for (i = 0; i < 32; i++)		/* NaN */
			((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
		if (model == DATAMODEL_LP64) {
			for (i = 16; i < 32; i++)	/* NaN */
				((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
				    (uint64_t)-1;
		}
		fp->fpu_fsr = 0;
		fp->fpu_qcnt = 0;
	}
	kpreempt_enable();
}
302 
303 #ifdef	_SYSCALL32_IMPL
304 void
305 getfpregs32(klwp_t *lwp, fpregset32_t *fp)
306 {
307 	fpregset_t fpregs;
308 
309 	getfpregs(lwp, &fpregs);
310 	fpuregset_nto32(&fpregs, fp, NULL);
311 }
312 #endif	/* _SYSCALL32_IMPL */
313 
314 /*
315  * Set general registers.
316  * NOTE:  'lwp' might not correspond to 'curthread' since this is
317  * called from code in /proc to set the registers of another lwp.
318  */
319 
320 /* 64-bit gregset_t */
void
setgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	kfpu_t *fp = lwptofpu(lwp);
	uint64_t tbits;

	int current = (lwp == curthread->t_lwp);

	if (current)
		(void) save_syscall_args();	/* copy the args first */

	/*
	 * Only the CCR and ASI fields of %tstate are taken from the
	 * caller; mask them into the saved %tstate without disturbing
	 * the remaining (privileged) bits.
	 */
	tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
		((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
	rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
		((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
	rp->r_tstate |= tbits;
	kpreempt_disable();
	/* update saved %fprs; write the live register only if it's ours */
	fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
	if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
		_fp_write_fprs(fp->fpu_fprs);
	kpreempt_enable();

	/*
	 * pc and npc must be 4-byte aligned on sparc.
	 * We silently make it so to avoid a watchdog reset.
	 */
	rp->r_pc = grp[REG_PC] & ~03L;
	rp->r_npc = grp[REG_nPC] & ~03L;
	rp->r_y = grp[REG_Y];

	rp->r_g1 = grp[REG_G1];
	rp->r_g2 = grp[REG_G2];
	rp->r_g3 = grp[REG_G3];
	rp->r_g4 = grp[REG_G4];
	rp->r_g5 = grp[REG_G5];
	rp->r_g6 = grp[REG_G6];
	rp->r_g7 = grp[REG_G7];

	rp->r_o0 = grp[REG_O0];
	rp->r_o1 = grp[REG_O1];
	rp->r_o2 = grp[REG_O2];
	rp->r_o3 = grp[REG_O3];
	rp->r_o4 = grp[REG_O4];
	rp->r_o5 = grp[REG_O5];
	rp->r_o6 = grp[REG_O6];
	rp->r_o7 = grp[REG_O7];

	if (current) {
		/*
		 * This was called from a system call, but we
		 * do not want to return via the shared window;
		 * restoring the CPU context changes everything.
		 */
		lwp->lwp_eosys = JUSTRETURN;
		curthread->t_post_sys = 1;
	}
}
379 
380 /*
381  * Return the general registers.
382  * NOTE:  'lwp' might not correspond to 'curthread' since this is
383  * called from code in /proc to get the registers of another lwp.
384  */
385 void
386 getgregs(klwp_t *lwp, gregset_t grp)
387 {
388 	struct regs *rp = lwptoregs(lwp);
389 	uint32_t fprs;
390 
391 	kpreempt_disable();
392 	if (fpu_exists && ttolwp(curthread) == lwp) {
393 		fprs = _fp_read_fprs();
394 	} else {
395 		kfpu_t *fp = lwptofpu(lwp);
396 		fprs = fp->fpu_fprs;
397 	}
398 	kpreempt_enable();
399 	grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
400 	grp[REG_PC] = rp->r_pc;
401 	grp[REG_nPC] = rp->r_npc;
402 	grp[REG_Y] = (uint32_t)rp->r_y;
403 	grp[REG_G1] = rp->r_g1;
404 	grp[REG_G2] = rp->r_g2;
405 	grp[REG_G3] = rp->r_g3;
406 	grp[REG_G4] = rp->r_g4;
407 	grp[REG_G5] = rp->r_g5;
408 	grp[REG_G6] = rp->r_g6;
409 	grp[REG_G7] = rp->r_g7;
410 	grp[REG_O0] = rp->r_o0;
411 	grp[REG_O1] = rp->r_o1;
412 	grp[REG_O2] = rp->r_o2;
413 	grp[REG_O3] = rp->r_o3;
414 	grp[REG_O4] = rp->r_o4;
415 	grp[REG_O5] = rp->r_o5;
416 	grp[REG_O6] = rp->r_o6;
417 	grp[REG_O7] = rp->r_o7;
418 	grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
419 	grp[REG_FPRS] = fprs;
420 }
421 
422 void
423 getgregs32(klwp_t *lwp, gregset32_t grp)
424 {
425 	struct regs *rp = lwptoregs(lwp);
426 	uint32_t fprs;
427 
428 	kpreempt_disable();
429 	if (fpu_exists && ttolwp(curthread) == lwp) {
430 		fprs = _fp_read_fprs();
431 	} else {
432 		kfpu_t *fp = lwptofpu(lwp);
433 		fprs = fp->fpu_fprs;
434 	}
435 	kpreempt_enable();
436 	grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
437 	grp[REG_PC] = rp->r_pc;
438 	grp[REG_nPC] = rp->r_npc;
439 	grp[REG_Y] = rp->r_y;
440 	grp[REG_G1] = rp->r_g1;
441 	grp[REG_G2] = rp->r_g2;
442 	grp[REG_G3] = rp->r_g3;
443 	grp[REG_G4] = rp->r_g4;
444 	grp[REG_G5] = rp->r_g5;
445 	grp[REG_G6] = rp->r_g6;
446 	grp[REG_G7] = rp->r_g7;
447 	grp[REG_O0] = rp->r_o0;
448 	grp[REG_O1] = rp->r_o1;
449 	grp[REG_O2] = rp->r_o2;
450 	grp[REG_O3] = rp->r_o3;
451 	grp[REG_O4] = rp->r_o4;
452 	grp[REG_O5] = rp->r_o5;
453 	grp[REG_O6] = rp->r_o6;
454 	grp[REG_O7] = rp->r_o7;
455 }
456 
457 /*
458  * Return the user-level PC.
459  * If in a system call, return the address of the syscall trap.
460  */
461 greg_t
462 getuserpc()
463 {
464 	return (lwptoregs(ttolwp(curthread))->r_pc);
465 }
466 
467 /*
468  * Set register windows.
469  */
470 void
471 setgwins(klwp_t *lwp, gwindows_t *gwins)
472 {
473 	struct machpcb *mpcb = lwptompcb(lwp);
474 	int wbcnt = gwins->wbcnt;
475 	caddr_t sp;
476 	int i;
477 	struct rwindow32 *rwp;
478 	int wbuf_rwindow_size;
479 	int is64;
480 
481 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
482 		wbuf_rwindow_size = WINDOWSIZE32;
483 		is64 = 0;
484 	} else {
485 		wbuf_rwindow_size = WINDOWSIZE64;
486 		is64 = 1;
487 	}
488 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
489 	mpcb->mpcb_wbcnt = 0;
490 	for (i = 0; i < wbcnt; i++) {
491 		sp = (caddr_t)gwins->spbuf[i];
492 		mpcb->mpcb_spbuf[i] = sp;
493 		rwp = (struct rwindow32 *)
494 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
495 		if (is64 && IS_V9STACK(sp))
496 			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
497 		else
498 			rwindow_nto32(&gwins->wbuf[i], rwp);
499 		mpcb->mpcb_wbcnt++;
500 	}
501 }
502 
503 void
504 setgwins32(klwp_t *lwp, gwindows32_t *gwins)
505 {
506 	struct machpcb *mpcb = lwptompcb(lwp);
507 	int wbcnt = gwins->wbcnt;
508 	caddr_t sp;
509 	int i;
510 
511 	struct rwindow *rwp;
512 	int wbuf_rwindow_size;
513 	int is64;
514 
515 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
516 		wbuf_rwindow_size = WINDOWSIZE32;
517 		is64 = 0;
518 	} else {
519 		wbuf_rwindow_size = WINDOWSIZE64;
520 		is64 = 1;
521 	}
522 
523 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
524 	mpcb->mpcb_wbcnt = 0;
525 	for (i = 0; i < wbcnt; i++) {
526 		sp = (caddr_t)(uintptr_t)gwins->spbuf[i];
527 		mpcb->mpcb_spbuf[i] = sp;
528 		rwp = (struct rwindow *)
529 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
530 		if (is64 && IS_V9STACK(sp))
531 			rwindow_32ton(&gwins->wbuf[i], rwp);
532 		else
533 			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
534 		mpcb->mpcb_wbcnt++;
535 	}
536 }
537 
538 /*
539  * Get register windows.
540  * NOTE:  'lwp' might not correspond to 'curthread' since this is
541  * called from code in /proc to set the registers of another lwp.
542  */
543 void
544 getgwins(klwp_t *lwp, gwindows_t *gwp)
545 {
546 	struct machpcb *mpcb = lwptompcb(lwp);
547 	int wbcnt = mpcb->mpcb_wbcnt;
548 	caddr_t sp;
549 	int i;
550 	struct rwindow32 *rwp;
551 	int wbuf_rwindow_size;
552 	int is64;
553 
554 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
555 		wbuf_rwindow_size = WINDOWSIZE32;
556 		is64 = 0;
557 	} else {
558 		wbuf_rwindow_size = WINDOWSIZE64;
559 		is64 = 1;
560 	}
561 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
562 	gwp->wbcnt = wbcnt;
563 	for (i = 0; i < wbcnt; i++) {
564 		sp = mpcb->mpcb_spbuf[i];
565 		gwp->spbuf[i] = (greg_t *)sp;
566 		rwp = (struct rwindow32 *)
567 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
568 		if (is64 && IS_V9STACK(sp))
569 			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
570 		else
571 			rwindow_32ton(rwp, &gwp->wbuf[i]);
572 	}
573 }
574 
575 void
576 getgwins32(klwp_t *lwp, gwindows32_t *gwp)
577 {
578 	struct machpcb *mpcb = lwptompcb(lwp);
579 	int wbcnt = mpcb->mpcb_wbcnt;
580 	int i;
581 	struct rwindow *rwp;
582 	int wbuf_rwindow_size;
583 	caddr_t sp;
584 	int is64;
585 
586 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
587 		wbuf_rwindow_size = WINDOWSIZE32;
588 		is64 = 0;
589 	} else {
590 		wbuf_rwindow_size = WINDOWSIZE64;
591 		is64 = 1;
592 	}
593 
594 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
595 	gwp->wbcnt = wbcnt;
596 	for (i = 0; i < wbcnt; i++) {
597 		sp = mpcb->mpcb_spbuf[i];
598 		rwp = (struct rwindow *)
599 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
600 		gwp->spbuf[i] = (caddr32_t)(uintptr_t)sp;
601 		if (is64 && IS_V9STACK(sp))
602 			rwindow_nto32(rwp, &gwp->wbuf[i]);
603 		else
604 			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
605 	}
606 }
607 
608 /*
609  * For things that depend on register state being on the stack,
610  * copy any register windows that get saved into the window buffer
611  * (in the pcb) onto the stack.  This normally gets fixed up
612  * before returning to a user program.  Callers of this routine
613  * require this to happen immediately because a later kernel
614  * operation depends on window state (like instruction simulation).
615  */
int
flush_user_windows_to_stack(caddr_t *psp)
{
	int j, k;
	caddr_t sp;
	struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
	int err;
	int error = 0;		/* first copyout error, if any */
	int wbuf_rwindow_size;	/* window slot size in the pcb buffer */
	int rwindow_size;	/* window size on the user stack */
	int stack_align;
	int watched;

	/* trap any live windows into the pcb buffer first */
	flush_user_windows();

	if (mpcb->mpcb_wstate != WSTATE_USER32)
		wbuf_rwindow_size = WINDOWSIZE64;
	else
		wbuf_rwindow_size = WINDOWSIZE32;

	/* walk the buffered windows from newest to oldest */
	j = mpcb->mpcb_wbcnt;
	while (j > 0) {
		sp = mpcb->mpcb_spbuf[--j];

		if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
		    IS_V9STACK(sp)) {
			/* 64-bit stack: undo the V9 stack bias */
			sp += V9BIAS64;
			stack_align = STACK_ALIGN64;
			rwindow_size = WINDOWSIZE64;
		} else {
			/*
			 * Reduce sp to a 32 bit value.  This was originally
			 * done by casting down to uint32_t and back up to
			 * caddr_t, but one compiler didn't like that, so the
			 * uintptr_t casts were added.  The temporary 32 bit
			 * variable was introduced to avoid depending on all
			 * compilers to generate the desired assembly code for a
			 * quadruple cast in a single expression.
			 */
			caddr32_t sp32 = (uint32_t)(uintptr_t)sp;
			sp = (caddr_t)(uintptr_t)sp32;

			stack_align = STACK_ALIGN32;
			rwindow_size = WINDOWSIZE32;
		}
		/* misaligned sp: leave this window buffered, try the next */
		if (((uintptr_t)sp & (stack_align - 1)) != 0)
			continue;

		watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
		err = xcopyout(mpcb->mpcb_wbuf +
		    (j * wbuf_rwindow_size), sp, rwindow_size);
		if (err != 0) {
			if (psp != NULL) {
				/*
				 * Determine the offending address.
				 * It may not be the stack pointer itself.
				 */
				uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
				    (j * wbuf_rwindow_size));
				uint_t *uaddr = (uint_t *)sp;

				/* replay word by word until the fault hits */
				for (k = 0;
				    k < rwindow_size / sizeof (int);
				    k++, kaddr++, uaddr++) {
					if (suword32(uaddr, *kaddr))
						break;
				}

				/* can't happen? */
				if (k == rwindow_size / sizeof (int))
					uaddr = (uint_t *)sp;

				*psp = (caddr_t)uaddr;
			}
			error = err;
		} else {
			/*
			 * stack was aligned and copyout succeeded;
			 * move other windows down.
			 */
			mpcb->mpcb_wbcnt--;
			for (k = j; k < mpcb->mpcb_wbcnt; k++) {
				mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
				bcopy(
				    mpcb->mpcb_wbuf +
					((k+1) * wbuf_rwindow_size),
				    mpcb->mpcb_wbuf +
					(k * wbuf_rwindow_size),
				    wbuf_rwindow_size);
			}
		}
		if (watched)
			watch_enable_addr(sp, rwindow_size, S_WRITE);
	} /* while there are windows in the wbuf */
	return (error);
}
712 
/*
 * ILP32 variant of copy_return_window(): snapshot up to two return
 * windows from the user stack into mpcb_rwin[0..1], recording their
 * stack pointers in mpcb_rsp[0..1].  Returns non-zero if the first
 * window was captured.
 */
static int
copy_return_window32(int dotwo)
{
	klwp_t *lwp = ttolwp(curthread);
	struct machpcb *mpcb = lwptompcb(lwp);
	struct rwindow32 rwindow32;
	caddr_t sp1;
	caddr_t sp2;

	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/*
		 * Reduce r_sp to a 32 bit value before storing it in sp1.  This
		 * was originally done by casting down to uint32_t and back up
		 * to caddr_t, but that generated complaints under one compiler.
		 * The uintptr_t cast was added to address that, and the
		 * temporary 32 bit variable was introduced to avoid depending
		 * on all compilers to generate the desired assembly code for a
		 * triple cast in a single expression.
		 */
		caddr32_t sp1_32 = (uint32_t)lwptoregs(lwp)->r_sp;
		sp1 = (caddr_t)(uintptr_t)sp1_32;

		if ((copyin_nowatch(sp1, &rwindow32,
		    sizeof (struct rwindow32))) == 0)
			mpcb->mpcb_rsp[0] = sp1;
		/*
		 * NOTE(review): the conversion runs even when copyin failed,
		 * widening stale stack contents of rwindow32; mpcb_rsp[0]
		 * stays NULL in that case, presumably making mpcb_rwin[0]
		 * irrelevant — confirm no consumer reads it unguarded.
		 */
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
	}
	mpcb->mpcb_rsp[1] = NULL;
	/* optionally capture the caller's window via the saved frame ptr */
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		if ((copyin_nowatch(sp2, &rwindow32,
		    sizeof (struct rwindow32)) == 0))
			mpcb->mpcb_rsp[1] = sp2;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
751 
/*
 * Snapshot up to two return windows from the current lwp's user stack
 * into mpcb_rwin[0..1], recording the (unbiased) stack pointers in
 * mpcb_rsp[0..1].  Delegates to copy_return_window32() for ILP32
 * processes.  Returns non-zero if the first window was captured.
 */
int
copy_return_window(int dotwo)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp;
	struct machpcb *mpcb;
	caddr_t sp1;
	caddr_t sp2;

	if (p->p_model == DATAMODEL_ILP32)
		return (copy_return_window32(dotwo));

	lwp = ttolwp(curthread);
	mpcb = lwptompcb(lwp);
	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/* 64-bit stack pointers carry the V9 bias; add it to access */
		sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
		if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
	}
	mpcb->mpcb_rsp[1] = NULL;
	/* optionally capture the caller's window via the saved frame ptr */
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		sp2 += STACK_BIAS;
		if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
783 
784 /*
785  * Clear registers on exec(2).
786  */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	klwp_t *lwp = ttolwp(curthread);
	kfpu_t *fpp = lwptofpu(lwp);
	struct machpcb *mpcb = lwptompcb(lwp);
	proc_t *p = ttoproc(curthread);

	/*
	 * Initialize user registers.
	 */
	(void) save_syscall_args();	/* copy args from registers first */
	rp = lwptoregs(lwp);
	/* zero everything except %g7 (thread ptr), pc/npc, and %o6 (sp) */
	rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
	    rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
	    rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
	if (p->p_model == DATAMODEL_ILP32)
		rp->r_tstate = TSTATE_USER32;
	else
		rp->r_tstate = TSTATE_USER64;
	/* without an FPU, start with the PEF (fp-enable) bit clear */
	if (!fpu_exists)
		rp->r_tstate &= ~TSTATE_PEF;
	rp->r_g7 = args->thrptr;
	rp->r_pc = args->entry;
	rp->r_npc = args->entry + 4;
	rp->r_y = 0;
	curthread->t_post_sys = 1;
	lwp->lwp_eosys = JUSTRETURN;
	lwp->lwp_pcb.pcb_trap0addr = NULL;	/* no trap 0 handler */
	/*
	 * Clear the fixalignment flag
	 */
	p->p_fixalignment = 0;

	/*
	 * Throw out old user windows, init window buf.
	 */
	trash_user_windows();

	/*
	 * If the data model changed across exec, resize the pcb window
	 * buffer to the window size of the new model.  mpcb_wbcnt must
	 * be zero here (asserted) since the buffer was just trashed.
	 */
	if (p->p_model == DATAMODEL_LP64 &&
	    mpcb->mpcb_wstate != WSTATE_USER64) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_free(mpcb->mpcb_wbuf, MAXWIN * sizeof (struct rwindow32));
		mpcb->mpcb_wbuf = kmem_alloc(MAXWIN *
		    sizeof (struct rwindow64), KM_SLEEP);
		ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
		mpcb->mpcb_wstate = WSTATE_USER64;
	} else if (p->p_model == DATAMODEL_ILP32 &&
	    mpcb->mpcb_wstate != WSTATE_USER32) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_free(mpcb->mpcb_wbuf, MAXWIN * sizeof (struct rwindow64));
		mpcb->mpcb_wbuf = kmem_alloc(MAXWIN *
		    sizeof (struct rwindow32), KM_SLEEP);
		mpcb->mpcb_wstate = WSTATE_USER32;
	}
	/* refresh the cached physical addresses of the pcb and wbuf */
	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes
	 * or by the lib_psr memcpy routines.
	 */
	if (fpu_exists) {
		extern void _fp_write_fprs(unsigned);
		_fp_write_fprs(0);
	}
	fpp->fpu_en = 0;
	fpp->fpu_fprs = 0;
}
859 
860 void
861 lwp_swapin(kthread_t *tp)
862 {
863 	struct machpcb *mpcb = lwptompcb(ttolwp(tp));
864 
865 	mpcb->mpcb_pa = va_to_pa(mpcb);
866 	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
867 }
868 
869 /*
870  * Construct the execution environment for the user's signal
871  * handler and arrange for control to be given to it on return
872  * to userland.  The library code now calls setcontext() to
873  * clean up after the signal handler, so sigret() is no longer
874  * needed.
875  */
876 int
877 sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
878 {
879 	/*
880 	 * 'volatile' is needed to ensure that values are
881 	 * correct on the error return from on_fault().
882 	 */
883 	volatile int minstacksz; /* min stack required to catch signal */
884 	int newstack = 0;	/* if true, switching to altstack */
885 	label_t ljb;
886 	caddr_t sp;
887 	struct regs *volatile rp;
888 	klwp_t *lwp = ttolwp(curthread);
889 	proc_t *volatile p = ttoproc(curthread);
890 	int fpq_size = 0;
891 	struct sigframe {
892 		struct frame frwin;
893 		ucontext_t uc;
894 	};
895 	siginfo_t *sip_addr;
896 	struct sigframe *volatile fp;
897 	ucontext_t *volatile tuc = NULL;
898 	char *volatile xregs = NULL;
899 	volatile size_t xregs_size = 0;
900 	gwindows_t *volatile gwp = NULL;
901 	volatile int gwin_size = 0;
902 	kfpu_t *fpp;
903 	struct machpcb *mpcb;
904 	volatile int watched = 0;
905 	volatile int watched2 = 0;
906 	caddr_t tos;
907 
908 	/*
909 	 * Make sure the current last user window has been flushed to
910 	 * the stack save area before we change the sp.
911 	 * Restore register window if a debugger modified it.
912 	 */
913 	(void) flush_user_windows_to_stack(NULL);
914 	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
915 		xregrestore(lwp, 0);
916 
917 	mpcb = lwptompcb(lwp);
918 	rp = lwptoregs(lwp);
919 
920 	/*
921 	 * Clear the watchpoint return stack pointers.
922 	 */
923 	mpcb->mpcb_rsp[0] = NULL;
924 	mpcb->mpcb_rsp[1] = NULL;
925 
926 	minstacksz = sizeof (struct sigframe);
927 
928 	/*
929 	 * We know that sizeof (siginfo_t) is stack-aligned:
930 	 * 128 bytes for ILP32, 256 bytes for LP64.
931 	 */
932 	if (sip != NULL)
933 		minstacksz += sizeof (siginfo_t);
934 
935 	/*
936 	 * These two fields are pointed to by ABI structures and may
937 	 * be of arbitrary length. Size them now so we know how big
938 	 * the signal frame has to be.
939 	 */
940 	fpp = lwptofpu(lwp);
941 	fpp->fpu_fprs = _fp_read_fprs();
942 	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
943 		fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
944 		minstacksz += SA(fpq_size);
945 	}
946 
947 	mpcb = lwptompcb(lwp);
948 	if (mpcb->mpcb_wbcnt != 0) {
949 		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
950 		    (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
951 		minstacksz += SA(gwin_size);
952 	}
953 
954 	/*
955 	 * Extra registers, if support by this platform, may be of arbitrary
956 	 * length. Size them now so we know how big the signal frame has to be.
957 	 * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
958 	 */
959 	minstacksz += SA(xregs_size);
960 
961 	/*
962 	 * Figure out whether we will be handling this signal on
963 	 * an alternate stack specified by the user. Then allocate
964 	 * and validate the stack requirements for the signal handler
965 	 * context. on_fault will catch any faults.
966 	 */
967 	newstack = (sigismember(&u.u_sigonstack, sig) &&
968 	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
969 
970 	tos = (caddr_t)rp->r_sp + STACK_BIAS;
971 	if (newstack != 0) {
972 		fp = (struct sigframe *)
973 		    (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
974 			SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
975 			SA(minstacksz));
976 	} else {
977 		/*
978 		 * If we were unable to flush all register windows to
979 		 * the stack and we are not now on an alternate stack,
980 		 * just dump core with a SIGSEGV back in psig().
981 		 */
982 		if (sig == SIGSEGV &&
983 		    mpcb->mpcb_wbcnt != 0 &&
984 		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
985 			return (0);
986 		fp = (struct sigframe *)(tos - SA(minstacksz));
987 		/*
988 		 * Could call grow here, but stack growth now handled below
989 		 * in code protected by on_fault().
990 		 */
991 	}
992 	sp = (caddr_t)fp + sizeof (struct sigframe);
993 
994 	/*
995 	 * Make sure process hasn't trashed its stack.
996 	 */
997 	if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0 ||
998 	    (caddr_t)fp >= p->p_usrstack ||
999 	    (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
1000 #ifdef DEBUG
1001 		printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1002 		    PTOU(p)->u_comm, p->p_pid, sig);
1003 		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1004 		    (void *)fp, (void *)hdlr, rp->r_pc);
1005 
1006 		if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0)
1007 			printf("bad stack alignment\n");
1008 		else
1009 			printf("fp above USRSTACK\n");
1010 #endif
1011 		return (0);
1012 	}
1013 
1014 	watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1015 	if (on_fault(&ljb))
1016 		goto badstack;
1017 
1018 	tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
1019 	savecontext(tuc, lwp->lwp_sigoldmask);
1020 
1021 	/*
1022 	 * save extra register state if it exists
1023 	 */
1024 	if (xregs_size != 0) {
1025 		xregs_setptr(lwp, tuc, sp);
1026 		xregs = kmem_alloc(xregs_size, KM_SLEEP);
1027 		xregs_get(lwp, xregs);
1028 		copyout_noerr(xregs, sp, xregs_size);
1029 		kmem_free(xregs, xregs_size);
1030 		xregs = NULL;
1031 		sp += SA(xregs_size);
1032 	}
1033 
1034 	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1035 	kmem_free(tuc, sizeof (*tuc));
1036 	tuc = NULL;
1037 
1038 	if (sip != NULL) {
1039 		zoneid_t zoneid;
1040 
1041 		uzero(sp, sizeof (siginfo_t));
1042 		if (SI_FROMUSER(sip) &&
1043 		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1044 		    zoneid != sip->si_zoneid) {
1045 			k_siginfo_t sani_sip = *sip;
1046 			sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
1047 			sani_sip.si_uid = 0;
1048 			sani_sip.si_ctid = -1;
1049 			sani_sip.si_zoneid = zoneid;
1050 			copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
1051 		} else {
1052 			copyout_noerr(sip, sp, sizeof (*sip));
1053 		}
1054 		sip_addr = (siginfo_t *)sp;
1055 		sp += sizeof (siginfo_t);
1056 
1057 		if (sig == SIGPROF &&
1058 		    curthread->t_rprof != NULL &&
1059 		    curthread->t_rprof->rp_anystate) {
1060 			/*
1061 			 * We stand on our head to deal with
1062 			 * the real time profiling signal.
1063 			 * Fill in the stuff that doesn't fit
1064 			 * in a normal k_siginfo structure.
1065 			 */
1066 			int i = sip->si_nsysarg;
1067 			while (--i >= 0) {
1068 				sulword_noerr(
1069 				    (ulong_t *)&sip_addr->si_sysarg[i],
1070 				    (ulong_t)lwp->lwp_arg[i]);
1071 			}
1072 			copyout_noerr(curthread->t_rprof->rp_state,
1073 			    sip_addr->si_mstate,
1074 			    sizeof (curthread->t_rprof->rp_state));
1075 		}
1076 	} else {
1077 		sip_addr = (siginfo_t *)NULL;
1078 	}
1079 
1080 	/*
1081 	 * When flush_user_windows_to_stack() can't save all the
1082 	 * windows to the stack, it puts them in the lwp's pcb.
1083 	 */
1084 	if (gwin_size != 0) {
1085 		gwp = kmem_alloc(gwin_size, KM_SLEEP);
1086 		getgwins(lwp, gwp);
1087 		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
1088 		copyout_noerr(gwp, sp, gwin_size);
1089 		kmem_free(gwp, gwin_size);
1090 		gwp = NULL;
1091 		sp += SA(gwin_size);
1092 	} else
1093 		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);
1094 
1095 	if (fpq_size != 0) {
1096 		struct fq *fqp = (struct fq *)sp;
1097 		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
1098 		copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);
1099 
1100 		/*
1101 		 * forget the fp queue so that the signal handler can run
1102 		 * without being harrassed--it will do a setcontext that will
1103 		 * re-establish the queue if there still is one
1104 		 *
1105 		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
1106 		 *	to terminate its processing of the queue after signal
1107 		 *	delivery.
1108 		 */
1109 		mpcb->mpcb_fpu->fpu_qcnt = 0;
1110 		sp += SA(fpq_size);
1111 
1112 		/* Also, syscall needs to know about this */
1113 		mpcb->mpcb_flags |= FP_TRAPPED;
1114 
1115 	} else {
1116 		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
1117 		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1118 	}
1119 
1120 
1121 	/*
1122 	 * Since we flushed the user's windows and we are changing his
1123 	 * stack pointer, the window that the user will return to will
1124 	 * be restored from the save area in the frame we are setting up.
1125 	 * We copy in save area for old stack pointer so that debuggers
1126 	 * can do a proper stack backtrace from the signal handler.
1127 	 */
1128 	if (mpcb->mpcb_wbcnt == 0) {
1129 		watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
1130 		    S_READ);
1131 		ucopy(tos, &fp->frwin, sizeof (struct rwindow));
1132 	}
1133 
1134 	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1135 
1136 	if (newstack != 0) {
1137 		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1138 
1139 		if (lwp->lwp_ustack) {
1140 			copyout_noerr(&lwp->lwp_sigaltstack,
1141 			    (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
1142 		}
1143 	}
1144 
1145 	no_fault();
1146 	mpcb->mpcb_wbcnt = 0;		/* let user go on */
1147 
1148 	if (watched2)
1149 		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1150 	if (watched)
1151 		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1152 
1153 	/*
1154 	 * Set up user registers for execution of signal handler.
1155 	 */
1156 	rp->r_sp = (uintptr_t)fp - STACK_BIAS;
1157 	rp->r_pc = (uintptr_t)hdlr;
1158 	rp->r_npc = (uintptr_t)hdlr + 4;
1159 	/* make sure %asi is ASI_PNF */
1160 	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1161 	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1162 	rp->r_o0 = sig;
1163 	rp->r_o1 = (uintptr_t)sip_addr;
1164 	rp->r_o2 = (uintptr_t)&fp->uc;
1165 	/*
1166 	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
1167 	 * lwp_eosys is handled, so setting it here would affect the next
1168 	 * system call.
1169 	 */
1170 	return (1);
1171 
1172 badstack:
1173 	no_fault();
1174 	if (watched2)
1175 		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1176 	if (watched)
1177 		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1178 	if (tuc)
1179 		kmem_free(tuc, sizeof (ucontext_t));
1180 	if (xregs)
1181 		kmem_free(xregs, xregs_size);
1182 	if (gwp)
1183 		kmem_free(gwp, gwin_size);
1184 #ifdef DEBUG
1185 	printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1186 	    PTOU(p)->u_comm, p->p_pid, sig);
1187 	printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
1188 	    (void *)fp, (void *)hdlr, rp->r_pc);
1189 #endif
1190 	return (0);
1191 }
1192 
1193 
1194 #ifdef _SYSCALL32_IMPL
1195 
1196 /*
1197  * Construct the execution environment for the user's signal
1198  * handler and arrange for control to be given to it on return
1199  * to userland.  The library code now calls setcontext() to
1200  * clean up after the signal handler, so sigret() is no longer
1201  * needed.
1202  */
int
sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	/*
	 * 'volatile' is needed to ensure that values are
	 * correct on the error return from on_fault().
	 */
	volatile int minstacksz; /* min stack required to catch signal */
	int newstack = 0;	/* if true, switching to altstack */
	label_t ljb;
	caddr_t sp;
	struct regs *volatile rp;
	klwp_t *lwp = ttolwp(curthread);
	proc_t *volatile p = ttoproc(curthread);
	struct fq32 fpu_q[MAXFPQ]; /* to hold floating queue */
	struct fq32 *dfq = NULL;
	size_t fpq_size = 0;
	struct sigframe32 {
		struct frame32 frwin;
		ucontext32_t uc;
	};
	struct sigframe32 *volatile fp;
	siginfo32_t *sip_addr;
	ucontext32_t *volatile tuc = NULL;
	char *volatile xregs = NULL;
	volatile int xregs_size = 0;
	gwindows32_t *volatile gwp = NULL;
	volatile size_t gwin_size = 0;
	kfpu_t *fpp;
	struct machpcb *mpcb;
	volatile int watched = 0;
	volatile int watched2 = 0;
	caddr_t tos;
	caddr32_t tos32;

	/*
	 * Make sure the current last user window has been flushed to
	 * the stack save area before we change the sp.
	 * Restore register window if a debugger modified it.
	 */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	mpcb = lwptompcb(lwp);
	rp = lwptoregs(lwp);

	/*
	 * Clear the watchpoint return stack pointers.
	 */
	mpcb->mpcb_rsp[0] = NULL;
	mpcb->mpcb_rsp[1] = NULL;

	/*
	 * Base frame size: the caller's register-window save area plus
	 * the ucontext; optional pieces are sized and added on below.
	 */
	minstacksz = sizeof (struct sigframe32);

	if (sip != NULL)
		minstacksz += sizeof (siginfo32_t);

	/*
	 * These two fields are pointed to by ABI structures and may
	 * be of arbitrary length. Size them now so we know how big
	 * the signal frame has to be.
	 */
	fpp = lwptofpu(lwp);
	fpp->fpu_fprs = _fp_read_fprs();
	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
		fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
		minstacksz += fpq_size;
		/* dfq points at the local staging buffer passed to savecontext32() */
		dfq = fpu_q;
	}

	mpcb = lwptompcb(lwp);
	if (mpcb->mpcb_wbcnt != 0) {
		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
		    (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
		    sizeof (int32_t);
		minstacksz += gwin_size;
	}

	/*
	 * Extra registers, if supported by this platform, may be of arbitrary
	 * length. Size them now so we know how big the signal frame has to be.
	 */
	xregs_size = xregs_getsize(p);
	minstacksz += SA32(xregs_size);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user. Then allocate
	 * and validate the stack requirements for the signal handler
	 * context. on_fault will catch any faults.
	 */
	newstack = (sigismember(&u.u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));

	/*
	 * Reduce r_sp to a 32 bit value before storing it in tos.  This was
	 * originally done by casting down to uint32_t and back up to void *,
	 * but that generated complaints under one compiler.  The uintptr_t cast
	 * was added to address that, and the temporary 32 bit variable was
	 * introduced to avoid depending on all compilers to generate the
	 * desired assembly code for a triple cast in a single expression.
	 */
	tos32 = (uint32_t)rp->r_sp;
	tos = (void *)(uintptr_t)tos32;

	if (newstack != 0) {
		fp = (struct sigframe32 *)
		    (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
			SA32((int)lwp->lwp_sigaltstack.ss_size) -
			STACK_ALIGN32 -
			SA32(minstacksz));
	} else {
		/*
		 * If we were unable to flush all register windows to
		 * the stack and we are not now on an alternate stack,
		 * just dump core with a SIGSEGV back in psig().
		 */
		if (sig == SIGSEGV &&
		    mpcb->mpcb_wbcnt != 0 &&
		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
			return (0);
		fp = (struct sigframe32 *)(tos - SA32(minstacksz));
		/*
		 * Could call grow here, but stack growth now handled below
		 * in code protected by on_fault().
		 */
	}
	/* sp walks the variable-length area that follows the fixed frame */
	sp = (caddr_t)fp + sizeof (struct sigframe32);

	/*
	 * Make sure process hasn't trashed its stack.
	 */
	if (((uintptr_t)fp & (STACK_ALIGN32 - 1)) != 0 ||
	    (caddr_t)fp >= p->p_usrstack ||
	    (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
#ifdef DEBUG
		printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)fp, (void *)hdlr, rp->r_pc);

		if (((uintptr_t)fp & (STACK_ALIGN32 - 1)) != 0)
			printf("bad stack alignment\n");
		else
			printf("fp above USRSTACK32\n");
#endif
		return (0);
	}

	/*
	 * Suspend watchpoints over the frame area; from here until
	 * no_fault(), any fault taken while copying out vectors to
	 * badstack via on_fault().
	 */
	watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (on_fault(&ljb))
		goto badstack;

	tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
	savecontext32(tuc, lwp->lwp_sigoldmask, dfq);

	/*
	 * save extra register state if it exists
	 */
	if (xregs_size != 0) {
		xregs_setptr32(lwp, tuc, (caddr32_t)(uintptr_t)sp);
		xregs = kmem_alloc(xregs_size, KM_SLEEP);
		xregs_get(lwp, xregs);
		copyout_noerr(xregs, sp, xregs_size);
		kmem_free(xregs, xregs_size);
		xregs = NULL;
		sp += SA32(xregs_size);
	}

	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	if (sip != NULL) {
		siginfo32_t si32;
		zoneid_t zoneid;

		siginfo_kto32(sip, &si32);
		/*
		 * For a user-generated signal crossing into another
		 * (non-global) zone, sanitize identifying fields so the
		 * target cannot see sender details from outside its zone.
		 */
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			si32.si_pid = p->p_zone->zone_zsched->p_pid;
			si32.si_uid = 0;
			si32.si_ctid = -1;
			si32.si_zoneid = zoneid;
		}
		uzero(sp, sizeof (siginfo32_t));
		copyout_noerr(&si32, sp, sizeof (siginfo32_t));
		sip_addr = (siginfo32_t *)sp;
		sp += sizeof (siginfo32_t);

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;
			while (--i >= 0) {
				suword32_noerr(&sip_addr->si_sysarg[i],
				    (uint32_t)lwp->lwp_arg[i]);
			}
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else {
		sip_addr = NULL;
	}

	/*
	 * When flush_user_windows_to_stack() can't save all the
	 * windows to the stack, it puts them in the lwp's pcb.
	 */
	if (gwin_size != 0) {
		gwp = kmem_alloc(gwin_size, KM_SLEEP);
		getgwins32(lwp, gwp);
		suword32_noerr(&fp->uc.uc_mcontext.gwins,
		    (uint32_t)(uintptr_t)sp);
		copyout_noerr(gwp, sp, gwin_size);
		kmem_free(gwp, gwin_size);
		gwp = NULL;
		sp += gwin_size;
	} else {
		suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)NULL);
	}

	if (fpq_size != 0) {
		/*
		 * Update the (already copied out) fpu32.fpu_q pointer
		 * from NULL to the 32-bit address on the user's stack
		 * where we then copyout the fq32 to.
		 */
		struct fq32 *fqp = (struct fq32 *)sp;
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)(uintptr_t)fqp);
		copyout_noerr(dfq, fqp, fpq_size);

		/*
		 * forget the fp queue so that the signal handler can run
		 * without being harrassed--it will do a setcontext that will
		 * re-establish the queue if there still is one
		 *
		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
		 *	to terminate its processing of the queue after signal
		 *	delivery.
		 */
		mpcb->mpcb_fpu->fpu_qcnt = 0;
		sp += fpq_size;

		/* Also, syscall needs to know about this */
		mpcb->mpcb_flags |= FP_TRAPPED;

	} else {
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)NULL);
		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
	}


	/*
	 * Since we flushed the user's windows and we are changing his
	 * stack pointer, the window that the user will return to will
	 * be restored from the save area in the frame we are setting up.
	 * We copy in save area for old stack pointer so that debuggers
	 * can do a proper stack backtrace from the signal handler.
	 */
	if (mpcb->mpcb_wbcnt == 0) {
		watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
		    S_READ);
		ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
	}

	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;

	if (newstack != 0) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack) {
			/* mirror the updated altstack state out to userland */
			stack32_t stk32;

			stk32.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
			stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;

			copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
			    sizeof (stack32_t));
		}
	}

	no_fault();
	mpcb->mpcb_wbcnt = 0;		/* let user go on */

	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 * Note: no STACK_BIAS adjustment here -- the 32-bit stack
	 * pointer is used as-is.
	 */
	rp->r_sp = (uintptr_t)fp;
	rp->r_pc = (uintptr_t)hdlr;
	rp->r_npc = (uintptr_t)hdlr + 4;
	/* make sure %asi is ASI_PNF */
	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
	rp->r_o0 = sig;
	rp->r_o1 = (uintptr_t)sip_addr;
	rp->r_o2 = (uintptr_t)&fp->uc;
	/*
	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

	/*
	 * A fault occurred while writing the signal frame: restore
	 * watchpoint state, free any staging buffers still allocated,
	 * and report failure to the caller.
	 */
badstack:
	no_fault();
	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
	if (xregs)
		kmem_free(xregs, xregs_size);
	if (gwp)
		kmem_free(gwp, gwin_size);
#ifdef DEBUG
	printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)fp, (void *)hdlr, rp->r_pc);
#endif
	return (0);
}
1544 
1545 #endif /* _SYSCALL32_IMPL */
1546 
1547 
1548 /*
1549  * load user registers into lwp.
1550  * thrptr ignored for sparc.
1551  */
1552 /* ARGSUSED2 */
1553 void
1554 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
1555 {
1556 	setgregs(lwp, grp);
1557 	if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
1558 		lwptoregs(lwp)->r_tstate = TSTATE_USER32;
1559 	else
1560 		lwptoregs(lwp)->r_tstate = TSTATE_USER64;
1561 
1562 	if (!fpu_exists)
1563 		lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
1564 	lwp->lwp_eosys = JUSTRETURN;
1565 	lwptot(lwp)->t_post_sys = 1;
1566 }
1567 
1568 /*
1569  * set syscall()'s return values for a lwp.
1570  */
1571 void
1572 lwp_setrval(klwp_t *lwp, int v1, int v2)
1573 {
1574 	struct regs *rp = lwptoregs(lwp);
1575 
1576 	rp->r_tstate &= ~TSTATE_IC;
1577 	rp->r_o0 = v1;
1578 	rp->r_o1 = v2;
1579 }
1580 
1581 /*
1582  * set stack pointer for a lwp
1583  */
1584 void
1585 lwp_setsp(klwp_t *lwp, caddr_t sp)
1586 {
1587 	struct regs *rp = lwptoregs(lwp);
1588 	rp->r_sp = (uintptr_t)sp;
1589 }
1590 
1591 /*
1592  * Take any PCB specific actions that are required or flagged in the PCB.
1593  */
1594 extern void trap_async_hwerr(void);
1595 #pragma	weak trap_async_hwerr
1596 
1597 void
1598 lwp_pcb_exit(void)
1599 {
1600 	klwp_t *lwp = ttolwp(curthread);
1601 
1602 	if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1603 		trap_async_hwerr();
1604 	}
1605 }
1606 
1607 /*
1608  * Invalidate the saved user register windows in the pcb struct
1609  * for the current thread. They will no longer be preserved.
1610  */
1611 void
1612 lwp_clear_uwin(void)
1613 {
1614 	struct machpcb *m = lwptompcb(ttolwp(curthread));
1615 
1616 	/*
1617 	 * This has the effect of invalidating all (any) of the
1618 	 * user level windows that are currently sitting in the
1619 	 * kernel buffer.
1620 	 */
1621 	m->mpcb_wbcnt = 0;
1622 }
1623 
1624 static uint_t
1625 mkpsr(uint64_t tstate, uint_t fprs)
1626 {
1627 	uint_t psr, icc;
1628 
1629 	psr = tstate & TSTATE_CWP_MASK;
1630 	if (tstate & TSTATE_PRIV)
1631 		psr |= PSR_PS;
1632 	if (fprs & FPRS_FEF)
1633 		psr |= PSR_EF;
1634 	icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
1635 	psr |= icc;
1636 	psr |= V9_PSR_IMPLVER;
1637 	return (psr);
1638 }
1639 
1640 void
1641 sync_icache(caddr_t va, uint_t len)
1642 {
1643 	caddr_t end;
1644 
1645 	end = va + len;
1646 	va = (caddr_t)((uintptr_t)va & -8l);	/* sparc needs 8-byte align */
1647 	while (va < end) {
1648 		doflush(va);
1649 		va += 8;
1650 	}
1651 }
1652 
1653 #ifdef _SYSCALL32_IMPL
1654 
1655 /*
1656  * Copy the floating point queue if and only if there is a queue and a place
1657  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1658  * The issue is that while we are handling the fq32 in sendsig, we
1659  * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
1660  * will not suffice, so we have the third parameter to this function.
1661  */
1662 void
1663 fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
1664 {
1665 	int i;
1666 
1667 	bzero(dest, sizeof (*dest));
1668 	for (i = 0; i < 32; i++)
1669 		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1670 	dest->fpu_q = NULL;
1671 	dest->fpu_fsr = (uint32_t)src->fpu_fsr;
1672 	dest->fpu_qcnt = src->fpu_qcnt;
1673 	dest->fpu_q_entrysize = sizeof (struct fpq32);
1674 	dest->fpu_en = src->fpu_en;
1675 
1676 	if ((src->fpu_qcnt) && (dfq != NULL)) {
1677 		struct fq *sfq = src->fpu_q;
1678 		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1679 			dfq->FQu.fpq.fpq_addr =
1680 			    (caddr32_t)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1681 			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1682 		}
1683 	}
1684 }
1685 
1686 /*
1687  * Copy the floating point queue if and only if there is a queue and a place
1688  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1689  * The *dfq is required to escape the bzero in both this function and in
1690  * ucontext_32ton. The *sfq is required because once the fq32 is copied
1691  * into the kernel, in setcontext, then we need a 64-bit pointer to it.
1692  */
1693 static void
1694 fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
1695     const struct fq32 *sfq, struct fq *dfq)
1696 {
1697 	int i;
1698 
1699 	bzero(dest, sizeof (*dest));
1700 	for (i = 0; i < 32; i++)
1701 		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1702 	dest->fpu_q = dfq;
1703 	dest->fpu_fsr = (uint64_t)src->fpu_fsr;
1704 	if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
1705 		dest->fpu_q_entrysize = sizeof (struct fpq);
1706 	else
1707 		dest->fpu_q_entrysize = 0;
1708 	dest->fpu_en = src->fpu_en;
1709 
1710 	if ((src->fpu_qcnt) && (sfq) && (dfq)) {
1711 		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1712 			dfq->FQu.fpq.fpq_addr =
1713 			    (unsigned int *)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1714 			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1715 		}
1716 	}
1717 }
1718 
1719 void
1720 ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
1721     const struct fq32 *sfq, struct fq *dfq)
1722 {
1723 	int i;
1724 
1725 	bzero(dest, sizeof (*dest));
1726 
1727 	dest->uc_flags = src->uc_flags;
1728 	dest->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
1729 
1730 	for (i = 0; i < 4; i++) {
1731 		dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
1732 	}
1733 
1734 	dest->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
1735 	dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
1736 	dest->uc_stack.ss_flags = src->uc_stack.ss_flags;
1737 
1738 	/* REG_CCR is 0, skip over it and handle it after this loop */
1739 	for (i = 1; i < _NGREG32; i++)
1740 		dest->uc_mcontext.gregs[i] =
1741 		    (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
1742 	dest->uc_mcontext.gregs[REG_CCR] =
1743 	    (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
1744 	dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
1745 	/*
1746 	 * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
1747 	 * otherwise there is no guarantee that anything in fpregs is valid.
1748 	 */
1749 	if (src->uc_flags & UC_FPU) {
1750 		dest->uc_mcontext.gregs[REG_FPRS] =
1751 		    ((src->uc_mcontext.fpregs.fpu_en) ?
1752 		    (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
1753 	} else {
1754 		dest->uc_mcontext.gregs[REG_FPRS] = 0;
1755 	}
1756 	dest->uc_mcontext.gwins =
1757 	    (gwindows_t *)(uintptr_t)src->uc_mcontext.gwins;
1758 	if (src->uc_flags & UC_FPU) {
1759 		fpuregset_32ton(&src->uc_mcontext.fpregs,
1760 		    &dest->uc_mcontext.fpregs, sfq, dfq);
1761 	}
1762 }
1763 
1764 void
1765 rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
1766 {
1767 	greg_t *s = (greg_t *)src;
1768 	greg32_t *d = (greg32_t *)dest;
1769 	int i;
1770 
1771 	for (i = 0; i < 16; i++)
1772 		*d++ = (greg32_t)*s++;
1773 }
1774 
1775 void
1776 rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
1777 {
1778 	greg32_t *s = (greg32_t *)src;
1779 	greg_t *d = (greg_t *)dest;
1780 	int i;
1781 
1782 	for (i = 0; i < 16; i++)
1783 		*d++ = (uint32_t)*s++;
1784 }
1785 
1786 #endif /* _SYSCALL32_IMPL */
1787 
1788 /*
1789  * The panic code invokes panic_saveregs() to record the contents of a
1790  * regs structure into the specified panic_data structure for debuggers.
1791  */
void
panic_saveregs(panic_data_t *pdp, struct regs *rp)
{
	panic_nv_t *pnv = PANICNVGET(pdp);

	/* processor state */
	PANICNVADD(pnv, "tstate", rp->r_tstate);
	/* global registers %g1-%g7 */
	PANICNVADD(pnv, "g1", rp->r_g1);
	PANICNVADD(pnv, "g2", rp->r_g2);
	PANICNVADD(pnv, "g3", rp->r_g3);
	PANICNVADD(pnv, "g4", rp->r_g4);
	PANICNVADD(pnv, "g5", rp->r_g5);
	PANICNVADD(pnv, "g6", rp->r_g6);
	PANICNVADD(pnv, "g7", rp->r_g7);
	/* output registers %o0-%o7 */
	PANICNVADD(pnv, "o0", rp->r_o0);
	PANICNVADD(pnv, "o1", rp->r_o1);
	PANICNVADD(pnv, "o2", rp->r_o2);
	PANICNVADD(pnv, "o3", rp->r_o3);
	PANICNVADD(pnv, "o4", rp->r_o4);
	PANICNVADD(pnv, "o5", rp->r_o5);
	PANICNVADD(pnv, "o6", rp->r_o6);
	PANICNVADD(pnv, "o7", rp->r_o7);
	/* program counters and the %y register */
	PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
	PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
	PANICNVADD(pnv, "y", (uint32_t)rp->r_y);

	/* commit the accumulated name/value list back into pdp */
	PANICNVSET(pdp, pnv);
}
1819