1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
23 /*	  All Rights Reserved  	*/
24 
25 /*
26  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
27  * Use is subject to license terms.
28  */
29 
30 #pragma ident	"%Z%%M%	%I%	%E% SMI"
31 
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/vmparam.h>
35 #include <sys/systm.h>
36 #include <sys/stack.h>
37 #include <sys/frame.h>
38 #include <sys/proc.h>
39 #include <sys/ucontext.h>
40 #include <sys/cpuvar.h>
41 #include <sys/asm_linkage.h>
42 #include <sys/kmem.h>
43 #include <sys/errno.h>
44 #include <sys/bootconf.h>
45 #include <sys/archsystm.h>
46 #include <sys/fpu/fpusystm.h>
47 #include <sys/debug.h>
48 #include <sys/privregs.h>
49 #include <sys/machpcb.h>
50 #include <sys/psr_compat.h>
51 #include <sys/cmn_err.h>
52 #include <sys/asi.h>
53 #include <sys/copyops.h>
54 #include <sys/model.h>
55 #include <sys/panic.h>
56 #include <sys/exec.h>
57 
58 /*
59  * modify the lower 32 bits of a uint64_t
60  */
61 #define	SET_LOWER_32(all, lower)	\
62 	(((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))
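/*
 * For example (illustration only, not used elsewhere in this file):
 * SET_LOWER_32(0x1122334455667788ULL, 0xdeadbeef) == 0x11223344deadbeefULL
 */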
63 
64 #define	MEMCPY_FPU_EN		2	/* fprs on and fpu_en == 0 */
65 
66 static uint_t mkpsr(uint64_t tstate, uint32_t fprs);
67 
68 #ifdef _SYSCALL32_IMPL
69 static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
70     const struct fq32 *sfq, struct fq *dfq);
71 #endif /* _SYSCALL32_IMPL */
72 
73 /*
74  * Set floating-point registers.
75  * NOTE:  'lwp' might not correspond to 'curthread' since this is
76  * called from code in /proc to set the registers of another lwp.
77  */
78 void
79 setfpregs(klwp_t *lwp, fpregset_t *fp)
80 {
81 	struct machpcb *mpcb;
82 	kfpu_t *pfp;
83 	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
84 	model_t model = lwp_getdatamodel(lwp);
85 
86 	mpcb = lwptompcb(lwp);
87 	pfp = lwptofpu(lwp);
88 
89 	/*
90 	 * fpu_en is non-zero here for both "real" fp programs and for
91 	 * memcpy/threads fp use, because getfpregs forces fpu_en to
92 	 * MEMCPY_FPU_EN when (fpu_en == 0) && (fpu_fprs & FPRS_FEF);
93 	 * so when setfpregs follows getfpregs, this test covers both cases.
94 	 */
95 	if (fp->fpu_en) {
96 		kpreempt_disable();
97 
98 		if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
99 		    fpu_exists) {
100 			/*
101 			 * He's not currently using the FPU but wants to in his
102 			 * new context - arrange for this on return to userland.
103 			 */
104 			pfp->fpu_fprs = (uint32_t)fprs;
105 		}
106 		/*
107 		 * Restore fpu_en to zero for the memcpy/threads case
108 		 * (where pfp->fpu_en == 0 &&
109 		 * (pfp->fpu_fprs & FPRS_FEF) == FPRS_FEF).
110 		 */
111 		if (fp->fpu_en == MEMCPY_FPU_EN)
112 			fp->fpu_en = 0;
113 
114 		/*
115 		 * Load up a user's floating point context.
116 		 */
117 		if (fp->fpu_qcnt > MAXFPQ) 	/* plug security holes */
118 			fp->fpu_qcnt = MAXFPQ;
119 		fp->fpu_q_entrysize = sizeof (struct fq);
120 
121 		/*
122 		 * For v9 kernel, copy all of the fp regs.
123 		 * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
124 		 * Restore entire fsr for v9, only lower half for v8.
125 		 */
126 		(void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
127 		if (model == DATAMODEL_LP64)
128 			pfp->fpu_fsr = fp->fpu_fsr;
129 		else
130 			pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
131 		pfp->fpu_qcnt = fp->fpu_qcnt;
132 		pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
133 		pfp->fpu_en = fp->fpu_en;
134 		pfp->fpu_q = mpcb->mpcb_fpu_q;
135 		if (fp->fpu_qcnt)
136 			(void) kcopy(fp->fpu_q, pfp->fpu_q,
137 			    fp->fpu_qcnt * fp->fpu_q_entrysize);
138 		/* FSR ignores these bits on load, so they cannot be set */
139 		pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);
140 
141 		/*
142 		 * If not the current process then resume() will handle it.
143 		 */
144 		if (lwp != ttolwp(curthread)) {
145 			/* force resume to reload fp regs */
146 			pfp->fpu_fprs |= FPRS_FEF;
147 			kpreempt_enable();
148 			return;
149 		}
150 
151 		/*
152 		 * Load up FPU with new floating point context.
153 		 */
154 		if (fpu_exists) {
155 			pfp->fpu_fprs = _fp_read_fprs();
156 			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
157 				_fp_write_fprs(fprs);
158 				pfp->fpu_fprs = (uint32_t)fprs;
159 #ifdef DEBUG
160 				if (fpdispr)
161 					cmn_err(CE_NOTE,
162 					    "setfpregs with fp disabled!\n");
163 #endif
164 			}
165 			/*
166 			 * Load all fp regs for v9 user programs, but only
167 			 * load the lower half for v8[plus] programs.
168 			 */
169 			if (model == DATAMODEL_LP64)
170 				fp_restore(pfp);
171 			else
172 				fp_v8_load(pfp);
173 		}
174 
175 		kpreempt_enable();
176 	} else {
177 		if ((pfp->fpu_en) ||	/* normal fp case */
178 		    (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
179 			/*
180 			 * Currently the lwp has floating point enabled.
181 			 * Turn off FPRS_FEF in user's fprs, saved and
182 			 * real copies thereof.
183 			 */
184 			pfp->fpu_en = 0;
185 			if (fpu_exists) {
186 				fprs = 0;
187 				if (lwp == ttolwp(curthread))
188 					_fp_write_fprs(fprs);
189 				pfp->fpu_fprs = (uint32_t)fprs;
190 			}
191 		}
192 	}
193 }
194 
195 #ifdef	_SYSCALL32_IMPL
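/*
 * ILP32 wrapper: widen the 32-bit fpregset to the native form (with no
 * fp queue) and let setfpregs() do the real work.
 */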
196 void
197 setfpregs32(klwp_t *lwp, fpregset32_t *fp)
198 {
199 	fpregset_t fpregs;
200 
201 	fpuregset_32ton(fp, &fpregs, NULL, NULL);
202 	setfpregs(lwp, &fpregs);
203 }
204 #endif	/* _SYSCALL32_IMPL */
205 
206 /*
207  * NOTE:  'lwp' might not correspond to 'curthread' since this is
208  * called from code in /proc to set the registers of another lwp.
209  */
210 void
211 run_fpq(klwp_t *lwp, fpregset_t *fp)
212 {
213 	/*
214 	 * If the context being loaded up includes a floating queue,
215 	 * we need to simulate those instructions (since we can't reload
216 	 * the fpu) and pass the process any appropriate signals
217 	 */
218 
219 	if (lwp == ttolwp(curthread)) {
220 		if (fpu_exists) {
221 			if (fp->fpu_qcnt)
222 				fp_runq(lwp->lwp_regs);
223 		}
224 	}
225 }
226 
227 /*
228  * Get floating-point registers.
229  * NOTE:  'lwp' might not correspond to 'curthread' since this is
230  * called from code in /proc to set the registers of another lwp.
231  */
232 void
233 getfpregs(klwp_t *lwp, fpregset_t *fp)
234 {
235 	kfpu_t *pfp;
236 	model_t model = lwp_getdatamodel(lwp);
237 
238 	pfp = lwptofpu(lwp);
239 	kpreempt_disable();
240 	if (fpu_exists && ttolwp(curthread) == lwp)
241 		pfp->fpu_fprs = _fp_read_fprs();
242 
243 	/*
244 	 * First check the fpu_en case, for normal fp programs.
245 	 * Next check the fprs case, for fp use by memcpy/threads.
246 	 */
247 	if (((fp->fpu_en = pfp->fpu_en) != 0) ||
248 	    (pfp->fpu_fprs & FPRS_FEF)) {
249 		/*
250 		 * Force a later setfpregs to restore the fp context
251 		 * for the memcpy and threads cases (where
252 		 * pfp->fpu_en == 0 && (pfp->fpu_fprs & FPRS_FEF) == FPRS_FEF).
253 		 */
254 		if (pfp->fpu_en == 0)
255 			fp->fpu_en = MEMCPY_FPU_EN;
256 		/*
257 		 * If we have an fpu and the current thread owns the fp
258 		 * context, flush the fp registers into the pcb. Save all
259 		 * the fp regs for v9; xregs_getfpregs saves the upper half
260 		 * for v8plus. Save entire fsr for v9, only lower half for v8.
261 		 */
262 		if (fpu_exists && ttolwp(curthread) == lwp) {
263 			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
264 				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
265 
266 				_fp_write_fprs(fprs);
267 				pfp->fpu_fprs = fprs;
268 #ifdef DEBUG
269 				if (fpdispr)
270 					cmn_err(CE_NOTE,
271 					    "getfpregs with fp disabled!\n");
272 #endif
273 			}
274 			if (model == DATAMODEL_LP64)
275 				fp_fksave(pfp);
276 			else
277 				fp_v8_fksave(pfp);
278 		}
279 		(void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
280 		fp->fpu_q = pfp->fpu_q;
281 		if (model == DATAMODEL_LP64)
282 			fp->fpu_fsr = pfp->fpu_fsr;
283 		else
284 			fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
285 		fp->fpu_qcnt = pfp->fpu_qcnt;
286 		fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
287 	} else {
288 		int i;
289 		for (i = 0; i < 32; i++)		/* NaN */
290 			((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
291 		if (model == DATAMODEL_LP64) {
292 			for (i = 16; i < 32; i++)	/* NaN */
293 				((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
294 				    (uint64_t)-1;
295 		}
296 		fp->fpu_fsr = 0;
297 		fp->fpu_qcnt = 0;
298 	}
299 	kpreempt_enable();
300 }
301 
302 #ifdef	_SYSCALL32_IMPL
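/*
 * ILP32 wrapper: fetch the native fpregset and narrow it to 32-bit form.
 * The fp queue, if any, is not copied here (the dfq argument is NULL).
 */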
303 void
304 getfpregs32(klwp_t *lwp, fpregset32_t *fp)
305 {
306 	fpregset_t fpregs;
307 
308 	getfpregs(lwp, &fpregs);
309 	fpuregset_nto32(&fpregs, fp, NULL);
310 }
311 #endif	/* _SYSCALL32_IMPL */
312 
313 /*
314  * Set general registers.
315  * NOTE:  'lwp' might not correspond to 'curthread' since this is
316  * called from code in /proc to set the registers of another lwp.
317  */
318 
319 /* 64-bit gregset_t */
320 void
321 setgregs(klwp_t *lwp, gregset_t grp)
322 {
323 	struct regs *rp = lwptoregs(lwp);
324 	kfpu_t *fp = lwptofpu(lwp);
325 	uint64_t tbits;
326 
327 	int current = (lwp == curthread->t_lwp);
328 
329 	if (current)
330 		(void) save_syscall_args();	/* copy the args first */
331 
332 	tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
333 		((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
334 	rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
335 		((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
336 	rp->r_tstate |= tbits;
337 	kpreempt_disable();
338 	fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
339 	if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
340 		_fp_write_fprs(fp->fpu_fprs);
341 	kpreempt_enable();
342 
343 	/*
344 	 * pc and npc must be 4-byte aligned on sparc.
345 	 * We silently make it so to avoid a watchdog reset.
346 	 */
347 	rp->r_pc = grp[REG_PC] & ~03L;
348 	rp->r_npc = grp[REG_nPC] & ~03L;
349 	rp->r_y = grp[REG_Y];
350 
351 	rp->r_g1 = grp[REG_G1];
352 	rp->r_g2 = grp[REG_G2];
353 	rp->r_g3 = grp[REG_G3];
354 	rp->r_g4 = grp[REG_G4];
355 	rp->r_g5 = grp[REG_G5];
356 	rp->r_g6 = grp[REG_G6];
357 	rp->r_g7 = grp[REG_G7];
358 
359 	rp->r_o0 = grp[REG_O0];
360 	rp->r_o1 = grp[REG_O1];
361 	rp->r_o2 = grp[REG_O2];
362 	rp->r_o3 = grp[REG_O3];
363 	rp->r_o4 = grp[REG_O4];
364 	rp->r_o5 = grp[REG_O5];
365 	rp->r_o6 = grp[REG_O6];
366 	rp->r_o7 = grp[REG_O7];
367 
368 	if (current) {
369 		/*
370 		 * This was called from a system call, but we
371 		 * do not want to return via the shared window;
372 		 * restoring the CPU context changes everything.
373 		 */
374 		lwp->lwp_eosys = JUSTRETURN;
375 		curthread->t_post_sys = 1;
376 	}
377 }
378 
379 /*
380  * Return the general registers.
381  * NOTE:  'lwp' might not correspond to 'curthread' since this is
382  * called from code in /proc to get the registers of another lwp.
383  */
384 void
385 getgregs(klwp_t *lwp, gregset_t grp)
386 {
387 	struct regs *rp = lwptoregs(lwp);
388 	uint32_t fprs;
389 
390 	kpreempt_disable();
391 	if (fpu_exists && ttolwp(curthread) == lwp) {
392 		fprs = _fp_read_fprs();
393 	} else {
394 		kfpu_t *fp = lwptofpu(lwp);
395 		fprs = fp->fpu_fprs;
396 	}
397 	kpreempt_enable();
398 	grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
399 	grp[REG_PC] = rp->r_pc;
400 	grp[REG_nPC] = rp->r_npc;
401 	grp[REG_Y] = (uint32_t)rp->r_y;
402 	grp[REG_G1] = rp->r_g1;
403 	grp[REG_G2] = rp->r_g2;
404 	grp[REG_G3] = rp->r_g3;
405 	grp[REG_G4] = rp->r_g4;
406 	grp[REG_G5] = rp->r_g5;
407 	grp[REG_G6] = rp->r_g6;
408 	grp[REG_G7] = rp->r_g7;
409 	grp[REG_O0] = rp->r_o0;
410 	grp[REG_O1] = rp->r_o1;
411 	grp[REG_O2] = rp->r_o2;
412 	grp[REG_O3] = rp->r_o3;
413 	grp[REG_O4] = rp->r_o4;
414 	grp[REG_O5] = rp->r_o5;
415 	grp[REG_O6] = rp->r_o6;
416 	grp[REG_O7] = rp->r_o7;
417 	grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
418 	grp[REG_FPRS] = fprs;
419 }
420 
421 void
422 getgregs32(klwp_t *lwp, gregset32_t grp)
423 {
424 	struct regs *rp = lwptoregs(lwp);
425 	uint32_t fprs;
426 
427 	kpreempt_disable();
428 	if (fpu_exists && ttolwp(curthread) == lwp) {
429 		fprs = _fp_read_fprs();
430 	} else {
431 		kfpu_t *fp = lwptofpu(lwp);
432 		fprs = fp->fpu_fprs;
433 	}
434 	kpreempt_enable();
435 	grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
436 	grp[REG_PC] = rp->r_pc;
437 	grp[REG_nPC] = rp->r_npc;
438 	grp[REG_Y] = rp->r_y;
439 	grp[REG_G1] = rp->r_g1;
440 	grp[REG_G2] = rp->r_g2;
441 	grp[REG_G3] = rp->r_g3;
442 	grp[REG_G4] = rp->r_g4;
443 	grp[REG_G5] = rp->r_g5;
444 	grp[REG_G6] = rp->r_g6;
445 	grp[REG_G7] = rp->r_g7;
446 	grp[REG_O0] = rp->r_o0;
447 	grp[REG_O1] = rp->r_o1;
448 	grp[REG_O2] = rp->r_o2;
449 	grp[REG_O3] = rp->r_o3;
450 	grp[REG_O4] = rp->r_o4;
451 	grp[REG_O5] = rp->r_o5;
452 	grp[REG_O6] = rp->r_o6;
453 	grp[REG_O7] = rp->r_o7;
454 }
455 
456 /*
457  * Return the user-level PC.
458  * If in a system call, return the address of the syscall trap.
459  */
460 greg_t
461 getuserpc()
462 {
463 	return (lwptoregs(ttolwp(curthread))->r_pc);
464 }
465 
466 /*
467  * Set register windows.
468  */
469 void
470 setgwins(klwp_t *lwp, gwindows_t *gwins)
471 {
472 	struct machpcb *mpcb = lwptompcb(lwp);
473 	int wbcnt = gwins->wbcnt;
474 	caddr_t sp;
475 	int i;
476 	struct rwindow32 *rwp;
477 	int wbuf_rwindow_size;
478 	int is64;
479 
480 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
481 		wbuf_rwindow_size = WINDOWSIZE32;
482 		is64 = 0;
483 	} else {
484 		wbuf_rwindow_size = WINDOWSIZE64;
485 		is64 = 1;
486 	}
487 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
488 	mpcb->mpcb_wbcnt = 0;
489 	for (i = 0; i < wbcnt; i++) {
490 		sp = (caddr_t)gwins->spbuf[i];
491 		mpcb->mpcb_spbuf[i] = sp;
492 		rwp = (struct rwindow32 *)
493 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
494 		if (is64 && IS_V9STACK(sp))
495 			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
496 		else
497 			rwindow_nto32(&gwins->wbuf[i], rwp);
498 		mpcb->mpcb_wbcnt++;
499 	}
500 }
501 
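/*
 * Like setgwins(), but the caller supplies the window buffer in 32-bit
 * form; each window destined for a 64-bit (V9) stack is widened.
 */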
502 void
503 setgwins32(klwp_t *lwp, gwindows32_t *gwins)
504 {
505 	struct machpcb *mpcb = lwptompcb(lwp);
506 	int wbcnt = gwins->wbcnt;
507 	caddr_t sp;
508 	int i;
509 
510 	struct rwindow *rwp;
511 	int wbuf_rwindow_size;
512 	int is64;
513 
514 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
515 		wbuf_rwindow_size = WINDOWSIZE32;
516 		is64 = 0;
517 	} else {
518 		wbuf_rwindow_size = WINDOWSIZE64;
519 		is64 = 1;
520 	}
521 
522 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
523 	mpcb->mpcb_wbcnt = 0;
524 	for (i = 0; i < wbcnt; i++) {
525 		sp = (caddr_t)(uintptr_t)gwins->spbuf[i];
526 		mpcb->mpcb_spbuf[i] = sp;
527 		rwp = (struct rwindow *)
528 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
529 		if (is64 && IS_V9STACK(sp))
530 			rwindow_32ton(&gwins->wbuf[i], rwp);
531 		else
532 			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
533 		mpcb->mpcb_wbcnt++;
534 	}
535 }
536 
537 /*
538  * Get register windows.
539  * NOTE:  'lwp' might not correspond to 'curthread' since this is
540  * called from code in /proc to set the registers of another lwp.
541  */
542 void
543 getgwins(klwp_t *lwp, gwindows_t *gwp)
544 {
545 	struct machpcb *mpcb = lwptompcb(lwp);
546 	int wbcnt = mpcb->mpcb_wbcnt;
547 	caddr_t sp;
548 	int i;
549 	struct rwindow32 *rwp;
550 	int wbuf_rwindow_size;
551 	int is64;
552 
553 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
554 		wbuf_rwindow_size = WINDOWSIZE32;
555 		is64 = 0;
556 	} else {
557 		wbuf_rwindow_size = WINDOWSIZE64;
558 		is64 = 1;
559 	}
560 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
561 	gwp->wbcnt = wbcnt;
562 	for (i = 0; i < wbcnt; i++) {
563 		sp = mpcb->mpcb_spbuf[i];
564 		gwp->spbuf[i] = (greg_t *)sp;
565 		rwp = (struct rwindow32 *)
566 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
567 		if (is64 && IS_V9STACK(sp))
568 			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
569 		else
570 			rwindow_32ton(rwp, &gwp->wbuf[i]);
571 	}
572 }
573 
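/*
 * Like getgwins(), but return the windows in 32-bit form, narrowing any
 * windows that were saved from a 64-bit (V9) stack.
 */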
574 void
575 getgwins32(klwp_t *lwp, gwindows32_t *gwp)
576 {
577 	struct machpcb *mpcb = lwptompcb(lwp);
578 	int wbcnt = mpcb->mpcb_wbcnt;
579 	int i;
580 	struct rwindow *rwp;
581 	int wbuf_rwindow_size;
582 	caddr_t sp;
583 	int is64;
584 
585 	if (mpcb->mpcb_wstate == WSTATE_USER32) {
586 		wbuf_rwindow_size = WINDOWSIZE32;
587 		is64 = 0;
588 	} else {
589 		wbuf_rwindow_size = WINDOWSIZE64;
590 		is64 = 1;
591 	}
592 
593 	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
594 	gwp->wbcnt = wbcnt;
595 	for (i = 0; i < wbcnt; i++) {
596 		sp = mpcb->mpcb_spbuf[i];
597 		rwp = (struct rwindow *)
598 			(mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
599 		gwp->spbuf[i] = (caddr32_t)(uintptr_t)sp;
600 		if (is64 && IS_V9STACK(sp))
601 			rwindow_nto32(rwp, &gwp->wbuf[i]);
602 		else
603 			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
604 	}
605 }
606 
607 /*
608  * For things that depend on register state being on the stack,
609  * copy any register windows that get saved into the window buffer
610  * (in the pcb) onto the stack.  This normally gets fixed up
611  * before returning to a user program.  Callers of this routine
612  * require this to happen immediately because a later kernel
613  * operation depends on window state (like instruction simulation).
614  */
615 int
616 flush_user_windows_to_stack(caddr_t *psp)
617 {
618 	int j, k;
619 	caddr_t sp;
620 	struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
621 	int err;
622 	int error = 0;
623 	int wbuf_rwindow_size;
624 	int rwindow_size;
625 	int stack_align;
626 	int watched;
627 
628 	flush_user_windows();
629 
630 	if (mpcb->mpcb_wstate != WSTATE_USER32)
631 		wbuf_rwindow_size = WINDOWSIZE64;
632 	else
633 		wbuf_rwindow_size = WINDOWSIZE32;
634 
635 	j = mpcb->mpcb_wbcnt;
636 	while (j > 0) {
637 		sp = mpcb->mpcb_spbuf[--j];
638 
639 		if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
640 		    IS_V9STACK(sp)) {
641 			sp += V9BIAS64;
642 			stack_align = STACK_ALIGN64;
643 			rwindow_size = WINDOWSIZE64;
644 		} else {
645 			/*
646 			 * Reduce sp to a 32 bit value.  This was originally
647 			 * done by casting down to uint32_t and back up to
648 			 * caddr_t, but one compiler didn't like that, so the
649 			 * uintptr_t casts were added.  The temporary 32 bit
650 			 * variable was introduced to avoid depending on all
651 			 * compilers to generate the desired assembly code for a
652 			 * quadruple cast in a single expression.
653 			 */
654 			caddr32_t sp32 = (uint32_t)(uintptr_t)sp;
655 			sp = (caddr_t)(uintptr_t)sp32;
656 
657 			stack_align = STACK_ALIGN32;
658 			rwindow_size = WINDOWSIZE32;
659 		}
660 		if (((uintptr_t)sp & (stack_align - 1)) != 0)
661 			continue;
662 
663 		watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
664 		err = xcopyout(mpcb->mpcb_wbuf +
665 		    (j * wbuf_rwindow_size), sp, rwindow_size);
666 		if (err != 0) {
667 			if (psp != NULL) {
668 				/*
669 				 * Determine the offending address.
670 				 * It may not be the stack pointer itself.
671 				 */
672 				uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
673 				    (j * wbuf_rwindow_size));
674 				uint_t *uaddr = (uint_t *)sp;
675 
676 				for (k = 0;
677 				    k < rwindow_size / sizeof (int);
678 				    k++, kaddr++, uaddr++) {
679 					if (suword32(uaddr, *kaddr))
680 						break;
681 				}
682 
683 				/* can't happen? */
684 				if (k == rwindow_size / sizeof (int))
685 					uaddr = (uint_t *)sp;
686 
687 				*psp = (caddr_t)uaddr;
688 			}
689 			error = err;
690 		} else {
691 			/*
692 			 * stack was aligned and copyout succeeded;
693 			 * move other windows down.
694 			 */
695 			mpcb->mpcb_wbcnt--;
696 			for (k = j; k < mpcb->mpcb_wbcnt; k++) {
697 				mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
698 				bcopy(
699 				    mpcb->mpcb_wbuf +
700 					((k+1) * wbuf_rwindow_size),
701 				    mpcb->mpcb_wbuf +
702 					(k * wbuf_rwindow_size),
703 				    wbuf_rwindow_size);
704 			}
705 		}
706 		if (watched)
707 			watch_enable_addr(sp, rwindow_size, S_WRITE);
708 	} /* while there are windows in the wbuf */
709 	return (error);
710 }
711 
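/*
 * ILP32 flavor of copy_return_window(): read the user's return window
 * (and, if dotwo, the window above it) from the 32-bit stack into
 * mpcb_rwin[], converting it to 64-bit form, and record the stack
 * pointers in mpcb_rsp[].
 */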
712 static int
713 copy_return_window32(int dotwo)
714 {
715 	klwp_t *lwp = ttolwp(curthread);
716 	struct machpcb *mpcb = lwptompcb(lwp);
717 	struct rwindow32 rwindow32;
718 	caddr_t sp1;
719 	caddr_t sp2;
720 
721 	(void) flush_user_windows_to_stack(NULL);
722 	if (mpcb->mpcb_rsp[0] == NULL) {
723 		/*
724 		 * Reduce r_sp to a 32 bit value before storing it in sp1.  This
725 		 * was originally done by casting down to uint32_t and back up
726 		 * to caddr_t, but that generated complaints under one compiler.
727 		 * The uintptr_t cast was added to address that, and the
728 		 * temporary 32 bit variable was introduced to avoid depending
729 		 * on all compilers to generate the desired assembly code for a
730 		 * triple cast in a single expression.
731 		 */
732 		caddr32_t sp1_32 = (uint32_t)lwptoregs(lwp)->r_sp;
733 		sp1 = (caddr_t)(uintptr_t)sp1_32;
734 
735 		if ((copyin_nowatch(sp1, &rwindow32,
736 		    sizeof (struct rwindow32))) == 0)
737 			mpcb->mpcb_rsp[0] = sp1;
738 		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
739 	}
740 	mpcb->mpcb_rsp[1] = NULL;
741 	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
742 	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
743 		if ((copyin_nowatch(sp2, &rwindow32,
744 		    sizeof (struct rwindow32)) == 0))
745 			mpcb->mpcb_rsp[1] = sp2;
746 		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
747 	}
748 	return (mpcb->mpcb_rsp[0] != NULL);
749 }
750 
751 int
752 copy_return_window(int dotwo)
753 {
754 	proc_t *p = ttoproc(curthread);
755 	klwp_t *lwp;
756 	struct machpcb *mpcb;
757 	caddr_t sp1;
758 	caddr_t sp2;
759 
760 	if (p->p_model == DATAMODEL_ILP32)
761 		return (copy_return_window32(dotwo));
762 
763 	lwp = ttolwp(curthread);
764 	mpcb = lwptompcb(lwp);
765 	(void) flush_user_windows_to_stack(NULL);
766 	if (mpcb->mpcb_rsp[0] == NULL) {
767 		sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
768 		if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
769 		    sizeof (struct rwindow)) == 0))
770 			mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
771 	}
772 	mpcb->mpcb_rsp[1] = NULL;
773 	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
774 	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
775 		sp2 += STACK_BIAS;
776 		if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
777 		    sizeof (struct rwindow)) == 0))
778 			mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
779 	}
780 	return (mpcb->mpcb_rsp[0] != NULL);
781 }
782 
783 /*
784  * Clear registers on exec(2).
785  */
786 void
787 setregs(uarg_t *args)
788 {
789 	struct regs *rp;
790 	klwp_t *lwp = ttolwp(curthread);
791 	kfpu_t *fpp = lwptofpu(lwp);
792 	struct machpcb *mpcb = lwptompcb(lwp);
793 	proc_t *p = ttoproc(curthread);
794 
795 	/*
796 	 * Initialize user registers.
797 	 */
798 	(void) save_syscall_args();	/* copy args from registers first */
799 	rp = lwptoregs(lwp);
800 	rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
801 	    rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
802 	    rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
803 	if (p->p_model == DATAMODEL_ILP32)
804 		rp->r_tstate = TSTATE_USER32;
805 	else
806 		rp->r_tstate = TSTATE_USER64;
807 	if (!fpu_exists)
808 		rp->r_tstate &= ~TSTATE_PEF;
809 	rp->r_g7 = args->thrptr;
810 	rp->r_pc = args->entry;
811 	rp->r_npc = args->entry + 4;
812 	rp->r_y = 0;
813 	curthread->t_post_sys = 1;
814 	lwp->lwp_eosys = JUSTRETURN;
815 	lwp->lwp_pcb.pcb_trap0addr = NULL;	/* no trap 0 handler */
816 	/*
817 	 * Clear the fixalignment flag
818 	 */
819 	p->p_fixalignment = 0;
820 
821 	/*
822 	 * Throw out old user windows, init window buf.
823 	 */
824 	trash_user_windows();
825 
826 	if (p->p_model == DATAMODEL_LP64 &&
827 	    mpcb->mpcb_wstate != WSTATE_USER64) {
828 		ASSERT(mpcb->mpcb_wbcnt == 0);
829 		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
830 		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
831 		ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
832 		mpcb->mpcb_wstate = WSTATE_USER64;
833 	} else if (p->p_model == DATAMODEL_ILP32 &&
834 	    mpcb->mpcb_wstate != WSTATE_USER32) {
835 		ASSERT(mpcb->mpcb_wbcnt == 0);
836 		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
837 		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
838 		mpcb->mpcb_wstate = WSTATE_USER32;
839 	}
840 	mpcb->mpcb_pa = va_to_pa(mpcb);
841 	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
842 
843 	/*
844 	 * Here we initialize minimal fpu state.
845 	 * The rest is done at the first floating
846 	 * point instruction that a process executes
847 	 * or by the lib_psr memcpy routines.
848 	 */
849 	if (fpu_exists) {
850 		extern void _fp_write_fprs(unsigned);
851 		_fp_write_fprs(0);
852 	}
853 	fpp->fpu_en = 0;
854 	fpp->fpu_fprs = 0;
855 }
856 
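/*
 * Recompute the physical addresses cached in the machpcb (for the pcb
 * itself and its window buffer); after a swap-in the underlying pages
 * may have moved.
 */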
857 void
858 lwp_swapin(kthread_t *tp)
859 {
860 	struct machpcb *mpcb = lwptompcb(ttolwp(tp));
861 
862 	mpcb->mpcb_pa = va_to_pa(mpcb);
863 	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
864 }
865 
866 /*
867  * Construct the execution environment for the user's signal
868  * handler and arrange for control to be given to it on return
869  * to userland.  The library code now calls setcontext() to
870  * clean up after the signal handler, so sigret() is no longer
871  * needed.
872  */
873 int
874 sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
875 {
876 	/*
877 	 * 'volatile' is needed to ensure that values are
878 	 * correct on the error return from on_fault().
879 	 */
880 	volatile int minstacksz; /* min stack required to catch signal */
881 	int newstack = 0;	/* if true, switching to altstack */
882 	label_t ljb;
883 	caddr_t sp;
884 	struct regs *volatile rp;
885 	klwp_t *lwp = ttolwp(curthread);
886 	proc_t *volatile p = ttoproc(curthread);
887 	int fpq_size = 0;
888 	struct sigframe {
889 		struct frame frwin;
890 		ucontext_t uc;
891 	};
892 	siginfo_t *sip_addr;
893 	struct sigframe *volatile fp;
894 	ucontext_t *volatile tuc = NULL;
895 	char *volatile xregs = NULL;
896 	volatile size_t xregs_size = 0;
897 	gwindows_t *volatile gwp = NULL;
898 	volatile int gwin_size = 0;
899 	kfpu_t *fpp;
900 	struct machpcb *mpcb;
901 	volatile int watched = 0;
902 	volatile int watched2 = 0;
903 	caddr_t tos;
904 
905 	/*
906 	 * Make sure the current last user window has been flushed to
907 	 * the stack save area before we change the sp.
908 	 * Restore register window if a debugger modified it.
909 	 */
910 	(void) flush_user_windows_to_stack(NULL);
911 	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
912 		xregrestore(lwp, 0);
913 
914 	mpcb = lwptompcb(lwp);
915 	rp = lwptoregs(lwp);
916 
917 	/*
918 	 * Clear the watchpoint return stack pointers.
919 	 */
920 	mpcb->mpcb_rsp[0] = NULL;
921 	mpcb->mpcb_rsp[1] = NULL;
922 
923 	minstacksz = sizeof (struct sigframe);
924 
925 	/*
926 	 * We know that sizeof (siginfo_t) is stack-aligned:
927 	 * 128 bytes for ILP32, 256 bytes for LP64.
928 	 */
929 	if (sip != NULL)
930 		minstacksz += sizeof (siginfo_t);
931 
932 	/*
933 	 * These two fields are pointed to by ABI structures and may
934 	 * be of arbitrary length. Size them now so we know how big
935 	 * the signal frame has to be.
936 	 */
937 	fpp = lwptofpu(lwp);
938 	fpp->fpu_fprs = _fp_read_fprs();
939 	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
940 		fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
941 		minstacksz += SA(fpq_size);
942 	}
943 
944 	mpcb = lwptompcb(lwp);
945 	if (mpcb->mpcb_wbcnt != 0) {
946 		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
947 		    (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
948 		minstacksz += SA(gwin_size);
949 	}
950 
951 	/*
952 	 * Extra registers, if supported by this platform, may be of arbitrary
953 	 * length. Size them now so we know how big the signal frame has to be.
954 	 * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
955 	 */
956 	minstacksz += SA(xregs_size);
957 
958 	/*
959 	 * Figure out whether we will be handling this signal on
960 	 * an alternate stack specified by the user. Then allocate
961 	 * and validate the stack requirements for the signal handler
962 	 * context. on_fault will catch any faults.
963 	 */
964 	newstack = (sigismember(&u.u_sigonstack, sig) &&
965 	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
966 
967 	tos = (caddr_t)rp->r_sp + STACK_BIAS;
968 	/*
969 	 * Force proper stack pointer alignment, even in the face of a
970 	 * misaligned stack pointer from user-level before the signal.
971 	 * Don't use the SA() macro because that rounds up, not down.
972 	 */
973 	tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN - 1ul));
974 
975 	if (newstack != 0) {
976 		fp = (struct sigframe *)
977 		    (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
978 			SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
979 			SA(minstacksz));
980 	} else {
981 		/*
982 		 * If we were unable to flush all register windows to
983 		 * the stack and we are not now on an alternate stack,
984 		 * just dump core with a SIGSEGV back in psig().
985 		 */
986 		if (sig == SIGSEGV &&
987 		    mpcb->mpcb_wbcnt != 0 &&
988 		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
989 			return (0);
990 		fp = (struct sigframe *)(tos - SA(minstacksz));
991 		/*
992 		 * Could call grow here, but stack growth now handled below
993 		 * in code protected by on_fault().
994 		 */
995 	}
996 	sp = (caddr_t)fp + sizeof (struct sigframe);
997 
998 	/*
999 	 * Make sure process hasn't trashed its stack.
1000 	 */
1001 	if ((caddr_t)fp >= p->p_usrstack ||
1002 	    (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
1003 #ifdef DEBUG
1004 		printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1005 		    PTOU(p)->u_comm, p->p_pid, sig);
1006 		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1007 		    (void *)fp, (void *)hdlr, rp->r_pc);
1008 		printf("fp above USRSTACK\n");
1009 #endif
1010 		return (0);
1011 	}
1012 
1013 	watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1014 	if (on_fault(&ljb))
1015 		goto badstack;
1016 
1017 	tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
1018 	savecontext(tuc, lwp->lwp_sigoldmask);
1019 
1020 	/*
1021 	 * save extra register state if it exists
1022 	 */
1023 	if (xregs_size != 0) {
1024 		xregs_setptr(lwp, tuc, sp);
1025 		xregs = kmem_alloc(xregs_size, KM_SLEEP);
1026 		xregs_get(lwp, xregs);
1027 		copyout_noerr(xregs, sp, xregs_size);
1028 		kmem_free(xregs, xregs_size);
1029 		xregs = NULL;
1030 		sp += SA(xregs_size);
1031 	}
1032 
1033 	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1034 	kmem_free(tuc, sizeof (*tuc));
1035 	tuc = NULL;
1036 
1037 	if (sip != NULL) {
1038 		zoneid_t zoneid;
1039 
1040 		uzero(sp, sizeof (siginfo_t));
1041 		if (SI_FROMUSER(sip) &&
1042 		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1043 		    zoneid != sip->si_zoneid) {
1044 			k_siginfo_t sani_sip = *sip;
1045 			sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
1046 			sani_sip.si_uid = 0;
1047 			sani_sip.si_ctid = -1;
1048 			sani_sip.si_zoneid = zoneid;
1049 			copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
1050 		} else {
1051 			copyout_noerr(sip, sp, sizeof (*sip));
1052 		}
1053 		sip_addr = (siginfo_t *)sp;
1054 		sp += sizeof (siginfo_t);
1055 
1056 		if (sig == SIGPROF &&
1057 		    curthread->t_rprof != NULL &&
1058 		    curthread->t_rprof->rp_anystate) {
1059 			/*
1060 			 * We stand on our head to deal with
1061 			 * the real time profiling signal.
1062 			 * Fill in the stuff that doesn't fit
1063 			 * in a normal k_siginfo structure.
1064 			 */
1065 			int i = sip->si_nsysarg;
1066 			while (--i >= 0) {
1067 				sulword_noerr(
1068 				    (ulong_t *)&sip_addr->si_sysarg[i],
1069 				    (ulong_t)lwp->lwp_arg[i]);
1070 			}
1071 			copyout_noerr(curthread->t_rprof->rp_state,
1072 			    sip_addr->si_mstate,
1073 			    sizeof (curthread->t_rprof->rp_state));
1074 		}
1075 	} else {
1076 		sip_addr = (siginfo_t *)NULL;
1077 	}
1078 
1079 	/*
1080 	 * When flush_user_windows_to_stack() can't save all the
1081 	 * windows to the stack, it puts them in the lwp's pcb.
1082 	 */
1083 	if (gwin_size != 0) {
1084 		gwp = kmem_alloc(gwin_size, KM_SLEEP);
1085 		getgwins(lwp, gwp);
1086 		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
1087 		copyout_noerr(gwp, sp, gwin_size);
1088 		kmem_free(gwp, gwin_size);
1089 		gwp = NULL;
1090 		sp += SA(gwin_size);
1091 	} else
1092 		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);
1093 
1094 	if (fpq_size != 0) {
1095 		struct fq *fqp = (struct fq *)sp;
1096 		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
1097 		copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);
1098 
1099 		/*
1100 		 * forget the fp queue so that the signal handler can run
1101 		 * without being harassed--it will do a setcontext that will
1102 		 * re-establish the queue if there still is one
1103 		 *
1104 		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
1105 		 *	to terminate its processing of the queue after signal
1106 		 *	delivery.
1107 		 */
1108 		mpcb->mpcb_fpu->fpu_qcnt = 0;
1109 		sp += SA(fpq_size);
1110 
1111 		/* Also, syscall needs to know about this */
1112 		mpcb->mpcb_flags |= FP_TRAPPED;
1113 
1114 	} else {
1115 		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
1116 		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1117 	}
1118 
1119 
1120 	/*
1121 	 * Since we flushed the user's windows and we are changing his
1122 	 * stack pointer, the window that the user will return to will
1123 	 * be restored from the save area in the frame we are setting up.
1124 	 * We copy in save area for old stack pointer so that debuggers
1125 	 * can do a proper stack backtrace from the signal handler.
1126 	 */
1127 	if (mpcb->mpcb_wbcnt == 0) {
1128 		watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
1129 		    S_READ);
1130 		ucopy(tos, &fp->frwin, sizeof (struct rwindow));
1131 	}
1132 
1133 	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1134 
1135 	if (newstack != 0) {
1136 		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1137 
1138 		if (lwp->lwp_ustack) {
1139 			copyout_noerr(&lwp->lwp_sigaltstack,
1140 			    (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
1141 		}
1142 	}
1143 
1144 	no_fault();
1145 	mpcb->mpcb_wbcnt = 0;		/* let user go on */
1146 
1147 	if (watched2)
1148 		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1149 	if (watched)
1150 		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1151 
1152 	/*
1153 	 * Set up user registers for execution of signal handler.
1154 	 */
1155 	rp->r_sp = (uintptr_t)fp - STACK_BIAS;
1156 	rp->r_pc = (uintptr_t)hdlr;
1157 	rp->r_npc = (uintptr_t)hdlr + 4;
1158 	/* make sure %asi is ASI_PNF */
1159 	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1160 	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1161 	rp->r_o0 = sig;
1162 	rp->r_o1 = (uintptr_t)sip_addr;
1163 	rp->r_o2 = (uintptr_t)&fp->uc;
1164 	/*
1165 	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
1166 	 * lwp_eosys is handled, so setting it here would affect the next
1167 	 * system call.
1168 	 */
1169 	return (1);
1170 
1171 badstack:
1172 	no_fault();
1173 	if (watched2)
1174 		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1175 	if (watched)
1176 		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1177 	if (tuc)
1178 		kmem_free(tuc, sizeof (ucontext_t));
1179 	if (xregs)
1180 		kmem_free(xregs, xregs_size);
1181 	if (gwp)
1182 		kmem_free(gwp, gwin_size);
1183 #ifdef DEBUG
1184 	printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1185 	    PTOU(p)->u_comm, p->p_pid, sig);
1186 	printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
1187 	    (void *)fp, (void *)hdlr, rp->r_pc);
1188 #endif
1189 	return (0);
1190 }
1191 
1192 
1193 #ifdef _SYSCALL32_IMPL
1194 
1195 /*
1196  * Construct the execution environment for the user's signal
1197  * handler and arrange for control to be given to it on return
1198  * to userland.  The library code now calls setcontext() to
1199  * clean up after the signal handler, so sigret() is no longer
1200  * needed.
1201  */
1202 int
1203 sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
1204 {
1205 	/*
1206 	 * 'volatile' is needed to ensure that values are
1207 	 * correct on the error return from on_fault().
1208 	 */
1209 	volatile int minstacksz; /* min stack required to catch signal */
1210 	int newstack = 0;	/* if true, switching to altstack */
1211 	label_t ljb;
1212 	caddr_t sp;
1213 	struct regs *volatile rp;
1214 	klwp_t *lwp = ttolwp(curthread);
1215 	proc_t *volatile p = ttoproc(curthread);
1216 	struct fq32 fpu_q[MAXFPQ]; /* to hold floating queue */
1217 	struct fq32 *dfq = NULL;
1218 	size_t fpq_size = 0;
1219 	struct sigframe32 {
1220 		struct frame32 frwin;
1221 		ucontext32_t uc;
1222 	};
1223 	struct sigframe32 *volatile fp;
1224 	siginfo32_t *sip_addr;
1225 	ucontext32_t *volatile tuc = NULL;
1226 	char *volatile xregs = NULL;
1227 	volatile int xregs_size = 0;
1228 	gwindows32_t *volatile gwp = NULL;
1229 	volatile size_t gwin_size = 0;
1230 	kfpu_t *fpp;
1231 	struct machpcb *mpcb;
1232 	volatile int watched = 0;
1233 	volatile int watched2 = 0;
1234 	caddr_t tos;
1235 
1236 	/*
1237 	 * Make sure the current last user window has been flushed to
1238 	 * the stack save area before we change the sp.
1239 	 * Restore register window if a debugger modified it.
1240 	 */
1241 	(void) flush_user_windows_to_stack(NULL);
1242 	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
1243 		xregrestore(lwp, 0);
1244 
1245 	mpcb = lwptompcb(lwp);
1246 	rp = lwptoregs(lwp);
1247 
1248 	/*
1249 	 * Clear the watchpoint return stack pointers.
1250 	 */
1251 	mpcb->mpcb_rsp[0] = NULL;
1252 	mpcb->mpcb_rsp[1] = NULL;
1253 
1254 	minstacksz = sizeof (struct sigframe32);
1255 
1256 	if (sip != NULL)
1257 		minstacksz += sizeof (siginfo32_t);
1258 
1259 	/*
1260 	 * These two fields are pointed to by ABI structures and may
1261 	 * be of arbitrary length. Size them now so we know how big
1262 	 * the signal frame has to be.
1263 	 */
1264 	fpp = lwptofpu(lwp);
1265 	fpp->fpu_fprs = _fp_read_fprs();
1266 	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
1267 		fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
1268 		minstacksz += fpq_size;
1269 		dfq = fpu_q;
1270 	}
1271 
1272 	mpcb = lwptompcb(lwp);
1273 	if (mpcb->mpcb_wbcnt != 0) {
1274 		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
1275 		    (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
1276 		    sizeof (int32_t);
1277 		minstacksz += gwin_size;
1278 	}
1279 
1280 	/*
1281 	 * Extra registers, if supported by this platform, may be of arbitrary
1282 	 * length. Size them now so we know how big the signal frame has to be.
1283 	 */
1284 	xregs_size = xregs_getsize(p);
1285 	minstacksz += SA32(xregs_size);
1286 
1287 	/*
1288 	 * Figure out whether we will be handling this signal on
1289 	 * an alternate stack specified by the user. Then allocate
1290 	 * and validate the stack requirements for the signal handler
1291 	 * context. on_fault will catch any faults.
1292 	 */
1293 	newstack = (sigismember(&u.u_sigonstack, sig) &&
1294 	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
1295 
1296 	tos = (void *)(uintptr_t)(uint32_t)rp->r_sp;
1297 	/*
1298 	 * Force proper stack pointer alignment, even in the face of a
1299 	 * misaligned stack pointer from user-level before the signal.
1300 	 * Don't use the SA32() macro because that rounds up, not down.
1301 	 */
1302 	tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN32 - 1ul));
1303 
1304 	if (newstack != 0) {
1305 		fp = (struct sigframe32 *)
1306 		    (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
1307 			SA32((int)lwp->lwp_sigaltstack.ss_size) -
1308 			STACK_ALIGN32 -
1309 			SA32(minstacksz));
1310 	} else {
1311 		/*
1312 		 * If we were unable to flush all register windows to
1313 		 * the stack and we are not now on an alternate stack,
1314 		 * just dump core with a SIGSEGV back in psig().
1315 		 */
1316 		if (sig == SIGSEGV &&
1317 		    mpcb->mpcb_wbcnt != 0 &&
1318 		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
1319 			return (0);
1320 		fp = (struct sigframe32 *)(tos - SA32(minstacksz));
1321 		/*
1322 		 * Could call grow here, but stack growth now handled below
1323 		 * in code protected by on_fault().
1324 		 */
1325 	}
1326 	sp = (caddr_t)fp + sizeof (struct sigframe32);
1327 
1328 	/*
1329 	 * Make sure process hasn't trashed its stack.
1330 	 */
1331 	if ((caddr_t)fp >= p->p_usrstack ||
1332 	    (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
1333 #ifdef DEBUG
1334 		printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1335 		    PTOU(p)->u_comm, p->p_pid, sig);
1336 		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1337 		    (void *)fp, (void *)hdlr, rp->r_pc);
1338 		printf("fp above USRSTACK32\n");
1339 #endif
1340 		return (0);
1341 	}
1342 
1343 	watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1344 	if (on_fault(&ljb))
1345 		goto badstack;
1346 
1347 	tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
1348 	savecontext32(tuc, lwp->lwp_sigoldmask, dfq);
1349 
1350 	/*
1351 	 * save extra register state if it exists
1352 	 */
1353 	if (xregs_size != 0) {
1354 		xregs_setptr32(lwp, tuc, (caddr32_t)(uintptr_t)sp);
1355 		xregs = kmem_alloc(xregs_size, KM_SLEEP);
1356 		xregs_get(lwp, xregs);
1357 		copyout_noerr(xregs, sp, xregs_size);
1358 		kmem_free(xregs, xregs_size);
1359 		xregs = NULL;
1360 		sp += SA32(xregs_size);
1361 	}
1362 
1363 	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1364 	kmem_free(tuc, sizeof (*tuc));
1365 	tuc = NULL;
1366 
1367 	if (sip != NULL) {
1368 		siginfo32_t si32;
1369 		zoneid_t zoneid;
1370 
1371 		siginfo_kto32(sip, &si32);
1372 		if (SI_FROMUSER(sip) &&
1373 		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1374 		    zoneid != sip->si_zoneid) {
1375 			si32.si_pid = p->p_zone->zone_zsched->p_pid;
1376 			si32.si_uid = 0;
1377 			si32.si_ctid = -1;
1378 			si32.si_zoneid = zoneid;
1379 		}
1380 		uzero(sp, sizeof (siginfo32_t));
1381 		copyout_noerr(&si32, sp, sizeof (siginfo32_t));
1382 		sip_addr = (siginfo32_t *)sp;
1383 		sp += sizeof (siginfo32_t);
1384 
1385 		if (sig == SIGPROF &&
1386 		    curthread->t_rprof != NULL &&
1387 		    curthread->t_rprof->rp_anystate) {
1388 			/*
1389 			 * We stand on our head to deal with
1390 			 * the real time profiling signal.
1391 			 * Fill in the stuff that doesn't fit
1392 			 * in a normal k_siginfo structure.
1393 			 */
1394 			int i = sip->si_nsysarg;
1395 			while (--i >= 0) {
1396 				suword32_noerr(&sip_addr->si_sysarg[i],
1397 				    (uint32_t)lwp->lwp_arg[i]);
1398 			}
1399 			copyout_noerr(curthread->t_rprof->rp_state,
1400 			    sip_addr->si_mstate,
1401 			    sizeof (curthread->t_rprof->rp_state));
1402 		}
1403 	} else {
1404 		sip_addr = NULL;
1405 	}
1406 
1407 	/*
1408 	 * When flush_user_windows_to_stack() can't save all the
1409 	 * windows to the stack, it puts them in the lwp's pcb.
1410 	 */
1411 	if (gwin_size != 0) {
1412 		gwp = kmem_alloc(gwin_size, KM_SLEEP);
1413 		getgwins32(lwp, gwp);
1414 		suword32_noerr(&fp->uc.uc_mcontext.gwins,
1415 		    (uint32_t)(uintptr_t)sp);
1416 		copyout_noerr(gwp, sp, gwin_size);
1417 		kmem_free(gwp, gwin_size);
1418 		gwp = NULL;
1419 		sp += gwin_size;
1420 	} else {
1421 		suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)NULL);
1422 	}
1423 
1424 	if (fpq_size != 0) {
1425 		/*
1426 		 * Update the (already copied out) fpu32.fpu_q pointer
1427 		 * from NULL to the 32-bit address on the user's stack
1428 		 * where we then copyout the fq32 to.
1429 		 * to which we then copy out the fq32.
1430 		struct fq32 *fqp = (struct fq32 *)sp;
1431 		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
1432 		    (uint32_t)(uintptr_t)fqp);
1433 		copyout_noerr(dfq, fqp, fpq_size);
1434 
1435 		/*
1436 		 * forget the fp queue so that the signal handler can run
1437 		 * without being harassed--it will do a setcontext that will
1438 		 * re-establish the queue if there still is one
1439 		 *
1440 		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
1441 		 *	to terminate its processing of the queue after signal
1442 		 *	delivery.
1443 		 */
1444 		mpcb->mpcb_fpu->fpu_qcnt = 0;
1445 		sp += fpq_size;
1446 
1447 		/* Also, syscall needs to know about this */
1448 		mpcb->mpcb_flags |= FP_TRAPPED;
1449 
1450 	} else {
1451 		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
1452 		    (uint32_t)NULL);
1453 		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1454 	}
1455 
1456 
1457 	/*
1458 	 * Since we flushed the user's windows and we are changing his
1459 	 * stack pointer, the window that the user will return to will
1460 	 * be restored from the save area in the frame we are setting up.
1461 	 * We copy in save area for old stack pointer so that debuggers
1462 	 * can do a proper stack backtrace from the signal handler.
1463 	 */
1464 	if (mpcb->mpcb_wbcnt == 0) {
1465 		watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
1466 		    S_READ);
1467 		ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
1468 	}
1469 
1470 	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1471 
1472 	if (newstack != 0) {
1473 		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1474 		if (lwp->lwp_ustack) {
1475 			stack32_t stk32;
1476 
1477 			stk32.ss_sp =
1478 			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
1479 			stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
1480 			stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;
1481 
1482 			copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
1483 			    sizeof (stack32_t));
1484 		}
1485 	}
1486 
1487 	no_fault();
1488 	mpcb->mpcb_wbcnt = 0;		/* let user go on */
1489 
1490 	if (watched2)
1491 		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
1492 	if (watched)
1493 		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1494 
1495 	/*
1496 	 * Set up user registers for execution of signal handler.
1497 	 */
1498 	rp->r_sp = (uintptr_t)fp;
1499 	rp->r_pc = (uintptr_t)hdlr;
1500 	rp->r_npc = (uintptr_t)hdlr + 4;
1501 	/* make sure %asi is ASI_PNF */
1502 	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1503 	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1504 	rp->r_o0 = sig;
1505 	rp->r_o1 = (uintptr_t)sip_addr;
1506 	rp->r_o2 = (uintptr_t)&fp->uc;
1507 	/*
1508 	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
1509 	 * lwp_eosys is handled, so setting it here would affect the next
1510 	 * system call.
1511 	 */
1512 	return (1);
1513 
1514 badstack:
1515 	no_fault();
1516 	if (watched2)
1517 		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
1518 	if (watched)
1519 		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
1520 	if (tuc)
1521 		kmem_free(tuc, sizeof (*tuc));
1522 	if (xregs)
1523 		kmem_free(xregs, xregs_size);
1524 	if (gwp)
1525 		kmem_free(gwp, gwin_size);
1526 #ifdef DEBUG
1527 	printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1528 	    PTOU(p)->u_comm, p->p_pid, sig);
1529 	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1530 	    (void *)fp, (void *)hdlr, rp->r_pc);
1531 #endif
1532 	return (0);
1533 }
1534 
1535 #endif /* _SYSCALL32_IMPL */
1536 
1537 
1538 /*
1539  * load user registers into lwp.
1540  * thrptr ignored for sparc.
1541  */
1542 /* ARGSUSED2 */
1543 void
1544 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
1545 {
1546 	setgregs(lwp, grp);
1547 	if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
1548 		lwptoregs(lwp)->r_tstate = TSTATE_USER32;
1549 	else
1550 		lwptoregs(lwp)->r_tstate = TSTATE_USER64;
1551 
1552 	if (!fpu_exists)
1553 		lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
1554 	lwp->lwp_eosys = JUSTRETURN;
1555 	lwptot(lwp)->t_post_sys = 1;
1556 }
1557 
1558 /*
1559  * set syscall()'s return values for a lwp.
1560  */
1561 void
1562 lwp_setrval(klwp_t *lwp, int v1, int v2)
1563 {
1564 	struct regs *rp = lwptoregs(lwp);
1565 
1566 	rp->r_tstate &= ~TSTATE_IC;
1567 	rp->r_o0 = v1;
1568 	rp->r_o1 = v2;
1569 }
1570 
1571 /*
1572  * set stack pointer for a lwp
1573  */
1574 void
1575 lwp_setsp(klwp_t *lwp, caddr_t sp)
1576 {
1577 	struct regs *rp = lwptoregs(lwp);
1578 	rp->r_sp = (uintptr_t)sp;
1579 }
1580 
1581 /*
1582  * Take any PCB specific actions that are required or flagged in the PCB.
1583  */
1584 extern void trap_async_hwerr(void);
1585 #pragma	weak trap_async_hwerr
1586 
1587 void
1588 lwp_pcb_exit(void)
1589 {
1590 	klwp_t *lwp = ttolwp(curthread);
1591 
1592 	if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1593 		trap_async_hwerr();
1594 	}
1595 }
1596 
1597 /*
1598  * Invalidate the saved user register windows in the pcb struct
1599  * for the current thread. They will no longer be preserved.
1600  */
1601 void
1602 lwp_clear_uwin(void)
1603 {
1604 	struct machpcb *m = lwptompcb(ttolwp(curthread));
1605 
1606 	/*
1607 	 * This has the effect of invalidating all (any) of the
1608 	 * user level windows that are currently sitting in the
1609 	 * kernel buffer.
1610 	 */
1611 	m->mpcb_wbcnt = 0;
1612 }
1613 
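/*
 * Synthesize a v8 PSR image for 32-bit consumers from the v9 TSTATE and
 * FPRS: CWP, PS and the integer condition codes come from tstate, EF
 * reflects FPRS_FEF, and the impl/version field is filled in.
 */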
1614 static uint_t
1615 mkpsr(uint64_t tstate, uint32_t fprs)
1616 {
1617 	uint_t psr, icc;
1618 
1619 	psr = tstate & TSTATE_CWP_MASK;
1620 	if (tstate & TSTATE_PRIV)
1621 		psr |= PSR_PS;
1622 	if (fprs & FPRS_FEF)
1623 		psr |= PSR_EF;
1624 	icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
1625 	psr |= icc;
1626 	psr |= V9_PSR_IMPLVER;
1627 	return (psr);
1628 }
1629 
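/*
 * Bring the instruction cache in sync with memory for [va, va + len):
 * align va down to an 8-byte boundary and flush one doubleword at a time.
 */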
1630 void
1631 sync_icache(caddr_t va, uint_t len)
1632 {
1633 	caddr_t end;
1634 
1635 	end = va + len;
1636 	va = (caddr_t)((uintptr_t)va & -8l);	/* sparc needs 8-byte align */
1637 	while (va < end) {
1638 		doflush(va);
1639 		va += 8;
1640 	}
1641 }
1642 
1643 #ifdef _SYSCALL32_IMPL
1644 
1645 /*
1646  * Copy the floating point queue if and only if there is a queue and a place
1647  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1648  * The issue is that while we are handling the fq32 in sendsig, we
1649  * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
1650  * will not suffice, so we have the third parameter to this function.
1651  */
1652 void
1653 fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
1654 {
1655 	int i;
1656 
1657 	bzero(dest, sizeof (*dest));
1658 	for (i = 0; i < 32; i++)
1659 		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1660 	dest->fpu_q = NULL;
1661 	dest->fpu_fsr = (uint32_t)src->fpu_fsr;
1662 	dest->fpu_qcnt = src->fpu_qcnt;
1663 	dest->fpu_q_entrysize = sizeof (struct fpq32);
1664 	dest->fpu_en = src->fpu_en;
1665 
1666 	if ((src->fpu_qcnt) && (dfq != NULL)) {
1667 		struct fq *sfq = src->fpu_q;
1668 		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1669 			dfq->FQu.fpq.fpq_addr =
1670 			    (caddr32_t)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1671 			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1672 		}
1673 	}
1674 }
1675 
1676 /*
1677  * Copy the floating point queue if and only if there is a queue and a place
1678  * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1679  * The *dfq is required to escape the bzero in both this function and in
1680  * ucontext_32ton. The *sfq is required because once the fq32 is copied
1681  * into the kernel (in setcontext), we need a 64-bit pointer to it.
1682  */
1683 static void
1684 fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
1685     const struct fq32 *sfq, struct fq *dfq)
1686 {
1687 	int i;
1688 
1689 	bzero(dest, sizeof (*dest));
1690 	for (i = 0; i < 32; i++)
1691 		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1692 	dest->fpu_q = dfq;
1693 	dest->fpu_fsr = (uint64_t)src->fpu_fsr;
1694 	if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
1695 		dest->fpu_q_entrysize = sizeof (struct fpq);
1696 	else
1697 		dest->fpu_q_entrysize = 0;
1698 	dest->fpu_en = src->fpu_en;
1699 
1700 	if ((src->fpu_qcnt) && (sfq) && (dfq)) {
1701 		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1702 			dfq->FQu.fpq.fpq_addr =
1703 			    (unsigned int *)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1704 			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1705 		}
1706 	}
1707 }
1708 
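/*
 * Expand a 32-bit ucontext into the native form.  The 32-bit gregset
 * carries a PSR rather than separate CCR/ASI/FPRS values, so those are
 * reconstructed here; fpregs are converted only when UC_FPU is set.
 */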
1709 void
1710 ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
1711     const struct fq32 *sfq, struct fq *dfq)
1712 {
1713 	int i;
1714 
1715 	bzero(dest, sizeof (*dest));
1716 
1717 	dest->uc_flags = src->uc_flags;
1718 	dest->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
1719 
1720 	for (i = 0; i < 4; i++) {
1721 		dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
1722 	}
1723 
1724 	dest->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
1725 	dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
1726 	dest->uc_stack.ss_flags = src->uc_stack.ss_flags;
1727 
1728 	/* REG_CCR is 0, skip over it and handle it after this loop */
1729 	for (i = 1; i < _NGREG32; i++)
1730 		dest->uc_mcontext.gregs[i] =
1731 		    (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
1732 	dest->uc_mcontext.gregs[REG_CCR] =
1733 	    (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
1734 	dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
1735 	/*
1736 	 * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
1737 	 * otherwise there is no guarantee that anything in fpregs is valid.
1738 	 */
1739 	if (src->uc_flags & UC_FPU) {
1740 		dest->uc_mcontext.gregs[REG_FPRS] =
1741 		    ((src->uc_mcontext.fpregs.fpu_en) ?
1742 		    (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
1743 	} else {
1744 		dest->uc_mcontext.gregs[REG_FPRS] = 0;
1745 	}
1746 	dest->uc_mcontext.gwins =
1747 	    (gwindows_t *)(uintptr_t)src->uc_mcontext.gwins;
1748 	if (src->uc_flags & UC_FPU) {
1749 		fpuregset_32ton(&src->uc_mcontext.fpregs,
1750 		    &dest->uc_mcontext.fpregs, sfq, dfq);
1751 	}
1752 }
1753 
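/*
 * Convert a register window (8 locals plus 8 ins) between the 64-bit
 * and 32-bit layouts by narrowing or widening each of the 16 registers.
 */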
1754 void
1755 rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
1756 {
1757 	greg_t *s = (greg_t *)src;
1758 	greg32_t *d = (greg32_t *)dest;
1759 	int i;
1760 
1761 	for (i = 0; i < 16; i++)
1762 		*d++ = (greg32_t)*s++;
1763 }
1764 
1765 void
1766 rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
1767 {
1768 	greg32_t *s = (greg32_t *)src;
1769 	greg_t *d = (greg_t *)dest;
1770 	int i;
1771 
1772 	for (i = 0; i < 16; i++)
1773 		*d++ = (uint32_t)*s++;
1774 }
1775 
1776 #endif /* _SYSCALL32_IMPL */
1777 
1778 /*
1779  * The panic code invokes panic_saveregs() to record the contents of a
1780  * regs structure into the specified panic_data structure for debuggers.
1781  */
1782 void
1783 panic_saveregs(panic_data_t *pdp, struct regs *rp)
1784 {
1785 	panic_nv_t *pnv = PANICNVGET(pdp);
1786 
1787 	PANICNVADD(pnv, "tstate", rp->r_tstate);
1788 	PANICNVADD(pnv, "g1", rp->r_g1);
1789 	PANICNVADD(pnv, "g2", rp->r_g2);
1790 	PANICNVADD(pnv, "g3", rp->r_g3);
1791 	PANICNVADD(pnv, "g4", rp->r_g4);
1792 	PANICNVADD(pnv, "g5", rp->r_g5);
1793 	PANICNVADD(pnv, "g6", rp->r_g6);
1794 	PANICNVADD(pnv, "g7", rp->r_g7);
1795 	PANICNVADD(pnv, "o0", rp->r_o0);
1796 	PANICNVADD(pnv, "o1", rp->r_o1);
1797 	PANICNVADD(pnv, "o2", rp->r_o2);
1798 	PANICNVADD(pnv, "o3", rp->r_o3);
1799 	PANICNVADD(pnv, "o4", rp->r_o4);
1800 	PANICNVADD(pnv, "o5", rp->r_o5);
1801 	PANICNVADD(pnv, "o6", rp->r_o6);
1802 	PANICNVADD(pnv, "o7", rp->r_o7);
1803 	PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
1804 	PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
1805 	PANICNVADD(pnv, "y", (uint32_t)rp->r_y);
1806 
1807 	PANICNVSET(pdp, pnv);
1808 }
1809