1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
23 /* All Rights Reserved */
24
25 /*
26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
28 * Copyright (c) 2016 by Delphix. All rights reserved.
29 */
30
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/vmparam.h>
34 #include <sys/systm.h>
35 #include <sys/stack.h>
36 #include <sys/frame.h>
37 #include <sys/proc.h>
38 #include <sys/ucontext.h>
39 #include <sys/cpuvar.h>
40 #include <sys/asm_linkage.h>
41 #include <sys/kmem.h>
42 #include <sys/errno.h>
43 #include <sys/bootconf.h>
44 #include <sys/archsystm.h>
45 #include <sys/fpu/fpusystm.h>
46 #include <sys/debug.h>
47 #include <sys/privregs.h>
48 #include <sys/machpcb.h>
49 #include <sys/psr_compat.h>
50 #include <sys/cmn_err.h>
51 #include <sys/asi.h>
52 #include <sys/copyops.h>
53 #include <sys/model.h>
54 #include <sys/panic.h>
55 #include <sys/exec.h>
56
/*
 * By default, set the weakest model to TSO (Total Store Order)
 * which is the default memory model on SPARC.
 * If a platform does support a weaker model than TSO, this will be
 * updated at runtime to reflect that.
 */
uint_t weakest_mem_model = TSTATE_MM_TSO;
64
/*
 * Modify the lower 32 bits of a uint64_t, preserving the upper 32 bits.
 */
#define SET_LOWER_32(all, lower) \
	(((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))

/*
 * Sentinel fpu_en value handed out by getfpregs for the case where the
 * FPU is live (FPRS_FEF set) but fpu_en == 0 (memcpy/threads usage).
 */
#define MEMCPY_FPU_EN	2	/* fprs on and fpu_en == 0 */

static uint_t mkpsr(uint64_t tstate, uint32_t fprs);

#ifdef _SYSCALL32_IMPL
static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
	const struct fq32 *sfq, struct _fq *dfq);
#endif /* _SYSCALL32_IMPL */
79
/*
 * Set floating-point registers.
 * Copies the caller-supplied fpregset into the lwp's saved FPU state
 * and, when the lwp is the current thread and hardware fp exists,
 * loads the new context into the hardware as well.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
setfpregs(klwp_t *lwp, fpregset_t *fp)
{
	struct machpcb *mpcb;
	kfpu_t *pfp;
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);	/* fully enabled fprs */
	model_t model = lwp_getdatamodel(lwp);

	mpcb = lwptompcb(lwp);
	pfp = lwptofpu(lwp);

	/*
	 * This is always true for both "real" fp programs and memcpy fp
	 * programs, because we force fpu_en to MEMCPY_FPU_EN in getfpregs,
	 * for the memcpy and threads cases where (fpu_en == 0) &&
	 * (fpu_fprs & FPRS_FEF), if setfpregs is called after getfpregs.
	 */
	if (fp->fpu_en) {
		/* no preemption while manipulating per-cpu fp hardware */
		kpreempt_disable();

		if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
		    fpu_exists) {
			/*
			 * It's not currently using the FPU but wants to in its
			 * new context - arrange for this on return to userland.
			 */
			pfp->fpu_fprs = (uint32_t)fprs;
		}
		/*
		 * Get setfpregs to restore fpu_en to zero
		 * for the memcpy/threads case (where pfp->fpu_en == 0 &&
		 * (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (fp->fpu_en == MEMCPY_FPU_EN)
			fp->fpu_en = 0;

		/*
		 * Load up a user's floating point context.
		 */
		if (fp->fpu_qcnt > MAXFPQ)	/* plug security holes */
			fp->fpu_qcnt = MAXFPQ;
		fp->fpu_q_entrysize = sizeof (struct _fq);

		/*
		 * For v9 kernel, copy all of the fp regs.
		 * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
		 * Restore entire fsr for v9, only lower half for v8.
		 * NOTE(review): the copy spans sizeof (fp->fpu_fr) bytes from
		 * the struct base -- assumes fpu_fr is the first member of
		 * both fpregset_t and kfpu_t; confirm against the headers.
		 */
		(void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
		if (model == DATAMODEL_LP64)
			pfp->fpu_fsr = fp->fpu_fsr;
		else
			pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
		pfp->fpu_qcnt = fp->fpu_qcnt;
		pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
		pfp->fpu_en = fp->fpu_en;
		pfp->fpu_q = mpcb->mpcb_fpu_q;
		if (fp->fpu_qcnt)
			(void) kcopy(fp->fpu_q, pfp->fpu_q,
			    fp->fpu_qcnt * fp->fpu_q_entrysize);
		/* FSR ignores these bits on load, so they can not be set */
		pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);

		/*
		 * If not the current process then resume() will handle it.
		 */
		if (lwp != ttolwp(curthread)) {
			/* force resume to reload fp regs */
			pfp->fpu_fprs |= FPRS_FEF;
			kpreempt_enable();
			return;
		}

		/*
		 * Load up FPU with new floating point context.
		 */
		if (fpu_exists) {
			pfp->fpu_fprs = _fp_read_fprs();
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* fp was disabled; enable it before loading */
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "setfpregs with fp disabled!\n");
#endif
			}
			/*
			 * Load all fp regs for v9 user programs, but only
			 * load the lower half for v8[plus] programs.
			 */
			if (model == DATAMODEL_LP64)
				fp_restore(pfp);
			else
				fp_v8_load(pfp);
		}

		kpreempt_enable();
	} else {
		if ((pfp->fpu_en) ||		/* normal fp case */
		    (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
			/*
			 * Currently the lwp has floating point enabled.
			 * Turn off FPRS_FEF in user's fprs, saved and
			 * real copies thereof.
			 */
			pfp->fpu_en = 0;
			if (fpu_exists) {
				fprs = 0;
				if (lwp == ttolwp(curthread))
					_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
			}
		}
	}
}
201
#ifdef _SYSCALL32_IMPL
/*
 * ILP32 variant of setfpregs: widen the 32-bit fpregset to the native
 * form (no fp queue conversion requested), then defer to setfpregs.
 */
void
setfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t fpregs;

	fpuregset_32ton(fp, &fpregs, NULL, NULL);
	setfpregs(lwp, &fpregs);
}
#endif	/* _SYSCALL32_IMPL */
212
213 /*
214 * NOTE: 'lwp' might not correspond to 'curthread' since this is
215 * called from code in /proc to set the registers of another lwp.
216 */
217 void
run_fpq(klwp_t * lwp,fpregset_t * fp)218 run_fpq(klwp_t *lwp, fpregset_t *fp)
219 {
220 /*
221 * If the context being loaded up includes a floating queue,
222 * we need to simulate those instructions (since we can't reload
223 * the fpu) and pass the process any appropriate signals
224 */
225
226 if (lwp == ttolwp(curthread)) {
227 if (fpu_exists) {
228 if (fp->fpu_qcnt)
229 fp_runq(lwp->lwp_regs);
230 }
231 }
232 }
233
/*
 * Get floating-point registers.
 * Snapshots the lwp's FPU state into *fp.  If the lwp is the current
 * thread and owns the fp hardware, the live registers are flushed to
 * the pcb first so the snapshot is current.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
getfpregs(klwp_t *lwp, fpregset_t *fp)
{
	kfpu_t *pfp;
	model_t model = lwp_getdatamodel(lwp);

	pfp = lwptofpu(lwp);
	/* keep fp-hardware ownership stable while sampling */
	kpreempt_disable();
	if (fpu_exists && ttolwp(curthread) == lwp)
		pfp->fpu_fprs = _fp_read_fprs();

	/*
	 * First check the fpu_en case, for normal fp programs.
	 * Next check the fprs case, for fp use by memcpy/threads.
	 */
	if (((fp->fpu_en = pfp->fpu_en) != 0) ||
	    (pfp->fpu_fprs & FPRS_FEF)) {
		/*
		 * Force setfpregs to restore the fp context in
		 * setfpregs for the memcpy and threads cases (where
		 * pfp->fpu_en == 0 && (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (pfp->fpu_en == 0)
			fp->fpu_en = MEMCPY_FPU_EN;
		/*
		 * If we have an fpu and the current thread owns the fp
		 * context, flush fp * registers into the pcb. Save all
		 * the fp regs for v9, xregs_getfpregs saves the upper half
		 * for v8plus. Save entire fsr for v9, only lower half for v8.
		 */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

				/* enable fp so the flush below can run */
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "getfpregs with fp disabled!\n");
#endif
			}
			if (model == DATAMODEL_LP64)
				fp_fksave(pfp);
			else
				fp_v8_fksave(pfp);
		}
		/*
		 * NOTE(review): copy spans sizeof (fp->fpu_fr) bytes from the
		 * struct base -- assumes fpu_fr is the first member of both
		 * kfpu_t and fpregset_t; confirm against the headers.
		 */
		(void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
		fp->fpu_q = pfp->fpu_q;
		if (model == DATAMODEL_LP64)
			fp->fpu_fsr = pfp->fpu_fsr;
		else
			fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
		fp->fpu_qcnt = pfp->fpu_qcnt;
		fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
	} else {
		int i;
		/* fp not in use: report all-ones (NaN) register images */
		for (i = 0; i < 32; i++)	/* NaN */
			((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
		if (model == DATAMODEL_LP64) {
			for (i = 16; i < 32; i++)	/* NaN */
				((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
				    (uint64_t)-1;
		}
		fp->fpu_fsr = 0;
		fp->fpu_qcnt = 0;
	}
	kpreempt_enable();
}
308
#ifdef _SYSCALL32_IMPL
/*
 * ILP32 variant of getfpregs: fetch the native-form snapshot, then
 * narrow it into the caller's 32-bit fpregset.
 */
void
getfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t fpregs;

	getfpregs(lwp, &fpregs);
	fpuregset_nto32(&fpregs, fp, NULL);
}
#endif	/* _SYSCALL32_IMPL */
319
/*
 * Set general registers.
 * Installs the caller-supplied gregset into the lwp's saved register
 * state: CCR and ASI (folded into tstate), FPRS, pc/npc (forced to
 * 4-byte alignment), %y, and the global and output registers.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */

/* 64-bit gregset_t */
void
setgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	kfpu_t *fp = lwptofpu(lwp);
	uint64_t tbits;

	int current = (lwp == curthread->t_lwp);

	if (current)
		(void) save_syscall_args();	/* copy the args first */

	/* replace the CCR and ASI fields of the saved tstate */
	tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
	    ((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
	rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
	    ((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
	rp->r_tstate |= tbits;
	kpreempt_disable();
	fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
	/* push FPRS to hardware only when this lwp currently owns the fpu */
	if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
		_fp_write_fprs(fp->fpu_fprs);
	kpreempt_enable();

	/*
	 * pc and npc must be 4-byte aligned on sparc.
	 * We silently make it so to avoid a watchdog reset.
	 */
	rp->r_pc = grp[REG_PC] & ~03L;
	rp->r_npc = grp[REG_nPC] & ~03L;
	rp->r_y = grp[REG_Y];

	rp->r_g1 = grp[REG_G1];
	rp->r_g2 = grp[REG_G2];
	rp->r_g3 = grp[REG_G3];
	rp->r_g4 = grp[REG_G4];
	rp->r_g5 = grp[REG_G5];
	rp->r_g6 = grp[REG_G6];
	rp->r_g7 = grp[REG_G7];

	rp->r_o0 = grp[REG_O0];
	rp->r_o1 = grp[REG_O1];
	rp->r_o2 = grp[REG_O2];
	rp->r_o3 = grp[REG_O3];
	rp->r_o4 = grp[REG_O4];
	rp->r_o5 = grp[REG_O5];
	rp->r_o6 = grp[REG_O6];
	rp->r_o7 = grp[REG_O7];

	if (current) {
		/*
		 * This was called from a system call, but we
		 * do not want to return via the shared window;
		 * restoring the CPU context changes everything.
		 */
		lwp->lwp_eosys = JUSTRETURN;
		curthread->t_post_sys = 1;
	}
}
385
386 /*
387 * Return the general registers.
388 * NOTE: 'lwp' might not correspond to 'curthread' since this is
389 * called from code in /proc to get the registers of another lwp.
390 */
391 void
getgregs(klwp_t * lwp,gregset_t grp)392 getgregs(klwp_t *lwp, gregset_t grp)
393 {
394 struct regs *rp = lwptoregs(lwp);
395 uint32_t fprs;
396
397 kpreempt_disable();
398 if (fpu_exists && ttolwp(curthread) == lwp) {
399 fprs = _fp_read_fprs();
400 } else {
401 kfpu_t *fp = lwptofpu(lwp);
402 fprs = fp->fpu_fprs;
403 }
404 kpreempt_enable();
405 grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
406 grp[REG_PC] = rp->r_pc;
407 grp[REG_nPC] = rp->r_npc;
408 grp[REG_Y] = (uint32_t)rp->r_y;
409 grp[REG_G1] = rp->r_g1;
410 grp[REG_G2] = rp->r_g2;
411 grp[REG_G3] = rp->r_g3;
412 grp[REG_G4] = rp->r_g4;
413 grp[REG_G5] = rp->r_g5;
414 grp[REG_G6] = rp->r_g6;
415 grp[REG_G7] = rp->r_g7;
416 grp[REG_O0] = rp->r_o0;
417 grp[REG_O1] = rp->r_o1;
418 grp[REG_O2] = rp->r_o2;
419 grp[REG_O3] = rp->r_o3;
420 grp[REG_O4] = rp->r_o4;
421 grp[REG_O5] = rp->r_o5;
422 grp[REG_O6] = rp->r_o6;
423 grp[REG_O7] = rp->r_o7;
424 grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
425 grp[REG_FPRS] = fprs;
426 }
427
428 void
getgregs32(klwp_t * lwp,gregset32_t grp)429 getgregs32(klwp_t *lwp, gregset32_t grp)
430 {
431 struct regs *rp = lwptoregs(lwp);
432 uint32_t fprs;
433
434 kpreempt_disable();
435 if (fpu_exists && ttolwp(curthread) == lwp) {
436 fprs = _fp_read_fprs();
437 } else {
438 kfpu_t *fp = lwptofpu(lwp);
439 fprs = fp->fpu_fprs;
440 }
441 kpreempt_enable();
442 grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
443 grp[REG_PC] = rp->r_pc;
444 grp[REG_nPC] = rp->r_npc;
445 grp[REG_Y] = rp->r_y;
446 grp[REG_G1] = rp->r_g1;
447 grp[REG_G2] = rp->r_g2;
448 grp[REG_G3] = rp->r_g3;
449 grp[REG_G4] = rp->r_g4;
450 grp[REG_G5] = rp->r_g5;
451 grp[REG_G6] = rp->r_g6;
452 grp[REG_G7] = rp->r_g7;
453 grp[REG_O0] = rp->r_o0;
454 grp[REG_O1] = rp->r_o1;
455 grp[REG_O2] = rp->r_o2;
456 grp[REG_O3] = rp->r_o3;
457 grp[REG_O4] = rp->r_o4;
458 grp[REG_O5] = rp->r_o5;
459 grp[REG_O6] = rp->r_o6;
460 grp[REG_O7] = rp->r_o7;
461 }
462
463 /*
464 * Return the user-level PC.
465 * If in a system call, return the address of the syscall trap.
466 */
467 greg_t
getuserpc()468 getuserpc()
469 {
470 return (lwptoregs(ttolwp(curthread))->r_pc);
471 }
472
/*
 * Set register windows.
 * Copies the user-supplied spill windows from the (native, 64-bit form)
 * gwindows_t into the mpcb window-save buffer, converting each window
 * into the layout matching the lwp's window state.
 */
void
setgwins(klwp_t *lwp, gwindows_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;	/* number of buffered windows */
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;	/* per-window slot size in mpcb_wbuf */
	int is64;		/* lwp uses 64-bit register windows */

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * A window destined for a V9 stack is stored full-size
		 * (the rwindow32 pointer is just the slot address);
		 * otherwise narrow it to the 32-bit rwindow layout.
		 */
		if (is64 && IS_V9STACK(sp))
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
		else
			rwindow_nto32(&gwins->wbuf[i], rwp);
		mpcb->mpcb_wbcnt++;
	}
}
508
/*
 * 32-bit variant of setgwins: the incoming gwindows32_t holds 32-bit
 * windows, which are widened (or copied as-is) into the mpcb window
 * buffer according to the lwp's window state.
 */
void
setgwins32(klwp_t *lwp, gwindows32_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;	/* number of buffered windows */
	caddr_t sp;
	int i;

	struct rwindow *rwp;
	int wbuf_rwindow_size;	/* per-window slot size in mpcb_wbuf */
	int is64;		/* lwp uses 64-bit register windows */

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)(uintptr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * Widen a 32-bit window headed for a V9 stack; otherwise
		 * store it in 32-bit form unchanged.
		 */
		if (is64 && IS_V9STACK(sp))
			rwindow_32ton(&gwins->wbuf[i], rwp);
		else
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
		mpcb->mpcb_wbcnt++;
	}
}
543
/*
 * Get register windows.
 * Copies the windows buffered in the mpcb into the caller's (native,
 * 64-bit form) gwindows_t, widening 32-bit windows as needed.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
getgwins(klwp_t *lwp, gwindows_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;	/* number of buffered windows */
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;	/* per-window slot size in mpcb_wbuf */
	int is64;		/* lwp uses 64-bit register windows */

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		gwp->spbuf[i] = (greg_t *)sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * A window for a V9 stack is already full-size; a 32-bit
		 * window must be widened into the native rwindow form.
		 */
		if (is64 && IS_V9STACK(sp))
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
		else
			rwindow_32ton(rwp, &gwp->wbuf[i]);
	}
}
580
/*
 * 32-bit variant of getgwins: copies the buffered windows into the
 * caller's gwindows32_t, narrowing 64-bit windows as needed.
 */
void
getgwins32(klwp_t *lwp, gwindows32_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;	/* number of buffered windows */
	int i;
	struct rwindow *rwp;
	int wbuf_rwindow_size;	/* per-window slot size in mpcb_wbuf */
	caddr_t sp;
	int is64;		/* lwp uses 64-bit register windows */

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		gwp->spbuf[i] = (caddr32_t)(uintptr_t)sp;
		/*
		 * Narrow a full-size window bound for a V9 stack; a window
		 * already in 32-bit form is copied unchanged.
		 */
		if (is64 && IS_V9STACK(sp))
			rwindow_nto32(rwp, &gwp->wbuf[i]);
		else
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
	}
}
613
/*
 * For things that depend on register state being on the stack,
 * copy any register windows that get saved into the window buffer
 * (in the pcb) onto the stack. This normally gets fixed up
 * before returning to a user program. Callers of this routine
 * require this to happen immediately because a later kernel
 * operation depends on window state (like instruction simulation).
 *
 * Returns 0 on success, or the xcopyout() error for a window that
 * could not be written to the user stack.  When that happens and psp
 * is non-NULL, *psp is set to the faulting user address.
 */
int
flush_user_windows_to_stack(caddr_t *psp)
{
	int j, k;
	caddr_t sp;
	struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
	int err;
	int error = 0;
	int wbuf_rwindow_size;	/* per-window slot size in mpcb_wbuf */
	int rwindow_size;	/* bytes to copy out for this window */
	int stack_align;
	int watched;

	/* spill any live register windows into the pcb buffer first */
	flush_user_windows();

	if (mpcb->mpcb_wstate != WSTATE_USER32)
		wbuf_rwindow_size = WINDOWSIZE64;
	else
		wbuf_rwindow_size = WINDOWSIZE32;

	/* walk buffered windows from the end of the buffer backwards */
	j = mpcb->mpcb_wbcnt;
	while (j > 0) {
		sp = mpcb->mpcb_spbuf[--j];

		if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
		    IS_V9STACK(sp)) {
			/* 64-bit frame: remove the V9 stack bias */
			sp += V9BIAS64;
			stack_align = STACK_ALIGN64;
			rwindow_size = WINDOWSIZE64;
		} else {
			/*
			 * Reduce sp to a 32 bit value. This was originally
			 * done by casting down to uint32_t and back up to
			 * caddr_t, but one compiler didn't like that, so the
			 * uintptr_t casts were added. The temporary 32 bit
			 * variable was introduced to avoid depending on all
			 * compilers to generate the desired assembly code for a
			 * quadruple cast in a single expression.
			 */
			caddr32_t sp32 = (uint32_t)(uintptr_t)sp;
			sp = (caddr_t)(uintptr_t)sp32;

			stack_align = STACK_ALIGN32;
			rwindow_size = WINDOWSIZE32;
		}
		/* a misaligned sp leaves the window buffered; skip it */
		if (((uintptr_t)sp & (stack_align - 1)) != 0)
			continue;

		watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
		err = xcopyout(mpcb->mpcb_wbuf +
		    (j * wbuf_rwindow_size), sp, rwindow_size);
		if (err != 0) {
			if (psp != NULL) {
				/*
				 * Determine the offending address.
				 * It may not be the stack pointer itself.
				 * Retry the copy word-by-word; the first
				 * word that fails is the faulting address.
				 */
				uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
				    (j * wbuf_rwindow_size));
				uint_t *uaddr = (uint_t *)sp;

				for (k = 0;
				    k < rwindow_size / sizeof (int);
				    k++, kaddr++, uaddr++) {
					if (suword32(uaddr, *kaddr))
						break;
				}

				/* can't happen? */
				if (k == rwindow_size / sizeof (int))
					uaddr = (uint_t *)sp;

				*psp = (caddr_t)uaddr;
			}
			error = err;
		} else {
			/*
			 * stack was aligned and copyout succeeded;
			 * move other windows down.
			 */
			mpcb->mpcb_wbcnt--;
			for (k = j; k < mpcb->mpcb_wbcnt; k++) {
				mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
				bcopy(
				    mpcb->mpcb_wbuf +
				    ((k+1) * wbuf_rwindow_size),
				    mpcb->mpcb_wbuf +
				    (k * wbuf_rwindow_size),
				    wbuf_rwindow_size);
			}
		}
		if (watched)
			watch_enable_addr(sp, rwindow_size, S_WRITE);
	} /* while there are windows in the wbuf */
	return (error);
}
718
/*
 * ILP32 flavor of copy_return_window: read the caller's return window
 * (and optionally its caller's) from the 32-bit user stack into the
 * mpcb, widening them to native rwindow form.  Returns nonzero if the
 * first return window was captured.
 */
static int
copy_return_window32(int dotwo)
{
	klwp_t *lwp = ttolwp(curthread);
	struct machpcb *mpcb = lwptompcb(lwp);
	struct rwindow32 rwindow32;
	caddr_t sp1;
	caddr_t sp2;

	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/*
		 * Reduce r_sp to a 32 bit value before storing it in sp1. This
		 * was originally done by casting down to uint32_t and back up
		 * to caddr_t, but that generated complaints under one compiler.
		 * The uintptr_t cast was added to address that, and the
		 * temporary 32 bit variable was introduced to avoid depending
		 * on all compilers to generate the desired assembly code for a
		 * triple cast in a single expression.
		 */
		caddr32_t sp1_32 = (uint32_t)lwptoregs(lwp)->r_sp;
		sp1 = (caddr_t)(uintptr_t)sp1_32;

		/*
		 * NOTE(review): rwindow_32ton runs even when copyin fails;
		 * mpcb_rsp[0] stays NULL in that case, which presumably keeps
		 * the garbage conversion result from being used -- confirm.
		 */
		if ((copyin_nowatch(sp1, &rwindow32,
		    sizeof (struct rwindow32))) == 0)
			mpcb->mpcb_rsp[0] = sp1;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		if ((copyin_nowatch(sp2, &rwindow32,
		    sizeof (struct rwindow32)) == 0))
			mpcb->mpcb_rsp[1] = sp2;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
757
/*
 * Capture the caller's return window (and, if dotwo, its caller's) from
 * the user stack into mpcb_rwin[]/mpcb_rsp[].  Dispatches to the ILP32
 * variant for 32-bit processes; 64-bit stacks are accessed with the V9
 * stack bias applied.  Returns nonzero if the first window was captured.
 */
int
copy_return_window(int dotwo)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp;
	struct machpcb *mpcb;
	caddr_t sp1;
	caddr_t sp2;

	if (p->p_model == DATAMODEL_ILP32)
		return (copy_return_window32(dotwo));

	lwp = ttolwp(curthread);
	mpcb = lwptompcb(lwp);
	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/* bias the sp for the copy; store the unbiased value */
		sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
		if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		sp2 += STACK_BIAS;
		if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
789
/*
 * Clear registers on exec(2).
 * Resets the lwp's register and FPU state for a fresh program image:
 * zeroes most globals/outputs, sets tstate/pc/npc for the new entry
 * point, reinitializes the window-save buffer for the process's data
 * model, and disables the FPU.
 */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	klwp_t *lwp = ttolwp(curthread);
	kfpu_t *fpp = lwptofpu(lwp);
	struct machpcb *mpcb = lwptompcb(lwp);
	proc_t *p = ttoproc(curthread);

	/*
	 * Initialize user registers.
	 */
	(void) save_syscall_args();	/* copy args from registers first */
	rp = lwptoregs(lwp);
	/* note: r_o6 (the stack pointer) is deliberately not cleared here */
	rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
	    rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
	    rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
	if (p->p_model == DATAMODEL_ILP32)
		rp->r_tstate = TSTATE_USER32 | weakest_mem_model;
	else
		rp->r_tstate = TSTATE_USER64 | weakest_mem_model;
	if (!fpu_exists)
		rp->r_tstate &= ~TSTATE_PEF;
	rp->r_g7 = args->thrptr;	/* thread pointer register */
	rp->r_pc = args->entry;
	rp->r_npc = args->entry + 4;
	rp->r_y = 0;
	curthread->t_post_sys = 1;
	lwp->lwp_eosys = JUSTRETURN;
	lwp->lwp_pcb.pcb_trap0addr = 0;	/* no trap 0 handler */
	/*
	 * Clear the fixalignment flag
	 */
	p->p_fixalignment = 0;

	/*
	 * Throw out old user windows, init window buf.
	 */
	trash_user_windows();

	/*
	 * Swap the window-save buffer to match the new data model,
	 * if it changed across the exec.
	 */
	if (p->p_model == DATAMODEL_LP64 &&
	    mpcb->mpcb_wstate != WSTATE_USER64) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
		ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
		mpcb->mpcb_wstate = WSTATE_USER64;
	} else if (p->p_model == DATAMODEL_ILP32 &&
	    mpcb->mpcb_wstate != WSTATE_USER32) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
		mpcb->mpcb_wstate = WSTATE_USER32;
	}
	/* recompute cached physical addresses */
	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes
	 * or by the lib_psr memcpy routines.
	 */
	if (fpu_exists) {
		extern void _fp_write_fprs(unsigned);
		_fp_write_fprs(0);
	}
	fpp->fpu_en = 0;
	fpp->fpu_fprs = 0;
}
863
864 void
lwp_swapin(kthread_t * tp)865 lwp_swapin(kthread_t *tp)
866 {
867 struct machpcb *mpcb = lwptompcb(ttolwp(tp));
868
869 mpcb->mpcb_pa = va_to_pa(mpcb);
870 mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
871 }
872
873 /*
874 * Construct the execution environment for the user's signal
875 * handler and arrange for control to be given to it on return
876 * to userland. The library code now calls setcontext() to
877 * clean up after the signal handler, so sigret() is no longer
878 * needed.
879 */
880 int
sendsig(int sig,k_siginfo_t * sip,void (* hdlr)())881 sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
882 {
883 /*
884 * 'volatile' is needed to ensure that values are
885 * correct on the error return from on_fault().
886 */
887 volatile int minstacksz; /* min stack required to catch signal */
888 int newstack = 0; /* if true, switching to altstack */
889 label_t ljb;
890 caddr_t sp;
891 struct regs *volatile rp;
892 klwp_t *lwp = ttolwp(curthread);
893 proc_t *volatile p = ttoproc(curthread);
894 int fpq_size = 0;
895 struct sigframe {
896 struct frame frwin;
897 ucontext_t uc;
898 };
899 siginfo_t *sip_addr;
900 struct sigframe *volatile fp;
901 ucontext_t *volatile tuc = NULL;
902 char *volatile xregs = NULL;
903 volatile size_t xregs_size = 0;
904 gwindows_t *volatile gwp = NULL;
905 volatile int gwin_size = 0;
906 kfpu_t *fpp;
907 struct machpcb *mpcb;
908 volatile int watched = 0;
909 volatile int watched2 = 0;
910 caddr_t tos;
911
912 /*
913 * Make sure the current last user window has been flushed to
914 * the stack save area before we change the sp.
915 * Restore register window if a debugger modified it.
916 */
917 (void) flush_user_windows_to_stack(NULL);
918 if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
919 xregrestore(lwp, 0);
920
921 mpcb = lwptompcb(lwp);
922 rp = lwptoregs(lwp);
923
924 /*
925 * Clear the watchpoint return stack pointers.
926 */
927 mpcb->mpcb_rsp[0] = NULL;
928 mpcb->mpcb_rsp[1] = NULL;
929
930 minstacksz = sizeof (struct sigframe);
931
932 /*
933 * We know that sizeof (siginfo_t) is stack-aligned:
934 * 128 bytes for ILP32, 256 bytes for LP64.
935 */
936 if (sip != NULL)
937 minstacksz += sizeof (siginfo_t);
938
939 /*
940 * These two fields are pointed to by ABI structures and may
941 * be of arbitrary length. Size them now so we know how big
942 * the signal frame has to be.
943 */
944 fpp = lwptofpu(lwp);
945 fpp->fpu_fprs = _fp_read_fprs();
946 if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
947 fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
948 minstacksz += SA(fpq_size);
949 }
950
951 mpcb = lwptompcb(lwp);
952 if (mpcb->mpcb_wbcnt != 0) {
953 gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
954 (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
955 minstacksz += SA(gwin_size);
956 }
957
958 /*
959 * Extra registers, if support by this platform, may be of arbitrary
960 * length. Size them now so we know how big the signal frame has to be.
961 * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
962 */
963 minstacksz += SA(xregs_size);
964
965 /*
966 * Figure out whether we will be handling this signal on
967 * an alternate stack specified by the user. Then allocate
968 * and validate the stack requirements for the signal handler
969 * context. on_fault will catch any faults.
970 */
971 newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
972 !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
973
974 tos = (caddr_t)rp->r_sp + STACK_BIAS;
975 /*
976 * Force proper stack pointer alignment, even in the face of a
977 * misaligned stack pointer from user-level before the signal.
978 * Don't use the SA() macro because that rounds up, not down.
979 */
980 tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN - 1ul));
981
982 if (newstack != 0) {
983 fp = (struct sigframe *)
984 (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
985 SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
986 SA(minstacksz));
987 } else {
988 /*
989 * If we were unable to flush all register windows to
990 * the stack and we are not now on an alternate stack,
991 * just dump core with a SIGSEGV back in psig().
992 */
993 if (sig == SIGSEGV &&
994 mpcb->mpcb_wbcnt != 0 &&
995 !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
996 return (0);
997 fp = (struct sigframe *)(tos - SA(minstacksz));
998 /*
999 * Could call grow here, but stack growth now handled below
1000 * in code protected by on_fault().
1001 */
1002 }
1003 sp = (caddr_t)fp + sizeof (struct sigframe);
1004
1005 /*
1006 * Make sure process hasn't trashed its stack.
1007 */
1008 if ((caddr_t)fp >= p->p_usrstack ||
1009 (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
1010 #ifdef DEBUG
1011 printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1012 PTOU(p)->u_comm, p->p_pid, sig);
1013 printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1014 (void *)fp, (void *)hdlr, rp->r_pc);
1015 printf("fp above USRSTACK\n");
1016 #endif
1017 return (0);
1018 }
1019
1020 watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1021 if (on_fault(&ljb))
1022 goto badstack;
1023
1024 tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
1025 savecontext(tuc, &lwp->lwp_sigoldmask);
1026
1027 /*
1028 * save extra register state if it exists
1029 */
1030 if (xregs_size != 0) {
1031 xregs_setptr(lwp, tuc, sp);
1032 xregs = kmem_alloc(xregs_size, KM_SLEEP);
1033 xregs_get(lwp, xregs);
1034 copyout_noerr(xregs, sp, xregs_size);
1035 kmem_free(xregs, xregs_size);
1036 xregs = NULL;
1037 sp += SA(xregs_size);
1038 }
1039
1040 copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1041 kmem_free(tuc, sizeof (*tuc));
1042 tuc = NULL;
1043
1044 if (sip != NULL) {
1045 zoneid_t zoneid;
1046
1047 uzero(sp, sizeof (siginfo_t));
1048 if (SI_FROMUSER(sip) &&
1049 (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1050 zoneid != sip->si_zoneid) {
1051 k_siginfo_t sani_sip = *sip;
1052 sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
1053 sani_sip.si_uid = 0;
1054 sani_sip.si_ctid = -1;
1055 sani_sip.si_zoneid = zoneid;
1056 copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
1057 } else {
1058 copyout_noerr(sip, sp, sizeof (*sip));
1059 }
1060 sip_addr = (siginfo_t *)sp;
1061 sp += sizeof (siginfo_t);
1062
1063 if (sig == SIGPROF &&
1064 curthread->t_rprof != NULL &&
1065 curthread->t_rprof->rp_anystate) {
1066 /*
1067 * We stand on our head to deal with
1068 * the real time profiling signal.
1069 * Fill in the stuff that doesn't fit
1070 * in a normal k_siginfo structure.
1071 */
1072 int i = sip->si_nsysarg;
1073 while (--i >= 0) {
1074 sulword_noerr(
1075 (ulong_t *)&sip_addr->si_sysarg[i],
1076 (ulong_t)lwp->lwp_arg[i]);
1077 }
1078 copyout_noerr(curthread->t_rprof->rp_state,
1079 sip_addr->si_mstate,
1080 sizeof (curthread->t_rprof->rp_state));
1081 }
1082 } else {
1083 sip_addr = (siginfo_t *)NULL;
1084 }
1085
1086 /*
1087 * When flush_user_windows_to_stack() can't save all the
1088 * windows to the stack, it puts them in the lwp's pcb.
1089 */
1090 if (gwin_size != 0) {
1091 gwp = kmem_alloc(gwin_size, KM_SLEEP);
1092 getgwins(lwp, gwp);
1093 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
1094 copyout_noerr(gwp, sp, gwin_size);
1095 kmem_free(gwp, gwin_size);
1096 gwp = NULL;
1097 sp += SA(gwin_size);
1098 } else
1099 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);
1100
1101 if (fpq_size != 0) {
1102 struct _fq *fqp = (struct _fq *)sp;
1103 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
1104 copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);
1105
1106 /*
1107 * forget the fp queue so that the signal handler can run
1108 * without being harrassed--it will do a setcontext that will
1109 * re-establish the queue if there still is one
1110 *
1111 * NOTE: fp_runq() relies on the qcnt field being zeroed here
1112 * to terminate its processing of the queue after signal
1113 * delivery.
1114 */
1115 mpcb->mpcb_fpu->fpu_qcnt = 0;
1116 sp += SA(fpq_size);
1117
1118 /* Also, syscall needs to know about this */
1119 mpcb->mpcb_flags |= FP_TRAPPED;
1120
1121 } else {
1122 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
1123 suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1124 }
1125
1126
1127 /*
1128 * Since we flushed the user's windows and we are changing their
1129 * stack pointer, the window that the user will return to will
1130 * be restored from the save area in the frame we are setting up.
1131 * We copy in save area for old stack pointer so that debuggers
1132 * can do a proper stack backtrace from the signal handler.
1133 */
1134 if (mpcb->mpcb_wbcnt == 0) {
1135 watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
1136 S_READ);
1137 ucopy(tos, &fp->frwin, sizeof (struct rwindow));
1138 }
1139
1140 lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1141
1142 if (newstack != 0) {
1143 lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1144
1145 if (lwp->lwp_ustack) {
1146 copyout_noerr(&lwp->lwp_sigaltstack,
1147 (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
1148 }
1149 }
1150
1151 no_fault();
1152 mpcb->mpcb_wbcnt = 0; /* let user go on */
1153
1154 if (watched2)
1155 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1156 if (watched)
1157 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1158
1159 /*
1160 * Set up user registers for execution of signal handler.
1161 */
1162 rp->r_sp = (uintptr_t)fp - STACK_BIAS;
1163 rp->r_pc = (uintptr_t)hdlr;
1164 rp->r_npc = (uintptr_t)hdlr + 4;
1165 /* make sure %asi is ASI_PNF */
1166 rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1167 rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1168 rp->r_o0 = sig;
1169 rp->r_o1 = (uintptr_t)sip_addr;
1170 rp->r_o2 = (uintptr_t)&fp->uc;
1171 /*
1172 * Don't set lwp_eosys here. sendsig() is called via psig() after
1173 * lwp_eosys is handled, so setting it here would affect the next
1174 * system call.
1175 */
1176 return (1);
1177
1178 badstack:
1179 no_fault();
1180 if (watched2)
1181 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1182 if (watched)
1183 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1184 if (tuc)
1185 kmem_free(tuc, sizeof (ucontext_t));
1186 if (xregs)
1187 kmem_free(xregs, xregs_size);
1188 if (gwp)
1189 kmem_free(gwp, gwin_size);
1190 #ifdef DEBUG
1191 printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1192 PTOU(p)->u_comm, p->p_pid, sig);
1193 printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
1194 (void *)fp, (void *)hdlr, rp->r_pc);
1195 #endif
1196 return (0);
1197 }
1198
1199
1200 #ifdef _SYSCALL32_IMPL
1201
1202 /*
1203 * Construct the execution environment for the user's signal
1204 * handler and arrange for control to be given to it on return
1205 * to userland. The library code now calls setcontext() to
1206 * clean up after the signal handler, so sigret() is no longer
1207 * needed.
1208 */
int
sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	/*
	 * 'volatile' is needed to ensure that values are
	 * correct on the error return from on_fault().
	 */
	volatile int minstacksz; /* min stack required to catch signal */
	int newstack = 0;	/* if true, switching to altstack */
	label_t ljb;
	caddr_t sp;
	struct regs *volatile rp;
	klwp_t *lwp = ttolwp(curthread);
	proc_t *volatile p = ttoproc(curthread);
	struct fq32 fpu_q[MAXFPQ]; /* to hold floating queue */
	struct fq32 *dfq = NULL;
	size_t fpq_size = 0;
	/* Layout of the signal frame pushed onto the user's stack. */
	struct sigframe32 {
		struct frame32 frwin;	/* register window save area */
		ucontext32_t uc;	/* interrupted user context */
	};
	struct sigframe32 *volatile fp;
	siginfo32_t *sip_addr;
	ucontext32_t *volatile tuc = NULL;
	char *volatile xregs = NULL;
	volatile int xregs_size = 0;
	gwindows32_t *volatile gwp = NULL;
	volatile size_t gwin_size = 0;
	kfpu_t *fpp;
	struct machpcb *mpcb;
	volatile int watched = 0;	/* watchpoints disabled on frame */
	volatile int watched2 = 0;	/* watchpoints disabled on old tos */
	caddr_t tos;

	/*
	 * Make sure the current last user window has been flushed to
	 * the stack save area before we change the sp.
	 * Restore register window if a debugger modified it.
	 */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	mpcb = lwptompcb(lwp);
	rp = lwptoregs(lwp);

	/*
	 * Clear the watchpoint return stack pointers.
	 */
	mpcb->mpcb_rsp[0] = NULL;
	mpcb->mpcb_rsp[1] = NULL;

	/*
	 * Compute the total size of the frame we must reserve on the
	 * user's stack: the base frame, plus optional siginfo, fp
	 * queue, buffered register windows, and extra register state.
	 */
	minstacksz = sizeof (struct sigframe32);

	if (sip != NULL)
		minstacksz += sizeof (siginfo32_t);

	/*
	 * These two fields are pointed to by ABI structures and may
	 * be of arbitrary length. Size them now so we know how big
	 * the signal frame has to be.
	 */
	fpp = lwptofpu(lwp);
	fpp->fpu_fprs = _fp_read_fprs();
	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
		/* FPU in use: reserve room for any pending fp queue. */
		fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
		minstacksz += fpq_size;
		dfq = fpu_q;
	}

	mpcb = lwptompcb(lwp);
	if (mpcb->mpcb_wbcnt != 0) {
		/*
		 * User windows remain buffered in the kernel; reserve
		 * space for a gwindows32_t sized to hold them.
		 */
		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
		    (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
		    sizeof (int32_t);
		minstacksz += gwin_size;
	}

	/*
	 * Extra registers, if supported by this platform, may be of arbitrary
	 * length. Size them now so we know how big the signal frame has to be.
	 */
	xregs_size = xregs_getsize(p);
	minstacksz += SA32(xregs_size);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user. Then allocate
	 * and validate the stack requirements for the signal handler
	 * context. on_fault will catch any faults.
	 */
	newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));

	tos = (void *)(uintptr_t)(uint32_t)rp->r_sp;
	/*
	 * Force proper stack pointer alignment, even in the face of a
	 * misaligned stack pointer from user-level before the signal.
	 * Don't use the SA32() macro because that rounds up, not down.
	 */
	tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN32 - 1ul));

	if (newstack != 0) {
		/* Place the frame at the top of the alternate stack. */
		fp = (struct sigframe32 *)
		    (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA32((int)lwp->lwp_sigaltstack.ss_size) -
		    STACK_ALIGN32 -
		    SA32(minstacksz));
	} else {
		/*
		 * If we were unable to flush all register windows to
		 * the stack and we are not now on an alternate stack,
		 * just dump core with a SIGSEGV back in psig().
		 */
		if (sig == SIGSEGV &&
		    mpcb->mpcb_wbcnt != 0 &&
		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
			return (0);
		fp = (struct sigframe32 *)(tos - SA32(minstacksz));
		/*
		 * Could call grow here, but stack growth now handled below
		 * in code protected by on_fault().
		 */
	}
	/* Variable-length data (siginfo, gwins, fq, xregs) goes after fp. */
	sp = (caddr_t)fp + sizeof (struct sigframe32);

	/*
	 * Make sure process hasn't trashed its stack.
	 */
	if ((caddr_t)fp >= p->p_usrstack ||
	    (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
#ifdef DEBUG
		printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)fp, (void *)hdlr, rp->r_pc);
		printf("fp above USRSTACK32\n");
#endif
		return (0);
	}

	/*
	 * All user-space stores below go through the *_noerr routines;
	 * a fault in any of them longjmps back here via on_fault().
	 */
	watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (on_fault(&ljb))
		goto badstack;

	/* Snapshot the interrupted context, then copy it out below. */
	tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
	savecontext32(tuc, &lwp->lwp_sigoldmask, dfq);

	/*
	 * save extra register state if it exists
	 */
	if (xregs_size != 0) {
		xregs_setptr32(lwp, tuc, (caddr32_t)(uintptr_t)sp);
		xregs = kmem_alloc(xregs_size, KM_SLEEP);
		xregs_get(lwp, xregs);
		copyout_noerr(xregs, sp, xregs_size);
		kmem_free(xregs, xregs_size);
		xregs = NULL;
		sp += SA32(xregs_size);
	}

	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	if (sip != NULL) {
		siginfo32_t si32;
		zoneid_t zoneid;

		siginfo_kto32(sip, &si32);
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			/*
			 * Sanitize a cross-zone sender's identity so a
			 * non-global zone cannot see foreign pids/uids.
			 */
			si32.si_pid = p->p_zone->zone_zsched->p_pid;
			si32.si_uid = 0;
			si32.si_ctid = -1;
			si32.si_zoneid = zoneid;
		}
		uzero(sp, sizeof (siginfo32_t));
		copyout_noerr(&si32, sp, sizeof (siginfo32_t));
		sip_addr = (siginfo32_t *)sp;
		sp += sizeof (siginfo32_t);

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;
			while (--i >= 0) {
				suword32_noerr(&sip_addr->si_sysarg[i],
				    (uint32_t)lwp->lwp_arg[i]);
			}
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else {
		sip_addr = NULL;
	}

	/*
	 * When flush_user_windows_to_stack() can't save all the
	 * windows to the stack, it puts them in the lwp's pcb.
	 */
	if (gwin_size != 0) {
		gwp = kmem_alloc(gwin_size, KM_SLEEP);
		getgwins32(lwp, gwp);
		/* Point the copied-out context at the gwindows area. */
		suword32_noerr(&fp->uc.uc_mcontext.gwins,
		    (uint32_t)(uintptr_t)sp);
		copyout_noerr(gwp, sp, gwin_size);
		kmem_free(gwp, gwin_size);
		gwp = NULL;
		sp += gwin_size;
	} else {
		suword32_noerr(&fp->uc.uc_mcontext.gwins, 0);
	}

	if (fpq_size != 0) {
		/*
		 * Update the (already copied out) fpu32.fpu_q pointer
		 * from NULL to the 32-bit address on the user's stack
		 * where we then copyout the fq32 to.
		 */
		struct fq32 *fqp = (struct fq32 *)sp;
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)(uintptr_t)fqp);
		copyout_noerr(dfq, fqp, fpq_size);

		/*
		 * forget the fp queue so that the signal handler can run
		 * without being harassed--it will do a setcontext that will
		 * re-establish the queue if there still is one
		 *
		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
		 * to terminate its processing of the queue after signal
		 * delivery.
		 */
		mpcb->mpcb_fpu->fpu_qcnt = 0;
		sp += fpq_size;

		/* Also, syscall needs to know about this */
		mpcb->mpcb_flags |= FP_TRAPPED;

	} else {
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, 0);
		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
	}


	/*
	 * Since we flushed the user's windows and we are changing their
	 * stack pointer, the window that the user will return to will
	 * be restored from the save area in the frame we are setting up.
	 * We copy in save area for old stack pointer so that debuggers
	 * can do a proper stack backtrace from the signal handler.
	 */
	if (mpcb->mpcb_wbcnt == 0) {
		watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
		    S_READ);
		ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
	}

	/* Record the signal context for a later setcontext(). */
	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;

	if (newstack != 0) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack) {
			/* Mirror the altstack state to the user's copy. */
			stack32_t stk32;

			stk32.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
			stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;

			copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
			    sizeof (stack32_t));
		}
	}

	no_fault();
	mpcb->mpcb_wbcnt = 0; /* let user go on */

	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 */
	rp->r_sp = (uintptr_t)fp;
	rp->r_pc = (uintptr_t)hdlr;
	rp->r_npc = (uintptr_t)hdlr + 4;
	/* make sure %asi is ASI_PNF */
	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
	/* handler(sig, siginfo *, ucontext *) per the signal ABI */
	rp->r_o0 = sig;
	rp->r_o1 = (uintptr_t)sip_addr;
	rp->r_o2 = (uintptr_t)&fp->uc;
	/*
	 * Don't set lwp_eosys here. sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	/*
	 * A fault occurred while building the frame: undo fault handling
	 * and watchpoint state, release any allocations still held
	 * (volatile locals are accurate after on_fault), and fail.
	 */
	no_fault();
	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
	if (xregs)
		kmem_free(xregs, xregs_size);
	if (gwp)
		kmem_free(gwp, gwin_size);
#ifdef DEBUG
	printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)fp, (void *)hdlr, rp->r_pc);
#endif
	return (0);
}
1540
1541 #endif /* _SYSCALL32_IMPL */
1542
1543
1544 /*
1545 * Load user registers into lwp. Called only from syslwp_create().
1546 * thrptr ignored for sparc.
1547 */
1548 /* ARGSUSED2 */
1549 void
lwp_load(klwp_t * lwp,gregset_t grp,uintptr_t thrptr)1550 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
1551 {
1552 setgregs(lwp, grp);
1553 if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
1554 lwptoregs(lwp)->r_tstate = TSTATE_USER32 | TSTATE_MM_TSO;
1555 else
1556 lwptoregs(lwp)->r_tstate = TSTATE_USER64 | TSTATE_MM_TSO;
1557
1558 if (!fpu_exists)
1559 lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
1560 lwp->lwp_eosys = JUSTRETURN;
1561 lwptot(lwp)->t_post_sys = 1;
1562 }
1563
1564 /*
1565 * set syscall()'s return values for a lwp.
1566 */
1567 void
lwp_setrval(klwp_t * lwp,int v1,int v2)1568 lwp_setrval(klwp_t *lwp, int v1, int v2)
1569 {
1570 struct regs *rp = lwptoregs(lwp);
1571
1572 rp->r_tstate &= ~TSTATE_IC;
1573 rp->r_o0 = v1;
1574 rp->r_o1 = v2;
1575 }
1576
1577 /*
1578 * set stack pointer for a lwp
1579 */
1580 void
lwp_setsp(klwp_t * lwp,caddr_t sp)1581 lwp_setsp(klwp_t *lwp, caddr_t sp)
1582 {
1583 struct regs *rp = lwptoregs(lwp);
1584 rp->r_sp = (uintptr_t)sp;
1585 }
1586
1587 /*
1588 * Take any PCB specific actions that are required or flagged in the PCB.
1589 */
1590 extern void trap_async_hwerr(void);
1591 #pragma weak trap_async_hwerr
1592
1593 void
lwp_pcb_exit(void)1594 lwp_pcb_exit(void)
1595 {
1596 klwp_t *lwp = ttolwp(curthread);
1597
1598 if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1599 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1600 trap_async_hwerr();
1601 }
1602 }
1603
1604 /*
1605 * Invalidate the saved user register windows in the pcb struct
1606 * for the current thread. They will no longer be preserved.
1607 */
1608 void
lwp_clear_uwin(void)1609 lwp_clear_uwin(void)
1610 {
1611 struct machpcb *m = lwptompcb(ttolwp(curthread));
1612
1613 /*
1614 * This has the effect of invalidating all (any) of the
1615 * user level windows that are currently sitting in the
1616 * kernel buffer.
1617 */
1618 m->mpcb_wbcnt = 0;
1619 }
1620
1621 /*
1622 * Set memory model to Total Store Order (TSO).
1623 */
1624 static void
mmodel_set_tso(void)1625 mmodel_set_tso(void)
1626 {
1627 struct regs *rp = lwptoregs(ttolwp(curthread));
1628
1629 /*
1630 * The thread is doing something which requires TSO semantics
1631 * (creating a 2nd thread, or mapping writable shared memory).
1632 * It's no longer safe to run in WC mode.
1633 */
1634 rp->r_tstate &= ~TSTATE_MM;
1635 /* LINTED E_EXPR_NULL_EFFECT */
1636 rp->r_tstate |= TSTATE_MM_TSO;
1637 }
1638
1639 /*
1640 * When this routine is invoked, the process is just about to add a new lwp;
1641 * making it multi threaded.
1642 *
1643 * If the program requires default stronger/legacy memory model semantics,
1644 * this is an indication that the processor memory model
1645 * should be altered to provide those semantics.
1646 */
void
lwp_mmodel_newlwp(void)
{
	/*
	 * The process is gaining a thread; weaker-than-TSO ordering
	 * (WC) is no longer safe, so drop back to TSO.
	 */
	mmodel_set_tso();
}
1656
1657 /*
1658 * This routine is invoked immediately after the lwp has added a mapping
1659 * to shared memory to its address space. The mapping starts at address
1660 * 'addr' and extends for 'size' bytes.
1661 *
1662 * Unless we can (somehow) guarantee that all the processes we're sharing
1663 * the underlying mapped object with, are using the same memory model that
1664 * this process is using, this call should change the memory model
1665 * configuration of the processor to be the most pessimistic available.
1666 */
1667 /* ARGSUSED */
1668 void
lwp_mmodel_shared_as(caddr_t addr,size_t sz)1669 lwp_mmodel_shared_as(caddr_t addr, size_t sz)
1670 {
1671 /*
1672 * lwp has mapped shared memory and is no longer safe
1673 * to run in WC mode, so revert back to TSO.
1674 * For now, any shared memory access is enough to get back to TSO
1675 * and hence not checking on 'addr' & 'sz'.
1676 */
1677 mmodel_set_tso();
1678 }
1679
1680 static uint_t
mkpsr(uint64_t tstate,uint_t fprs)1681 mkpsr(uint64_t tstate, uint_t fprs)
1682 {
1683 uint_t psr, icc;
1684
1685 psr = tstate & TSTATE_CWP_MASK;
1686 if (tstate & TSTATE_PRIV)
1687 psr |= PSR_PS;
1688 if (fprs & FPRS_FEF)
1689 psr |= PSR_EF;
1690 icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
1691 psr |= icc;
1692 psr |= V9_PSR_IMPLVER;
1693 return (psr);
1694 }
1695
1696 void
sync_icache(caddr_t va,uint_t len)1697 sync_icache(caddr_t va, uint_t len)
1698 {
1699 caddr_t end;
1700
1701 end = va + len;
1702 va = (caddr_t)((uintptr_t)va & -8l); /* sparc needs 8-byte align */
1703 while (va < end) {
1704 doflush(va);
1705 va += 8;
1706 }
1707 }
1708
1709 #ifdef _SYSCALL32_IMPL
1710
1711 /*
1712 * Copy the floating point queue if and only if there is a queue and a place
1713 * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1714 * The issue is that while we are handling the fq32 in sendsig, we
1715 * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
1716 * will not suffice, so we have the third parameter to this function.
1717 */
1718 void
fpuregset_nto32(const fpregset_t * src,fpregset32_t * dest,struct fq32 * dfq)1719 fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
1720 {
1721 int i;
1722
1723 bzero(dest, sizeof (*dest));
1724 for (i = 0; i < 32; i++)
1725 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1726 dest->fpu_q = 0;
1727 dest->fpu_fsr = (uint32_t)src->fpu_fsr;
1728 dest->fpu_qcnt = src->fpu_qcnt;
1729 dest->fpu_q_entrysize = sizeof (struct fpq32);
1730 dest->fpu_en = src->fpu_en;
1731
1732 if ((src->fpu_qcnt) && (dfq != NULL)) {
1733 struct _fq *sfq = src->fpu_q;
1734 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1735 dfq->FQu.fpq.fpq_addr =
1736 (caddr32_t)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1737 dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1738 }
1739 }
1740 }
1741
1742 /*
1743 * Copy the floating point queue if and only if there is a queue and a place
1744 * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1745 * The *dfq is required to escape the bzero in both this function and in
1746 * ucontext_32ton. The *sfq is required because once the fq32 is copied
1747 * into the kernel, in setcontext, then we need a 64-bit pointer to it.
1748 */
1749 static void
fpuregset_32ton(const fpregset32_t * src,fpregset_t * dest,const struct fq32 * sfq,struct _fq * dfq)1750 fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
1751 const struct fq32 *sfq, struct _fq *dfq)
1752 {
1753 int i;
1754
1755 bzero(dest, sizeof (*dest));
1756 for (i = 0; i < 32; i++)
1757 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1758 dest->fpu_q = dfq;
1759 dest->fpu_fsr = (uint64_t)src->fpu_fsr;
1760 if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
1761 dest->fpu_q_entrysize = sizeof (struct _fpq);
1762 else
1763 dest->fpu_q_entrysize = 0;
1764 dest->fpu_en = src->fpu_en;
1765
1766 if ((src->fpu_qcnt) && (sfq) && (dfq)) {
1767 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1768 dfq->FQu.fpq.fpq_addr =
1769 (unsigned int *)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1770 dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1771 }
1772 }
1773 }
1774
void
ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
    const struct fq32 *sfq, struct _fq *dfq)
{
	int i;

	bzero(dest, sizeof (*dest));

	dest->uc_flags = src->uc_flags;
	dest->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;

	/* Signal mask words copy across unchanged. */
	for (i = 0; i < 4; i++) {
		dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
	}

	/* Widen the stack description to native pointer/size types. */
	dest->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
	dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
	dest->uc_stack.ss_flags = src->uc_stack.ss_flags;

	/* REG_CCR is 0, skip over it and handle it after this loop */
	for (i = 1; i < _NGREG32; i++)
		dest->uc_mcontext.gregs[i] =
		    (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
	/*
	 * The 32-bit context has no CCR slot; synthesize it from the
	 * icc bits of the 32-bit PSR.  %asi is forced to ASI_PNF.
	 */
	dest->uc_mcontext.gregs[REG_CCR] =
	    (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
	dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
	/*
	 * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
	 * otherwise there is no guarantee that anything in fpregs is valid.
	 */
	if (src->uc_flags & UC_FPU) {
		dest->uc_mcontext.gregs[REG_FPRS] =
		    ((src->uc_mcontext.fpregs.fpu_en) ?
		    (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
	} else {
		dest->uc_mcontext.gregs[REG_FPRS] = 0;
	}
	dest->uc_mcontext.gwins =
	    (gwindows_t *)(uintptr_t)src->uc_mcontext.gwins;
	if (src->uc_flags & UC_FPU) {
		fpuregset_32ton(&src->uc_mcontext.fpregs,
		    &dest->uc_mcontext.fpregs, sfq, dfq);
	}
}
1819
1820 void
rwindow_nto32(struct rwindow * src,struct rwindow32 * dest)1821 rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
1822 {
1823 greg_t *s = (greg_t *)src;
1824 greg32_t *d = (greg32_t *)dest;
1825 int i;
1826
1827 for (i = 0; i < 16; i++)
1828 *d++ = (greg32_t)*s++;
1829 }
1830
1831 void
rwindow_32ton(struct rwindow32 * src,struct rwindow * dest)1832 rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
1833 {
1834 greg32_t *s = (greg32_t *)src;
1835 greg_t *d = (greg_t *)dest;
1836 int i;
1837
1838 for (i = 0; i < 16; i++)
1839 *d++ = (uint32_t)*s++;
1840 }
1841
1842 #endif /* _SYSCALL32_IMPL */
1843
1844 /*
1845 * The panic code invokes panic_saveregs() to record the contents of a
1846 * regs structure into the specified panic_data structure for debuggers.
1847 */
void
panic_saveregs(panic_data_t *pdp, struct regs *rp)
{
	panic_nv_t *pnv = PANICNVGET(pdp);

	/* Trap state, then globals, outs, pc/npc and %y. */
	PANICNVADD(pnv, "tstate", rp->r_tstate);
	PANICNVADD(pnv, "g1", rp->r_g1);
	PANICNVADD(pnv, "g2", rp->r_g2);
	PANICNVADD(pnv, "g3", rp->r_g3);
	PANICNVADD(pnv, "g4", rp->r_g4);
	PANICNVADD(pnv, "g5", rp->r_g5);
	PANICNVADD(pnv, "g6", rp->r_g6);
	PANICNVADD(pnv, "g7", rp->r_g7);
	PANICNVADD(pnv, "o0", rp->r_o0);
	PANICNVADD(pnv, "o1", rp->r_o1);
	PANICNVADD(pnv, "o2", rp->r_o2);
	PANICNVADD(pnv, "o3", rp->r_o3);
	PANICNVADD(pnv, "o4", rp->r_o4);
	PANICNVADD(pnv, "o5", rp->r_o5);
	PANICNVADD(pnv, "o6", rp->r_o6);
	PANICNVADD(pnv, "o7", rp->r_o7);
	PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
	PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
	PANICNVADD(pnv, "y", (uint32_t)rp->r_y);

	/* Commit the updated name/value cursor back to the panic data. */
	PANICNVSET(pdp, pnv);
}
1875