/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/vmparam.h>
#include <sys/systm.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/proc.h>
#include <sys/ucontext.h>
#include <sys/cpuvar.h>
#include <sys/asm_linkage.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/bootconf.h>
#include <sys/archsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/debug.h>
#include <sys/privregs.h>
#include <sys/machpcb.h>
#include <sys/psr_compat.h>
#include <sys/cmn_err.h>
#include <sys/asi.h>
#include <sys/copyops.h>
#include <sys/model.h>
#include <sys/panic.h>
#include <sys/exec.h>

/*
 * modify the lower 32bits of a uint64_t
 */
#define	SET_LOWER_32(all, lower)	\
	(((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))

#define	MEMCPY_FPU_EN	2	/* fprs on and fpu_en == 0 */

static uint_t mkpsr(uint64_t tstate, uint32_t fprs);

#ifdef	_SYSCALL32_IMPL
static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
    const struct fq32 *sfq, struct fq *dfq);
#endif	/* _SYSCALL32_IMPL */

/*
 * Set floating-point registers.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
setfpregs(klwp_t *lwp, fpregset_t *fp)
{
	struct machpcb *mpcb;
	kfpu_t *pfp;
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	model_t model = lwp_getdatamodel(lwp);

	mpcb = lwptompcb(lwp);
	pfp = lwptofpu(lwp);

	/*
	 * This is always true for both "real" fp programs and memcpy fp
	 * programs, because we force fpu_en to MEMCPY_FPU_EN in getfpregs,
	 * for the memcpy and threads cases where (fpu_en == 0) &&
	 * (fpu_fprs & FPRS_FEF), if setfpregs is called after getfpregs.
	 */
	if (fp->fpu_en) {
		kpreempt_disable();

		if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
		    fpu_exists) {
			/*
			 * He's not currently using the FPU but wants to in his
			 * new context - arrange for this on return to userland.
			 */
			pfp->fpu_fprs = (uint32_t)fprs;
		}
		/*
		 * Get setfpregs to restore fpu_en to zero
		 * for the memcpy/threads case (where pfp->fpu_en == 0 &&
		 * (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (fp->fpu_en == MEMCPY_FPU_EN)
			fp->fpu_en = 0;

		/*
		 * Load up a user's floating point context.
		 */
		if (fp->fpu_qcnt > MAXFPQ)	/* plug security holes */
			fp->fpu_qcnt = MAXFPQ;
		fp->fpu_q_entrysize = sizeof (struct fq);

		/*
		 * For v9 kernel, copy all of the fp regs.
		 * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
		 * Restore entire fsr for v9, only lower half for v8.
		 */
		(void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
		if (model == DATAMODEL_LP64)
			pfp->fpu_fsr = fp->fpu_fsr;
		else
			pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
		pfp->fpu_qcnt = fp->fpu_qcnt;
		pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
		pfp->fpu_en = fp->fpu_en;
		pfp->fpu_q = mpcb->mpcb_fpu_q;
		if (fp->fpu_qcnt)
			(void) kcopy(fp->fpu_q, pfp->fpu_q,
			    fp->fpu_qcnt * fp->fpu_q_entrysize);
		/* FSR ignores these bits on load, so they can not be set */
		pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);

		/*
		 * If not the current process then resume() will handle it.
		 */
		if (lwp != ttolwp(curthread)) {
			/* force resume to reload fp regs */
			pfp->fpu_fprs |= FPRS_FEF;
			kpreempt_enable();
			return;
		}

		/*
		 * Load up FPU with new floating point context.
		 */
		if (fpu_exists) {
			pfp->fpu_fprs = _fp_read_fprs();
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "setfpregs with fp disabled!\n");
#endif
			}
			/*
			 * Load all fp regs for v9 user programs, but only
			 * load the lower half for v8[plus] programs.
			 */
			if (model == DATAMODEL_LP64)
				fp_restore(pfp);
			else
				fp_v8_load(pfp);
		}

		kpreempt_enable();
	} else {
		if ((pfp->fpu_en) ||		/* normal fp case */
		    (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
			/*
			 * Currently the lwp has floating point enabled.
			 * Turn off FPRS_FEF in user's fprs, saved and
			 * real copies thereof.
			 */
			pfp->fpu_en = 0;
			if (fpu_exists) {
				fprs = 0;
				if (lwp == ttolwp(curthread))
					_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
			}
		}
	}
}

#ifdef	_SYSCALL32_IMPL
void
setfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t fpregs;

	fpuregset_32ton(fp, &fpregs, NULL, NULL);
	setfpregs(lwp, &fpregs);
}
#endif	/* _SYSCALL32_IMPL */

/*
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
run_fpq(klwp_t *lwp, fpregset_t *fp)
{
	/*
	 * If the context being loaded up includes a floating queue,
	 * we need to simulate those instructions (since we can't reload
	 * the fpu) and pass the process any appropriate signals
	 */

	if (lwp == ttolwp(curthread)) {
		if (fpu_exists) {
			if (fp->fpu_qcnt)
				fp_runq(lwp->lwp_regs);
		}
	}
}

/*
 * Get floating-point registers.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
getfpregs(klwp_t *lwp, fpregset_t *fp)
{
	kfpu_t *pfp;
	model_t model = lwp_getdatamodel(lwp);

	pfp = lwptofpu(lwp);
	kpreempt_disable();
	if (fpu_exists && ttolwp(curthread) == lwp)
		pfp->fpu_fprs = _fp_read_fprs();

	/*
	 * First check the fpu_en case, for normal fp programs.
	 * Next check the fprs case, for fp use by memcpy/threads.
	 */
	if (((fp->fpu_en = pfp->fpu_en) != 0) ||
	    (pfp->fpu_fprs & FPRS_FEF)) {
		/*
		 * Force setfpregs to restore the fp context in
		 * setfpregs for the memcpy and threads cases (where
		 * pfp->fpu_en == 0 && (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (pfp->fpu_en == 0)
			fp->fpu_en = MEMCPY_FPU_EN;
		/*
		 * If we have an fpu and the current thread owns the fp
		 * context, flush fp registers into the pcb.  Save all
		 * the fp regs for v9, xregs_getfpregs saves the upper half
		 * for v8plus.  Save entire fsr for v9, only lower half for v8.
		 */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

				_fp_write_fprs(fprs);
				pfp->fpu_fprs = fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "getfpregs with fp disabled!\n");
#endif
			}
			if (model == DATAMODEL_LP64)
				fp_fksave(pfp);
			else
				fp_v8_fksave(pfp);
		}
		(void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
		fp->fpu_q = pfp->fpu_q;
		if (model == DATAMODEL_LP64)
			fp->fpu_fsr = pfp->fpu_fsr;
		else
			fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
		fp->fpu_qcnt = pfp->fpu_qcnt;
		fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
	} else {
		int i;
		for (i = 0; i < 32; i++)		/* NaN */
			((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
		if (model == DATAMODEL_LP64) {
			for (i = 16; i < 32; i++)	/* NaN */
				((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
				    (uint64_t)-1;
		}
		fp->fpu_fsr = 0;
		fp->fpu_qcnt = 0;
	}
	kpreempt_enable();
}

#ifdef	_SYSCALL32_IMPL
void
getfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t fpregs;

	getfpregs(lwp, &fpregs);
	fpuregset_nto32(&fpregs, fp, NULL);
}
#endif	/* _SYSCALL32_IMPL */

/*
 * Set general registers.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */

/* 64-bit gregset_t */
void
setgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	kfpu_t *fp = lwptofpu(lwp);
	uint64_t tbits;

	int current = (lwp == curthread->t_lwp);

	if (current)
		(void) save_syscall_args();	/* copy the args first */

	tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
	    ((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
	rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
	    ((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
	rp->r_tstate |= tbits;
	kpreempt_disable();
	fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
	if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
		_fp_write_fprs(fp->fpu_fprs);
	kpreempt_enable();

	/*
	 * pc and npc must be 4-byte aligned on sparc.
	 * We silently make it so to avoid a watchdog reset.
	 */
	rp->r_pc = grp[REG_PC] & ~03L;
	rp->r_npc = grp[REG_nPC] & ~03L;
	rp->r_y = grp[REG_Y];

	rp->r_g1 = grp[REG_G1];
	rp->r_g2 = grp[REG_G2];
	rp->r_g3 = grp[REG_G3];
	rp->r_g4 = grp[REG_G4];
	rp->r_g5 = grp[REG_G5];
	rp->r_g6 = grp[REG_G6];
	rp->r_g7 = grp[REG_G7];

	rp->r_o0 = grp[REG_O0];
	rp->r_o1 = grp[REG_O1];
	rp->r_o2 = grp[REG_O2];
	rp->r_o3 = grp[REG_O3];
	rp->r_o4 = grp[REG_O4];
	rp->r_o5 = grp[REG_O5];
	rp->r_o6 = grp[REG_O6];
	rp->r_o7 = grp[REG_O7];

	if (current) {
		/*
		 * This was called from a system call, but we
		 * do not want to return via the shared window;
		 * restoring the CPU context changes everything.
		 */
		lwp->lwp_eosys = JUSTRETURN;
		curthread->t_post_sys = 1;
	}
}

/*
 * Return the general registers.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
getgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	uint32_t fprs;

	kpreempt_disable();
	if (fpu_exists && ttolwp(curthread) == lwp) {
		fprs = _fp_read_fprs();
	} else {
		kfpu_t *fp = lwptofpu(lwp);
		fprs = fp->fpu_fprs;
	}
	kpreempt_enable();
	grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
	grp[REG_PC] = rp->r_pc;
	grp[REG_nPC] = rp->r_npc;
	grp[REG_Y] = (uint32_t)rp->r_y;
	grp[REG_G1] = rp->r_g1;
	grp[REG_G2] = rp->r_g2;
	grp[REG_G3] = rp->r_g3;
	grp[REG_G4] = rp->r_g4;
	grp[REG_G5] = rp->r_g5;
	grp[REG_G6] = rp->r_g6;
	grp[REG_G7] = rp->r_g7;
	grp[REG_O0] = rp->r_o0;
	grp[REG_O1] = rp->r_o1;
	grp[REG_O2] = rp->r_o2;
	grp[REG_O3] = rp->r_o3;
	grp[REG_O4] = rp->r_o4;
	grp[REG_O5] = rp->r_o5;
	grp[REG_O6] = rp->r_o6;
	grp[REG_O7] = rp->r_o7;
	grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
	grp[REG_FPRS] = fprs;
}

void
getgregs32(klwp_t *lwp, gregset32_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	uint32_t fprs;

	kpreempt_disable();
	if (fpu_exists && ttolwp(curthread) == lwp) {
		fprs = _fp_read_fprs();
	} else {
		kfpu_t *fp = lwptofpu(lwp);
		fprs = fp->fpu_fprs;
	}
	kpreempt_enable();
	grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
	grp[REG_PC] = rp->r_pc;
	grp[REG_nPC] = rp->r_npc;
	grp[REG_Y] = rp->r_y;
	grp[REG_G1] = rp->r_g1;
	grp[REG_G2] = rp->r_g2;
	grp[REG_G3] = rp->r_g3;
	grp[REG_G4] = rp->r_g4;
	grp[REG_G5] = rp->r_g5;
	grp[REG_G6] = rp->r_g6;
	grp[REG_G7] = rp->r_g7;
	grp[REG_O0] = rp->r_o0;
	grp[REG_O1] = rp->r_o1;
	grp[REG_O2] = rp->r_o2;
	grp[REG_O3] = rp->r_o3;
	grp[REG_O4] = rp->r_o4;
	grp[REG_O5] = rp->r_o5;
	grp[REG_O6] = rp->r_o6;
	grp[REG_O7] = rp->r_o7;
}

/*
 * Return the user-level PC.
 * If in a system call, return the address of the syscall trap.
 */
greg_t
getuserpc()
{
	return (lwptoregs(ttolwp(curthread))->r_pc);
}

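/*
 * Register-window buffer handling (see sys/machpcb.h): user windows that
 * could not be flushed to the user stack are kept in mpcb_wbuf, with the
 * matching stack pointers in mpcb_spbuf.  When mpcb_wstate is WSTATE_USER32
 * the buffer holds 32-bit (rwindow32) entries, otherwise 64-bit (rwindow64)
 * entries.  The routines below convert between that buffered form and the
 * gwindows form used by /proc and by signal delivery.
 */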
/*
 * Set register windows.
 */
void
setgwins(klwp_t *lwp, gwindows_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		if (is64 && IS_V9STACK(sp))
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
		else
			rwindow_nto32(&gwins->wbuf[i], rwp);
		mpcb->mpcb_wbcnt++;
	}
}

void
setgwins32(klwp_t *lwp, gwindows32_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;
	caddr_t sp;
	int i;

	struct rwindow *rwp;
	int wbuf_rwindow_size;
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		if (is64 && IS_V9STACK(sp))
			rwindow_32ton(&gwins->wbuf[i], rwp);
		else
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
		mpcb->mpcb_wbcnt++;
	}
}

/*
 * Get register windows.
 * NOTE:  'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
getgwins(klwp_t *lwp, gwindows_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		gwp->spbuf[i] = (greg_t *)sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		if (is64 && IS_V9STACK(sp))
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
		else
			rwindow_32ton(rwp, &gwp->wbuf[i]);
	}
}

void
getgwins32(klwp_t *lwp, gwindows32_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;
	int i;
	struct rwindow *rwp;
	int wbuf_rwindow_size;
	caddr_t sp;
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		gwp->spbuf[i] = (caddr32_t)sp;
		if (is64 && IS_V9STACK(sp))
			rwindow_nto32(rwp, &gwp->wbuf[i]);
		else
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
	}
}

/*
 * For things that depend on register state being on the stack,
 * copy any register windows that get saved into the window buffer
 * (in the pcb) onto the stack.  This normally gets fixed up
 * before returning to a user program.  Callers of this routine
 * require this to happen immediately because a later kernel
 * operation depends on window state (like instruction simulation).
 */
int
flush_user_windows_to_stack(caddr_t *psp)
{
	int j, k;
	caddr_t sp;
	struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
	int err;
	int error = 0;
	int wbuf_rwindow_size;
	int rwindow_size;
	int stack_align;
	int watched;

	flush_user_windows();

	if (mpcb->mpcb_wstate != WSTATE_USER32)
		wbuf_rwindow_size = WINDOWSIZE64;
	else
		wbuf_rwindow_size = WINDOWSIZE32;

	j = mpcb->mpcb_wbcnt;
	while (j > 0) {
		sp = mpcb->mpcb_spbuf[--j];

		if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
		    IS_V9STACK(sp)) {
			sp += V9BIAS64;
			stack_align = STACK_ALIGN64;
			rwindow_size = WINDOWSIZE64;
		} else {
			sp = (caddr_t)(uint32_t)sp;
			stack_align = STACK_ALIGN32;
			rwindow_size = WINDOWSIZE32;
		}
		if (((uintptr_t)sp & (stack_align - 1)) != 0)
			continue;

		watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
		err = xcopyout(mpcb->mpcb_wbuf +
		    (j * wbuf_rwindow_size), sp, rwindow_size);
		if (err != 0) {
			if (psp != NULL) {
				/*
				 * Determine the offending address.
				 * It may not be the stack pointer itself.
				 */
				uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
				    (j * wbuf_rwindow_size));
				uint_t *uaddr = (uint_t *)sp;

				for (k = 0;
				    k < rwindow_size / sizeof (int);
				    k++, kaddr++, uaddr++) {
					if (suword32(uaddr, *kaddr))
						break;
				}

				/* can't happen? */
				if (k == rwindow_size / sizeof (int))
					uaddr = (uint_t *)sp;

				*psp = (caddr_t)uaddr;
			}
			error = err;
		} else {
			/*
			 * stack was aligned and copyout succeeded;
			 * move other windows down.
			 */
			mpcb->mpcb_wbcnt--;
			for (k = j; k < mpcb->mpcb_wbcnt; k++) {
				mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
				bcopy(
				    mpcb->mpcb_wbuf +
				    ((k+1) * wbuf_rwindow_size),
				    mpcb->mpcb_wbuf +
				    (k * wbuf_rwindow_size),
				    wbuf_rwindow_size);
			}
		}
		if (watched)
			watch_enable_addr(sp, rwindow_size, S_WRITE);
	} /* while there are windows in the wbuf */
	return (error);
}

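/*
 * copy_return_window() and its ILP32 variant below cache the caller's
 * return window(s) from the user stack into the pcb (mpcb_rwin[] and
 * mpcb_rsp[]) after flushing; the 32-bit variant converts the rwindow32
 * layout into the native rwindow form.  sendsig() refers to mpcb_rsp[]
 * as the watchpoint return stack pointers and clears them before it
 * builds a signal frame.
 */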
static int
copy_return_window32(int dotwo)
{
	klwp_t *lwp = ttolwp(curthread);
	struct machpcb *mpcb = lwptompcb(lwp);
	struct rwindow32 rwindow32;
	caddr_t sp1;
	caddr_t sp2;

	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		sp1 = (caddr_t)(uint32_t)lwptoregs(lwp)->r_sp;
		if ((copyin_nowatch(sp1, &rwindow32,
		    sizeof (struct rwindow32))) == 0)
			mpcb->mpcb_rsp[0] = sp1;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		if ((copyin_nowatch(sp2, &rwindow32,
		    sizeof (struct rwindow32)) == 0))
			mpcb->mpcb_rsp[1] = sp2;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}

int
copy_return_window(int dotwo)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp;
	struct machpcb *mpcb;
	caddr_t sp1;
	caddr_t sp2;

	if (p->p_model == DATAMODEL_ILP32)
		return (copy_return_window32(dotwo));

	lwp = ttolwp(curthread);
	mpcb = lwptompcb(lwp);
	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
		if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		sp2 += STACK_BIAS;
		if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}

/*
 * Clear registers on exec(2).
 */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	klwp_t *lwp = ttolwp(curthread);
	kfpu_t *fpp = lwptofpu(lwp);
	struct machpcb *mpcb = lwptompcb(lwp);
	proc_t *p = ttoproc(curthread);

	/*
	 * Initialize user registers.
	 */
	(void) save_syscall_args();	/* copy args from registers first */
	rp = lwptoregs(lwp);
	rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
	    rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
	    rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
	if (p->p_model == DATAMODEL_ILP32)
		rp->r_tstate = TSTATE_USER32;
	else
		rp->r_tstate = TSTATE_USER64;
	if (!fpu_exists)
		rp->r_tstate &= ~TSTATE_PEF;
	rp->r_g7 = args->thrptr;
	rp->r_pc = args->entry;
	rp->r_npc = args->entry + 4;
	rp->r_y = 0;
	curthread->t_post_sys = 1;
	lwp->lwp_eosys = JUSTRETURN;
	lwp->lwp_pcb.pcb_trap0addr = NULL;	/* no trap 0 handler */
	/*
	 * Clear the fixalignment flag
	 */
	p->p_fixalignment = 0;

	/*
	 * Throw out old user windows, init window buf.
	 */
	trash_user_windows();

	if (p->p_model == DATAMODEL_LP64 &&
	    mpcb->mpcb_wstate != WSTATE_USER64) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_free(mpcb->mpcb_wbuf, MAXWIN * sizeof (struct rwindow32));
		mpcb->mpcb_wbuf = kmem_alloc(MAXWIN *
		    sizeof (struct rwindow64), KM_SLEEP);
		ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
		mpcb->mpcb_wstate = WSTATE_USER64;
	} else if (p->p_model == DATAMODEL_ILP32 &&
	    mpcb->mpcb_wstate != WSTATE_USER32) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_free(mpcb->mpcb_wbuf, MAXWIN * sizeof (struct rwindow64));
		mpcb->mpcb_wbuf = kmem_alloc(MAXWIN *
		    sizeof (struct rwindow32), KM_SLEEP);
		mpcb->mpcb_wstate = WSTATE_USER32;
	}
	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes
	 * or by the lib_psr memcpy routines.
	 */
	if (fpu_exists) {
		extern void _fp_write_fprs(unsigned);
		_fp_write_fprs(0);
	}
	fpp->fpu_en = 0;
	fpp->fpu_fprs = 0;
}

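/*
 * The machpcb and its register-window buffer can end up on different
 * physical pages while an lwp is swapped out, so the cached physical
 * addresses are recomputed when it is swapped back in.
 */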
void
lwp_swapin(kthread_t *tp)
{
	struct machpcb *mpcb = lwptompcb(ttolwp(tp));

	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
}

/*
 * Construct the execution environment for the user's signal
 * handler and arrange for control to be given to it on return
 * to userland.  The library code now calls setcontext() to
 * clean up after the signal handler, so sigret() is no longer
 * needed.
 */
int
sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	/*
	 * 'volatile' is needed to ensure that values are
	 * correct on the error return from on_fault().
	 */
	volatile int minstacksz;	/* min stack required to catch signal */
	int newstack = 0;		/* if true, switching to altstack */
	label_t ljb;
	caddr_t sp;
	struct regs *volatile rp;
	klwp_t *lwp = ttolwp(curthread);
	proc_t *volatile p = ttoproc(curthread);
	int fpq_size = 0;
	struct sigframe {
		struct frame frwin;
		ucontext_t uc;
	};
	siginfo_t *sip_addr;
	struct sigframe *volatile fp;
	ucontext_t *volatile tuc = NULL;
	char *volatile xregs = NULL;
	volatile size_t xregs_size = 0;
	gwindows_t *volatile gwp = NULL;
	volatile int gwin_size = 0;
	kfpu_t *fpp;
	struct machpcb *mpcb;
	volatile int watched = 0;
	volatile int watched2 = 0;
	caddr_t tos;

	/*
	 * Make sure the current last user window has been flushed to
	 * the stack save area before we change the sp.
	 * Restore register window if a debugger modified it.
	 */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	mpcb = lwptompcb(lwp);
	rp = lwptoregs(lwp);

	/*
	 * Clear the watchpoint return stack pointers.
	 */
	mpcb->mpcb_rsp[0] = NULL;
	mpcb->mpcb_rsp[1] = NULL;

	minstacksz = sizeof (struct sigframe);

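	/*
	 * The space reserved on the user's (or alternate) stack is laid
	 * out by the code below, from low to high addresses, as:
	 *
	 *	struct frame	frwin	window save area for the handler
	 *	ucontext_t	uc	the saved user context
	 *	[xregs]			extra register state, if any
	 *	[siginfo_t]		if a siginfo is being delivered
	 *	[gwindows_t]		unsaved register windows, if any
	 *	[struct fq ...]		pending floating-point queue, if any
	 */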
	/*
	 * We know that sizeof (siginfo_t) is stack-aligned:
	 * 128 bytes for ILP32, 256 bytes for LP64.
	 */
	if (sip != NULL)
		minstacksz += sizeof (siginfo_t);

	/*
	 * These two fields are pointed to by ABI structures and may
	 * be of arbitrary length.  Size them now so we know how big
	 * the signal frame has to be.
	 */
	fpp = lwptofpu(lwp);
	fpp->fpu_fprs = _fp_read_fprs();
	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
		fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
		minstacksz += SA(fpq_size);
	}

	mpcb = lwptompcb(lwp);
	if (mpcb->mpcb_wbcnt != 0) {
		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
		    (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
		minstacksz += SA(gwin_size);
	}

	/*
	 * Extra registers, if supported by this platform, may be of arbitrary
	 * length.  Size them now so we know how big the signal frame has to be.
	 * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
	 */
	minstacksz += SA(xregs_size);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user.  Then allocate
	 * and validate the stack requirements for the signal handler
	 * context.  on_fault will catch any faults.
	 */
	newstack = (sigismember(&u.u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));

	tos = (caddr_t)rp->r_sp + STACK_BIAS;
	if (newstack != 0) {
		fp = (struct sigframe *)
		    (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
		    SA(minstacksz));
	} else {
		/*
		 * If we were unable to flush all register windows to
		 * the stack and we are not now on an alternate stack,
		 * just dump core with a SIGSEGV back in psig().
		 */
		if (sig == SIGSEGV &&
		    mpcb->mpcb_wbcnt != 0 &&
		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
			return (0);
		fp = (struct sigframe *)(tos - SA(minstacksz));
		/*
		 * Could call grow here, but stack growth now handled below
		 * in code protected by on_fault().
		 */
	}
	sp = (caddr_t)fp + sizeof (struct sigframe);

	/*
	 * Make sure process hasn't trashed its stack.
	 */
	if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0 ||
	    (caddr_t)fp >= p->p_usrstack ||
	    (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
#ifdef DEBUG
		printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)fp, (void *)hdlr, rp->r_pc);

		if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0)
			printf("bad stack alignment\n");
		else
			printf("fp above USRSTACK\n");
#endif
		return (0);
	}

	watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
	if (on_fault(&ljb))
		goto badstack;

	tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
	savecontext(tuc, lwp->lwp_sigoldmask);

	/*
	 * save extra register state if it exists
	 */
	if (xregs_size != 0) {
		xregs_setptr(lwp, tuc, sp);
		xregs = kmem_alloc(xregs_size, KM_SLEEP);
		xregs_get(lwp, xregs);
		copyout_noerr(xregs, sp, xregs_size);
		kmem_free(xregs, xregs_size);
		xregs = NULL;
		sp += SA(xregs_size);
	}

	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	if (sip != NULL) {
		zoneid_t zoneid;

		uzero(sp, sizeof (siginfo_t));
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			k_siginfo_t sani_sip = *sip;
			sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
			sani_sip.si_uid = 0;
			sani_sip.si_ctid = -1;
			sani_sip.si_zoneid = zoneid;
			copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
		} else {
			copyout_noerr(sip, sp, sizeof (*sip));
		}
		sip_addr = (siginfo_t *)sp;
		sp += sizeof (siginfo_t);

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;
			while (--i >= 0) {
				sulword_noerr(
				    (ulong_t *)&sip_addr->si_sysarg[i],
				    (ulong_t)lwp->lwp_arg[i]);
			}
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else {
		sip_addr = (siginfo_t *)NULL;
	}

	/*
	 * When flush_user_windows_to_stack() can't save all the
	 * windows to the stack, it puts them in the lwp's pcb.
	 */
	if (gwin_size != 0) {
		gwp = kmem_alloc(gwin_size, KM_SLEEP);
		getgwins(lwp, gwp);
		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
		copyout_noerr(gwp, sp, gwin_size);
		kmem_free(gwp, gwin_size);
		gwp = NULL;
		sp += SA(gwin_size);
	} else
		sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);

	if (fpq_size != 0) {
		struct fq *fqp = (struct fq *)sp;
		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
		copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);

		/*
		 * forget the fp queue so that the signal handler can run
		 * without being harassed--it will do a setcontext that will
		 * re-establish the queue if there still is one
		 *
		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
		 * to terminate its processing of the queue after signal
		 * delivery.
		 */
		mpcb->mpcb_fpu->fpu_qcnt = 0;
		sp += SA(fpq_size);

		/* Also, syscall needs to know about this */
		mpcb->mpcb_flags |= FP_TRAPPED;

	} else {
		sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
	}


	/*
	 * Since we flushed the user's windows and we are changing his
	 * stack pointer, the window that the user will return to will
	 * be restored from the save area in the frame we are setting up.
	 * We copy in save area for old stack pointer so that debuggers
	 * can do a proper stack backtrace from the signal handler.
	 */
	if (mpcb->mpcb_wbcnt == 0) {
		watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
		    S_READ);
		ucopy(tos, &fp->frwin, sizeof (struct rwindow));
	}

	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;

	if (newstack != 0) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;

		if (lwp->lwp_ustack) {
			copyout_noerr(&lwp->lwp_sigaltstack,
			    (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
		}
	}

	no_fault();
	mpcb->mpcb_wbcnt = 0;		/* let user go on */

	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 */
	rp->r_sp = (uintptr_t)fp - STACK_BIAS;
	rp->r_pc = (uintptr_t)hdlr;
	rp->r_npc = (uintptr_t)hdlr + 4;
	/* make sure %asi is ASI_PNF */
	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
	rp->r_o0 = sig;
	rp->r_o1 = (uintptr_t)sip_addr;
	rp->r_o2 = (uintptr_t)&fp->uc;
	/*
	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	no_fault();
	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (ucontext_t));
	if (xregs)
		kmem_free(xregs, xregs_size);
	if (gwp)
		kmem_free(gwp, gwin_size);
#ifdef DEBUG
	printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
	    (void *)fp, (void *)hdlr, rp->r_pc);
#endif
	return (0);
}


#ifdef _SYSCALL32_IMPL

/*
 * Construct the execution environment for the user's signal
 * handler and arrange for control to be given to it on return
 * to userland.  The library code now calls setcontext() to
 * clean up after the signal handler, so sigret() is no longer
 * needed.
 */
int
sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	/*
	 * 'volatile' is needed to ensure that values are
	 * correct on the error return from on_fault().
	 */
	volatile int minstacksz;	/* min stack required to catch signal */
	int newstack = 0;		/* if true, switching to altstack */
	label_t ljb;
	caddr_t sp;
	struct regs *volatile rp;
	klwp_t *lwp = ttolwp(curthread);
	proc_t *volatile p = ttoproc(curthread);
	struct fq32 fpu_q[MAXFPQ];	/* to hold floating queue */
	struct fq32 *dfq = NULL;
	size_t fpq_size = 0;
	struct sigframe32 {
		struct frame32 frwin;
		ucontext32_t uc;
	};
	struct sigframe32 *volatile fp;
	siginfo32_t *sip_addr;
	ucontext32_t *volatile tuc = NULL;
	char *volatile xregs = NULL;
	volatile int xregs_size = 0;
	gwindows32_t *volatile gwp = NULL;
	volatile size_t gwin_size = 0;
	kfpu_t *fpp;
	struct machpcb *mpcb;
	volatile int watched = 0;
	volatile int watched2 = 0;
	caddr_t tos;

	/*
	 * Make sure the current last user window has been flushed to
	 * the stack save area before we change the sp.
	 * Restore register window if a debugger modified it.
	 */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	mpcb = lwptompcb(lwp);
	rp = lwptoregs(lwp);

	/*
	 * Clear the watchpoint return stack pointers.
	 */
	mpcb->mpcb_rsp[0] = NULL;
	mpcb->mpcb_rsp[1] = NULL;

	minstacksz = sizeof (struct sigframe32);

	if (sip != NULL)
		minstacksz += sizeof (siginfo32_t);

	/*
	 * These two fields are pointed to by ABI structures and may
	 * be of arbitrary length.  Size them now so we know how big
	 * the signal frame has to be.
	 */
	fpp = lwptofpu(lwp);
	fpp->fpu_fprs = _fp_read_fprs();
	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
		fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
		minstacksz += fpq_size;
		dfq = fpu_q;
	}

	mpcb = lwptompcb(lwp);
	if (mpcb->mpcb_wbcnt != 0) {
		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
		    (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
		    sizeof (int32_t);
		minstacksz += gwin_size;
	}

	/*
	 * Extra registers, if supported by this platform, may be of arbitrary
	 * length.  Size them now so we know how big the signal frame has to be.
	 */
	xregs_size = xregs_getsize(p);
	minstacksz += SA32(xregs_size);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user.  Then allocate
	 * and validate the stack requirements for the signal handler
	 * context.  on_fault will catch any faults.
	 */
	newstack = (sigismember(&u.u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));

	tos = (void *)(uint32_t)rp->r_sp;
	if (newstack != 0) {
		fp = (struct sigframe32 *)
		    (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA32((int)lwp->lwp_sigaltstack.ss_size) -
		    STACK_ALIGN32 -
		    SA32(minstacksz));
	} else {
		/*
		 * If we were unable to flush all register windows to
		 * the stack and we are not now on an alternate stack,
		 * just dump core with a SIGSEGV back in psig().
		 */
		if (sig == SIGSEGV &&
		    mpcb->mpcb_wbcnt != 0 &&
		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
			return (0);
		fp = (struct sigframe32 *)(tos - SA32(minstacksz));
		/*
		 * Could call grow here, but stack growth now handled below
		 * in code protected by on_fault().
		 */
	}
	sp = (caddr_t)fp + sizeof (struct sigframe32);

	/*
	 * Make sure process hasn't trashed its stack.
	 */
	if (((uintptr_t)fp & (STACK_ALIGN32 - 1)) != 0 ||
	    (caddr_t)fp >= p->p_usrstack ||
	    (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
#ifdef DEBUG
		printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)fp, (void *)hdlr, rp->r_pc);

		if (((uintptr_t)fp & (STACK_ALIGN32 - 1)) != 0)
			printf("bad stack alignment\n");
		else
			printf("fp above USRSTACK32\n");
#endif
		return (0);
	}

	watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (on_fault(&ljb))
		goto badstack;

	tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
	savecontext32(tuc, lwp->lwp_sigoldmask, dfq);

	/*
	 * save extra register state if it exists
	 */
	if (xregs_size != 0) {
		xregs_setptr32(lwp, tuc, (caddr32_t)sp);
		xregs = kmem_alloc(xregs_size, KM_SLEEP);
		xregs_get(lwp, xregs);
		copyout_noerr(xregs, sp, xregs_size);
		kmem_free(xregs, xregs_size);
		xregs = NULL;
		sp += SA32(xregs_size);
	}

	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	if (sip != NULL) {
		siginfo32_t si32;
		zoneid_t zoneid;

		siginfo_kto32(sip, &si32);
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			si32.si_pid = p->p_zone->zone_zsched->p_pid;
			si32.si_uid = 0;
			si32.si_ctid = -1;
			si32.si_zoneid = zoneid;
		}
		uzero(sp, sizeof (siginfo32_t));
		copyout_noerr(&si32, sp, sizeof (siginfo32_t));
		sip_addr = (siginfo32_t *)sp;
		sp += sizeof (siginfo32_t);

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;
			while (--i >= 0) {
				suword32_noerr(&sip_addr->si_sysarg[i],
				    (uint32_t)lwp->lwp_arg[i]);
			}
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else {
		sip_addr = NULL;
	}

	/*
	 * When flush_user_windows_to_stack() can't save all the
	 * windows to the stack, it puts them in the lwp's pcb.
	 */
	if (gwin_size != 0) {
		gwp = kmem_alloc(gwin_size, KM_SLEEP);
		getgwins32(lwp, gwp);
		suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)sp);
		copyout_noerr(gwp, sp, gwin_size);
		kmem_free(gwp, gwin_size);
		gwp = NULL;
		sp += gwin_size;
	} else {
		suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)NULL);
	}

	if (fpq_size != 0) {
		/*
		 * Update the (already copied out) fpu32.fpu_q pointer
		 * from NULL to the 32-bit address on the user's stack
		 * where we then copyout the fq32 to.
		 */
		struct fq32 *fqp = (struct fq32 *)sp;
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (uint32_t)fqp);
		copyout_noerr(dfq, fqp, fpq_size);

		/*
		 * forget the fp queue so that the signal handler can run
		 * without being harassed--it will do a setcontext that will
		 * re-establish the queue if there still is one
		 *
		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
		 * to terminate its processing of the queue after signal
		 * delivery.
		 */
		mpcb->mpcb_fpu->fpu_qcnt = 0;
		sp += fpq_size;

		/* Also, syscall needs to know about this */
		mpcb->mpcb_flags |= FP_TRAPPED;

	} else {
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)NULL);
		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
	}


	/*
	 * Since we flushed the user's windows and we are changing his
	 * stack pointer, the window that the user will return to will
	 * be restored from the save area in the frame we are setting up.
	 * We copy in save area for old stack pointer so that debuggers
	 * can do a proper stack backtrace from the signal handler.
	 */
	if (mpcb->mpcb_wbcnt == 0) {
		watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
		    S_READ);
		ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
	}

	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;

	if (newstack != 0) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack) {
			stack32_t stk32;

			stk32.ss_sp = (caddr32_t)lwp->lwp_sigaltstack.ss_sp;
			stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
			stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;

			copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
			    sizeof (stack32_t));
		}
	}

	no_fault();
	mpcb->mpcb_wbcnt = 0;		/* let user go on */

	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 */
	rp->r_sp = (uintptr_t)fp;
	rp->r_pc = (uintptr_t)hdlr;
	rp->r_npc = (uintptr_t)hdlr + 4;
	/* make sure %asi is ASI_PNF */
	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
	rp->r_o0 = sig;
	rp->r_o1 = (uintptr_t)sip_addr;
	rp->r_o2 = (uintptr_t)&fp->uc;
	/*
	 * Don't set lwp_eosys here.  sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	no_fault();
	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
	if (xregs)
		kmem_free(xregs, xregs_size);
	if (gwp)
		kmem_free(gwp, gwin_size);
#ifdef DEBUG
	printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)fp, (void *)hdlr, rp->r_pc);
#endif
	return (0);
}

#endif /* _SYSCALL32_IMPL */


/*
 * load user registers into lwp.
 * thrptr ignored for sparc.
 */
/* ARGSUSED2 */
void
lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
{
	setgregs(lwp, grp);
	if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
		lwptoregs(lwp)->r_tstate = TSTATE_USER32;
	else
		lwptoregs(lwp)->r_tstate = TSTATE_USER64;

	if (!fpu_exists)
		lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
	lwp->lwp_eosys = JUSTRETURN;
	lwptot(lwp)->t_post_sys = 1;
}

/*
 * set syscall()'s return values for a lwp.
 */
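/*
 * Note: TSTATE_IC covers the condition-code carry bit in %tstate; the
 * SPARC system call convention uses the carry bit to flag an error
 * return, so clearing it below marks the call as successful before the
 * return values are placed in %o0/%o1.
 */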
void
lwp_setrval(klwp_t *lwp, int v1, int v2)
{
	struct regs *rp = lwptoregs(lwp);

	rp->r_tstate &= ~TSTATE_IC;
	rp->r_o0 = v1;
	rp->r_o1 = v2;
}

/*
 * set stack pointer for a lwp
 */
void
lwp_setsp(klwp_t *lwp, caddr_t sp)
{
	struct regs *rp = lwptoregs(lwp);
	rp->r_sp = (uintptr_t)sp;
}

/*
 * Take any PCB specific actions that are required or flagged in the PCB.
 */
extern void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void
lwp_pcb_exit(void)
{
	klwp_t *lwp = ttolwp(curthread);

	if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
		trap_async_hwerr();
	}
}

/*
 * Invalidate the saved user register windows in the pcb struct
 * for the current thread.  They will no longer be preserved.
 */
void
lwp_clear_uwin(void)
{
	struct machpcb *m = lwptompcb(ttolwp(curthread));

	/*
	 * This has the effect of invalidating all (any) of the
	 * user level windows that are currently sitting in the
	 * kernel buffer.
	 */
	m->mpcb_wbcnt = 0;
}

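/*
 * Build a V8 %psr image for 32-bit consumers (e.g. getgregs32()'s REG_PSR)
 * from the V9 %tstate and %fprs values: the CWP field, PS and EF bits and
 * the integer condition codes are carried over, and the implementation/
 * version field is set to V9_PSR_IMPLVER.
 */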
static uint_t
mkpsr(uint64_t tstate, uint_t fprs)
{
	uint_t psr, icc;

	psr = tstate & TSTATE_CWP_MASK;
	if (tstate & TSTATE_PRIV)
		psr |= PSR_PS;
	if (fprs & FPRS_FEF)
		psr |= PSR_EF;
	icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
	psr |= icc;
	psr |= V9_PSR_IMPLVER;
	return (psr);
}

void
sync_icache(caddr_t va, uint_t len)
{
	caddr_t end;

	end = va + len;
	va = (caddr_t)((uintptr_t)va & -8l);	/* sparc needs 8-byte align */
	while (va < end) {
		doflush(va);
		va += 8;
	}
}

#ifdef _SYSCALL32_IMPL

/*
 * Copy the floating point queue if and only if there is a queue and a place
 * to copy it to.  Let xregs take care of the other fp regs, for v8plus.
 * The issue is that while we are handling the fq32 in sendsig, we
 * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
 * will not suffice, so we have the third parameter to this function.
 */
void
fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
{
	int i;

	bzero(dest, sizeof (*dest));
	for (i = 0; i < 32; i++)
		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
	dest->fpu_q = NULL;
	dest->fpu_fsr = (uint32_t)src->fpu_fsr;
	dest->fpu_qcnt = src->fpu_qcnt;
	dest->fpu_q_entrysize = sizeof (struct fpq32);
	dest->fpu_en = src->fpu_en;

	if ((src->fpu_qcnt) && (dfq != NULL)) {
		struct fq *sfq = src->fpu_q;
		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
			dfq->FQu.fpq.fpq_addr =
			    (caddr32_t)sfq->FQu.fpq.fpq_addr;
			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
		}
	}
}

/*
 * Copy the floating point queue if and only if there is a queue and a place
 * to copy it to.  Let xregs take care of the other fp regs, for v8plus.
 * The *dfq is required to escape the bzero in both this function and in
 * ucontext_32ton.  The *sfq is required because once the fq32 is copied
 * into the kernel, in setcontext, then we need a 64-bit pointer to it.
 */
static void
fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
    const struct fq32 *sfq, struct fq *dfq)
{
	int i;

	bzero(dest, sizeof (*dest));
	for (i = 0; i < 32; i++)
		dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
	dest->fpu_q = dfq;
	dest->fpu_fsr = (uint64_t)src->fpu_fsr;
	if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
		dest->fpu_q_entrysize = sizeof (struct fpq);
	else
		dest->fpu_q_entrysize = 0;
	dest->fpu_en = src->fpu_en;

	if ((src->fpu_qcnt) && (sfq) && (dfq)) {
		for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
			dfq->FQu.fpq.fpq_addr =
			    (unsigned int *)sfq->FQu.fpq.fpq_addr;
			dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
		}
	}
}

void
ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
    const struct fq32 *sfq, struct fq *dfq)
{
	int i;

	bzero(dest, sizeof (*dest));

	dest->uc_flags = src->uc_flags;
	dest->uc_link = (ucontext_t *)src->uc_link;

	for (i = 0; i < 4; i++) {
		dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
	}

	dest->uc_stack.ss_sp = (void *)src->uc_stack.ss_sp;
	dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
	dest->uc_stack.ss_flags = src->uc_stack.ss_flags;

	/* REG_CCR is 0, skip over it and handle it after this loop */
	for (i = 1; i < _NGREG32; i++)
		dest->uc_mcontext.gregs[i] =
		    (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
	dest->uc_mcontext.gregs[REG_CCR] =
	    (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
	dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
	/*
	 * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
	 * otherwise there is no guarantee that anything in fpregs is valid.
	 */
	if (src->uc_flags & UC_FPU) {
		dest->uc_mcontext.gregs[REG_FPRS] =
		    ((src->uc_mcontext.fpregs.fpu_en) ?
		    (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
	} else {
		dest->uc_mcontext.gregs[REG_FPRS] = 0;
	}
	dest->uc_mcontext.gwins = (gwindows_t *)src->uc_mcontext.gwins;
	if (src->uc_flags & UC_FPU) {
		fpuregset_32ton(&src->uc_mcontext.fpregs,
		    &dest->uc_mcontext.fpregs, sfq, dfq);
	}
}

void
rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
{
	greg_t *s = (greg_t *)src;
	greg32_t *d = (greg32_t *)dest;
	int i;

	for (i = 0; i < 16; i++)
		*d++ = (greg32_t)*s++;
}

void
rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
{
	greg32_t *s = (greg32_t *)src;
	greg_t *d = (greg_t *)dest;
	int i;

	for (i = 0; i < 16; i++)
		*d++ = (uint32_t)*s++;
}

#endif /* _SYSCALL32_IMPL */

/*
 * The panic code invokes panic_saveregs() to record the contents of a
 * regs structure into the specified panic_data structure for debuggers.
 */
void
panic_saveregs(panic_data_t *pdp, struct regs *rp)
{
	panic_nv_t *pnv = PANICNVGET(pdp);

	PANICNVADD(pnv, "tstate", rp->r_tstate);
	PANICNVADD(pnv, "g1", rp->r_g1);
	PANICNVADD(pnv, "g2", rp->r_g2);
	PANICNVADD(pnv, "g3", rp->r_g3);
	PANICNVADD(pnv, "g4", rp->r_g4);
	PANICNVADD(pnv, "g5", rp->r_g5);
	PANICNVADD(pnv, "g6", rp->r_g6);
	PANICNVADD(pnv, "g7", rp->r_g7);
	PANICNVADD(pnv, "o0", rp->r_o0);
	PANICNVADD(pnv, "o1", rp->r_o1);
	PANICNVADD(pnv, "o2", rp->r_o2);
	PANICNVADD(pnv, "o3", rp->r_o3);
	PANICNVADD(pnv, "o4", rp->r_o4);
	PANICNVADD(pnv, "o5", rp->r_o5);
	PANICNVADD(pnv, "o6", rp->r_o6);
	PANICNVADD(pnv, "o7", rp->r_o7);
	PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
	PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
	PANICNVADD(pnv, "y", (uint32_t)rp->r_y);

	PANICNVSET(pdp, pnv);
}