/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

static int
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}

	return (0);	/* avoid optimization of restore in call's delay slot */
}

#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

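/*
 * Each of these patterns matches the fixed op/op3 opcode bits of the
 * instruction with the register and immediate fields masked off; e.g.
 * 0xc0680000 is LDSTUB (op=3, op3=0x0d).  IS_FLOAT keys off bit 24
 * (op3<5>), set for the floating-point load/store op3 range, and
 * IS_STORE off bit 21 (op3<2>), set for the store flavors of the
 * load/store opcode space.
 */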
#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() has been called recursively and may
			 * have stopped the process, so do single step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
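	/*
	 * Don't flood the ftrace buffer with records for the most
	 * frequent traps; everything else gets an ftrace entry here.
	 */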
	switch (type) {
	case T_DATA_MMU_MISS:
	case T_INSTR_MMU_MISS + T_USER:
	case T_DATA_MMU_MISS + T_USER:
	case T_DATA_PROT + T_USER:
	case T_AST + T_USER:
	case T_SYS_RTT_PAGE + T_USER:
	case T_FLUSH_PCB + T_USER:
	case T_FLUSHW + T_USER:
		break;

	default:
		FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
		    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
		break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_ILLTRP;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

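		/*
		 * A copy routine has registered an error handler:
		 * return EFAULT in %g1 and resume at t_lofault, the
		 * copyin/copyout error label.
		 */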
		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault if
			 * we're doing a copyin/copyout from a kernel thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:	/* system data mmu miss */
	case T_DATA_PROT:	/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
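		/*
		 * Stash and clear t_lofault across the pagefault() call
		 * so that a recursive fault taken while resolving this
		 * one dies in die() rather than silently bouncing to the
		 * copy routine's error handler; lofault is restored below.
		 */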
		lofault = curthread->t_lofault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses, so it is not easy to find out whether
			 * the access was a read or a write; we need to
			 * decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER:	/* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses,
			 * so it is not easy to find out whether the access
			 * was a read or a write; we need to decode the
			 * actual instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

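			/*
			 * Watchpoint handling: if the faulting page is
			 * being watched, either emulate or single-step
			 * over the access, or post a SIGTRAP/FLTWATCH
			 * below.
			 */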
			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeded, we're done.
			 * Otherwise, try to grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault.  Unfortunately it is
				 * very slow, and this is an important
				 * code path.  Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set, or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * Check for non-faulting loads, and also
				 * fetch the instruction to check for
				 * a flush.
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * Check if the instruction was a
				 * flush.  The ABI allows users to specify
				 * an illegal address on the flush
				 * instruction, so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
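				/*
				 * An instruction fetch faulted with
				 * FC_PROT: the process tried to execute
				 * from a non-executable mapping,
				 * typically its stack.
				 */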
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to WATCH_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    (pcb->pcb_flags & NORMAL_STEP)) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
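		/*
		 * FT_ATOMIC_NC: an atomic to a non-cacheable page.
		 * Try to emulate the swap/ldstub (see swap_nc() and
		 * ldstub_nc() below) before resorting to a signal.
		 */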
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that we've come
			 * from a trap handler and pass the regs.  That
			 * function may choose to panic (in which case it
			 * won't return) or it may determine that a reboot
			 * is desired.  In the latter case it must alter
			 * pc/npc to skip the illegal instruction and
			 * continue at a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
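		/*
		 * A CALL instruction (op field 01) encodes a signed
		 * 30-bit word displacement; emulate the call by saving
		 * the return address in %o7 and redirecting npc to the
		 * computed target.
		 */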
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
			    (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user, it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
		{
			int error;
			caddr_t sp;

			error = flush_user_windows_to_stack(&sp);
			/*
			 * Possible errors:
			 *	error copying out
			 *	unaligned stack pointer
			 * The first is given to us as the return value
			 * from flush_user_windows_to_stack().  The second
			 * results in residual windows in the pcb.
			 */
			if (error != 0) {
				/*
				 * EINTR comes from a signal during copyout;
				 * we should not post another signal.
				 */
				if (error != EINTR) {
					/*
					 * Zap the process with a SIGSEGV - process
					 * may be managing its own stack growth by
					 * taking SIGSEGVs on a different signal stack.
					 */
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGSEGV;
					siginfo.si_code = SEGV_MAPERR;
					siginfo.si_addr = sp;
					fault = FLTBOUNDS;
				}
				break;
			} else if (mpcb->mpcb_wbcnt) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADSTK;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTILL;
				break;
			}
		}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction.  The flushw can trap if any of the
		 * stack pages are not writable for whatever reason.  In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again.  Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:	/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
	    tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

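/*
 * Common post-trap bookkeeping for user traps: record the fault for
 * /proc and profiling, give a debugger a chance to intercept it, post
 * any signal, and process pending ASTs and signal checks.
 */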
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking, is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests on the per-process poll queue,
		 * aiop->aio_pollq, whose AIO_POLL bit is set, the kernel
		 * should copy out their result_t to user memory.  By
		 * copying out the result_t, the user can poll on memory,
		 * waiting for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 * came here directly from _fp_ieee_exception,
		 * which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28.  Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER:	/* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER:	/* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:	/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap.
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}

void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun)
		preempt();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)

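/*
 * Emulate a load via a non-faulting ASI (the *_NO_FAULT ASIs
 * 0x82/0x83/0x8a/0x8b, matched by IS_ASINF above) that trapped:
 * cover the offending address with the no-fault segment driver
 * (segnf_create) if the range is unmapped, write zero to the
 * destination register(s), and step past the instruction.
 */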
static int
nfload(struct regs *rp, int *instrp)
{
	uint_t instr, asi, op3, rd;
	size_t len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
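/*
 * Note that the emulation below is atomic only with respect to other
 * emulated operations: atomic_nc_mutex serializes the read/write pair
 * against other trapping swap/ldstub instructions, not against real
 * atomics or ordinary stores to the same address.
 */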
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 * preempt the running kernel thread.
 */
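/*
 * asyncspl is the interrupt level to restore (via splx()) when we were
 * entered asynchronously from an interrupt; KPREEMPT_SYNC indicates a
 * synchronous request from the thread itself.
 */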
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * either a privileged thread (idle, panic, interrupt)
			 * or will check when t_preempt is lowered
			 */
			if (curthread->t_pri < 0)
				kpreempt_cnts.kpc_idle++;
			else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else
				kpreempt_cnts.kpc_blocked++;
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}