1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/mmu.h> 29 #include <sys/systm.h> 30 #include <sys/trap.h> 31 #include <sys/machtrap.h> 32 #include <sys/vtrace.h> 33 #include <sys/prsystm.h> 34 #include <sys/archsystm.h> 35 #include <sys/machsystm.h> 36 #include <sys/fpu/fpusystm.h> 37 #include <sys/tnf.h> 38 #include <sys/tnf_probe.h> 39 #include <sys/simulate.h> 40 #include <sys/ftrace.h> 41 #include <sys/ontrap.h> 42 #include <sys/kcpc.h> 43 #include <sys/kobj.h> 44 #include <sys/procfs.h> 45 #include <sys/sun4asi.h> 46 #include <sys/sdt.h> 47 #include <sys/fpras.h> 48 49 #ifdef TRAPTRACE 50 #include <sys/traptrace.h> 51 #endif 52 53 int tudebug = 0; 54 static int tudebugbpt = 0; 55 static int tudebugfpe = 0; 56 57 static int alignfaults = 0; 58 59 #if defined(TRAPDEBUG) || defined(lint) 60 static int lodebug = 0; 61 #else 62 #define lodebug 0 63 #endif /* defined(TRAPDEBUG) || defined(lint) */ 64 65 66 int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault); 67 #pragma weak vis1_partial_support 68 69 void showregs(unsigned, struct regs *, caddr_t, uint_t); 70 #pragma weak showregs 71 72 void trap_async_hwerr(void); 73 #pragma weak trap_async_hwerr 74 75 void trap_async_berr_bto(int, struct regs *); 76 #pragma weak trap_async_berr_bto 77 78 static enum seg_rw get_accesstype(struct regs *); 79 static int nfload(struct regs *, int *); 80 static int swap_nc(struct regs *, int); 81 static int ldstub_nc(struct regs *, int); 82 void trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int); 83 void trap_rtt(void); 84 85 static int 86 die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr) 87 { 88 struct trap_info ti; 89 90 #ifdef TRAPTRACE 91 TRAPTRACE_FREEZE; 92 #endif 93 94 ti.trap_regs = rp; 95 ti.trap_type = type; 96 ti.trap_addr = addr; 97 ti.trap_mmu_fsr = mmu_fsr; 98 99 curthread->t_panic_trap = &ti; 100 101 if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) { 102 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x " 103 "occurred in module \"%s\" due to %s", 104 type, (void *)rp, (void *)addr, mmu_fsr, 105 mod_containing_pc((caddr_t)rp->r_pc), 106 addr < (caddr_t)PAGESIZE ? 107 "a NULL pointer dereference" : 108 "an illegal access to a user address"); 109 } else { 110 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x", 111 type, (void *)rp, (void *)addr, mmu_fsr); 112 } 113 114 return (0); /* avoid optimization of restore in call's delay slot */ 115 } 116 117 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */ 118 int ill_calls; 119 #endif 120 121 /* 122 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps 123 * are the "strong" prefetches (fcn=20-23). But we check for all flavors of 124 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS. 125 */ 126 #define IS_PREFETCH(i) (((i) & 0xc1780000) == 0xc1680000) 127 128 #define IS_FLUSH(i) (((i) & 0xc1f80000) == 0x81d80000) 129 #define IS_SWAP(i) (((i) & 0xc1f80000) == 0xc0780000) 130 #define IS_LDSTUB(i) (((i) & 0xc1f80000) == 0xc0680000) 131 #define IS_FLOAT(i) (((i) & 0x1000000) != 0) 132 #define IS_STORE(i) (((i) >> 21) & 1) 133 134 /* 135 * Called from the trap handler when a processor trap occurs. 136 */ 137 /*VARARGS2*/ 138 void 139 trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr) 140 { 141 proc_t *p = ttoproc(curthread); 142 klwp_id_t lwp = ttolwp(curthread); 143 struct machpcb *mpcb = NULL; 144 k_siginfo_t siginfo; 145 uint_t op3, fault = 0; 146 int stepped = 0; 147 greg_t oldpc; 148 int mstate; 149 char *badaddr; 150 faultcode_t res; 151 enum fault_type fault_type; 152 enum seg_rw rw; 153 uintptr_t lofault; 154 int instr; 155 int iskernel; 156 int watchcode; 157 int watchpage; 158 extern faultcode_t pagefault(caddr_t, enum fault_type, 159 enum seg_rw, int); 160 161 CPU_STATS_ADDQ(CPU, sys, trap, 1); 162 163 #ifdef SF_ERRATA_23 /* call causes illegal-insn */ 164 ASSERT((curthread->t_schedflag & TS_DONT_SWAP) || 165 (type == T_UNIMP_INSTR)); 166 #else 167 ASSERT(curthread->t_schedflag & TS_DONT_SWAP); 168 #endif /* SF_ERRATA_23 */ 169 170 if (USERMODE(rp->r_tstate) || (type & T_USER)) { 171 /* 172 * Set lwp_state before trying to acquire any 173 * adaptive lock 174 */ 175 ASSERT(lwp != NULL); 176 lwp->lwp_state = LWP_SYS; 177 /* 178 * Set up the current cred to use during this trap. u_cred 179 * no longer exists. t_cred is used instead. 180 * The current process credential applies to the thread for 181 * the entire trap. If trapping from the kernel, this 182 * should already be set up. 183 */ 184 if (curthread->t_cred != p->p_cred) { 185 cred_t *oldcred = curthread->t_cred; 186 /* 187 * DTrace accesses t_cred in probe context. t_cred 188 * must always be either NULL, or point to a valid, 189 * allocated cred structure. 190 */ 191 curthread->t_cred = crgetcred(); 192 crfree(oldcred); 193 } 194 type |= T_USER; 195 ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) || 196 (type == (T_SYS_RTT_ALIGN | T_USER)) || 197 lwp->lwp_regs == rp); 198 mpcb = lwptompcb(lwp); 199 switch (type) { 200 case T_WIN_OVERFLOW + T_USER: 201 case T_WIN_UNDERFLOW + T_USER: 202 case T_SYS_RTT_PAGE + T_USER: 203 case T_DATA_MMU_MISS + T_USER: 204 mstate = LMS_DFAULT; 205 break; 206 case T_INSTR_MMU_MISS + T_USER: 207 mstate = LMS_TFAULT; 208 break; 209 default: 210 mstate = LMS_TRAP; 211 break; 212 } 213 /* Kernel probe */ 214 TNF_PROBE_1(thread_state, "thread", /* CSTYLED */, 215 tnf_microstate, state, (char)mstate); 216 mstate = new_mstate(curthread, mstate); 217 siginfo.si_signo = 0; 218 stepped = 219 lwp->lwp_pcb.pcb_step != STEP_NONE && 220 ((oldpc = rp->r_pc), prundostep()) && 221 mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc); 222 /* this assignment must not precede call to prundostep() */ 223 oldpc = rp->r_pc; 224 } 225 226 TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER, 227 "C_trap_handler_enter:type %x", type); 228 229 #ifdef F_DEFERRED 230 /* 231 * Take any pending floating point exceptions now. 232 * If the floating point unit has an exception to handle, 233 * just return to user-level to let the signal handler run. 234 * The instruction that got us to trap() will be reexecuted on 235 * return from the signal handler and we will trap to here again. 236 * This is necessary to disambiguate simultaneous traps which 237 * happen when a floating-point exception is pending and a 238 * machine fault is incurred. 239 */ 240 if (type & USER) { 241 /* 242 * FP_TRAPPED is set only by sendsig() when it copies 243 * out the floating-point queue for the signal handler. 244 * It is set there so we can test it here and in syscall(). 245 */ 246 mpcb->mpcb_flags &= ~FP_TRAPPED; 247 syncfpu(); 248 if (mpcb->mpcb_flags & FP_TRAPPED) { 249 /* 250 * trap() has have been called recursively and may 251 * have stopped the process, so do single step 252 * support for /proc. 253 */ 254 mpcb->mpcb_flags &= ~FP_TRAPPED; 255 goto out; 256 } 257 } 258 #endif 259 switch (type) { 260 case T_DATA_MMU_MISS: 261 case T_INSTR_MMU_MISS + T_USER: 262 case T_DATA_MMU_MISS + T_USER: 263 case T_DATA_PROT + T_USER: 264 case T_AST + T_USER: 265 case T_SYS_RTT_PAGE + T_USER: 266 case T_FLUSH_PCB + T_USER: 267 case T_FLUSHW + T_USER: 268 break; 269 270 default: 271 FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx", 272 (ulong_t)type, (ulong_t)rp, (ulong_t)addr); 273 break; 274 } 275 276 switch (type) { 277 278 default: 279 /* 280 * Check for user software trap. 281 */ 282 if (type & T_USER) { 283 if (tudebug) 284 showregs(type, rp, (caddr_t)0, 0); 285 if ((type & ~T_USER) >= T_SOFTWARE_TRAP) { 286 bzero(&siginfo, sizeof (siginfo)); 287 siginfo.si_signo = SIGILL; 288 siginfo.si_code = ILL_ILLTRP; 289 siginfo.si_addr = (caddr_t)rp->r_pc; 290 siginfo.si_trapno = type &~ T_USER; 291 fault = FLTILL; 292 break; 293 } 294 } 295 addr = (caddr_t)rp->r_pc; 296 (void) die(type, rp, addr, 0); 297 /*NOTREACHED*/ 298 299 case T_ALIGNMENT: /* supv alignment error */ 300 if (nfload(rp, NULL)) 301 goto cleanup; 302 303 if (curthread->t_lofault) { 304 if (lodebug) { 305 showregs(type, rp, addr, 0); 306 traceback((caddr_t)rp->r_sp); 307 } 308 rp->r_g1 = EFAULT; 309 rp->r_pc = curthread->t_lofault; 310 rp->r_npc = rp->r_pc + 4; 311 goto cleanup; 312 } 313 (void) die(type, rp, addr, 0); 314 /*NOTREACHED*/ 315 316 case T_INSTR_EXCEPTION: /* sys instruction access exception */ 317 addr = (caddr_t)rp->r_pc; 318 (void) die(type, rp, addr, mmu_fsr); 319 /*NOTREACHED*/ 320 321 case T_INSTR_MMU_MISS: /* sys instruction mmu miss */ 322 addr = (caddr_t)rp->r_pc; 323 (void) die(type, rp, addr, 0); 324 /*NOTREACHED*/ 325 326 case T_DATA_EXCEPTION: /* system data access exception */ 327 switch (X_FAULT_TYPE(mmu_fsr)) { 328 case FT_RANGE: 329 /* 330 * This happens when we attempt to dereference an 331 * address in the address hole. If t_ontrap is set, 332 * then break and fall through to T_DATA_MMU_MISS / 333 * T_DATA_PROT case below. If lofault is set, then 334 * honour it (perhaps the user gave us a bogus 335 * address in the hole to copyin from or copyout to?) 336 */ 337 338 if (curthread->t_ontrap != NULL) 339 break; 340 341 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 342 if (curthread->t_lofault) { 343 if (lodebug) { 344 showregs(type, rp, addr, 0); 345 traceback((caddr_t)rp->r_sp); 346 } 347 rp->r_g1 = EFAULT; 348 rp->r_pc = curthread->t_lofault; 349 rp->r_npc = rp->r_pc + 4; 350 goto cleanup; 351 } 352 (void) die(type, rp, addr, mmu_fsr); 353 /*NOTREACHED*/ 354 355 case FT_PRIV: 356 /* 357 * This can happen if we access ASI_USER from a kernel 358 * thread. To support pxfs, we need to honor lofault if 359 * we're doing a copyin/copyout from a kernel thread. 360 */ 361 362 if (nfload(rp, NULL)) 363 goto cleanup; 364 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 365 if (curthread->t_lofault) { 366 if (lodebug) { 367 showregs(type, rp, addr, 0); 368 traceback((caddr_t)rp->r_sp); 369 } 370 rp->r_g1 = EFAULT; 371 rp->r_pc = curthread->t_lofault; 372 rp->r_npc = rp->r_pc + 4; 373 goto cleanup; 374 } 375 (void) die(type, rp, addr, mmu_fsr); 376 /*NOTREACHED*/ 377 378 default: 379 if (nfload(rp, NULL)) 380 goto cleanup; 381 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 382 (void) die(type, rp, addr, mmu_fsr); 383 /*NOTREACHED*/ 384 385 case FT_NFO: 386 break; 387 } 388 /* fall into ... */ 389 390 case T_DATA_MMU_MISS: /* system data mmu miss */ 391 case T_DATA_PROT: /* system data protection fault */ 392 if (nfload(rp, &instr)) 393 goto cleanup; 394 395 /* 396 * If we're under on_trap() protection (see <sys/ontrap.h>), 397 * set ot_trap and return from the trap to the trampoline. 398 */ 399 if (curthread->t_ontrap != NULL) { 400 on_trap_data_t *otp = curthread->t_ontrap; 401 402 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, 403 "C_trap_handler_exit"); 404 TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end"); 405 406 if (otp->ot_prot & OT_DATA_ACCESS) { 407 otp->ot_trap |= OT_DATA_ACCESS; 408 rp->r_pc = otp->ot_trampoline; 409 rp->r_npc = rp->r_pc + 4; 410 goto cleanup; 411 } 412 } 413 lofault = curthread->t_lofault; 414 curthread->t_lofault = 0; 415 416 mstate = new_mstate(curthread, LMS_KFAULT); 417 418 switch (type) { 419 case T_DATA_PROT: 420 fault_type = F_PROT; 421 rw = S_WRITE; 422 break; 423 case T_INSTR_MMU_MISS: 424 fault_type = F_INVAL; 425 rw = S_EXEC; 426 break; 427 case T_DATA_MMU_MISS: 428 case T_DATA_EXCEPTION: 429 /* 430 * The hardware doesn't update the sfsr on mmu 431 * misses so it is not easy to find out whether 432 * the access was a read or a write so we need 433 * to decode the actual instruction. 434 */ 435 fault_type = F_INVAL; 436 rw = get_accesstype(rp); 437 break; 438 default: 439 cmn_err(CE_PANIC, "trap: unknown type %x", type); 440 break; 441 } 442 /* 443 * We determine if access was done to kernel or user 444 * address space. The addr passed into trap is really the 445 * tag access register. 446 */ 447 iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT); 448 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 449 450 res = pagefault(addr, fault_type, rw, iskernel); 451 if (!iskernel && res == FC_NOMAP && 452 addr < p->p_usrstack && grow(addr)) 453 res = 0; 454 455 (void) new_mstate(curthread, mstate); 456 457 /* 458 * Restore lofault. If we resolved the fault, exit. 459 * If we didn't and lofault wasn't set, die. 460 */ 461 curthread->t_lofault = lofault; 462 463 if (res == 0) 464 goto cleanup; 465 466 if (IS_PREFETCH(instr)) { 467 /* skip prefetch instructions in kernel-land */ 468 rp->r_pc = rp->r_npc; 469 rp->r_npc += 4; 470 goto cleanup; 471 } 472 473 if ((lofault == 0 || lodebug) && 474 (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS)) 475 addr = badaddr; 476 if (lofault == 0) 477 (void) die(type, rp, addr, 0); 478 /* 479 * Cannot resolve fault. Return to lofault. 480 */ 481 if (lodebug) { 482 showregs(type, rp, addr, 0); 483 traceback((caddr_t)rp->r_sp); 484 } 485 if (FC_CODE(res) == FC_OBJERR) 486 res = FC_ERRNO(res); 487 else 488 res = EFAULT; 489 rp->r_g1 = res; 490 rp->r_pc = curthread->t_lofault; 491 rp->r_npc = curthread->t_lofault + 4; 492 goto cleanup; 493 494 case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */ 495 bzero(&siginfo, sizeof (siginfo)); 496 siginfo.si_addr = (caddr_t)rp->r_pc; 497 siginfo.si_signo = SIGSEGV; 498 siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ? 499 SEGV_ACCERR : SEGV_MAPERR; 500 fault = FLTBOUNDS; 501 break; 502 503 case T_WIN_OVERFLOW + T_USER: /* window overflow in ??? */ 504 case T_WIN_UNDERFLOW + T_USER: /* window underflow in ??? */ 505 case T_SYS_RTT_PAGE + T_USER: /* window underflow in user_rtt */ 506 case T_INSTR_MMU_MISS + T_USER: /* user instruction mmu miss */ 507 case T_DATA_MMU_MISS + T_USER: /* user data mmu miss */ 508 case T_DATA_PROT + T_USER: /* user data protection fault */ 509 switch (type) { 510 case T_INSTR_MMU_MISS + T_USER: 511 addr = (caddr_t)rp->r_pc; 512 fault_type = F_INVAL; 513 rw = S_EXEC; 514 break; 515 516 case T_DATA_MMU_MISS + T_USER: 517 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 518 fault_type = F_INVAL; 519 /* 520 * The hardware doesn't update the sfsr on mmu misses 521 * so it is not easy to find out whether the access 522 * was a read or a write so we need to decode the 523 * actual instruction. XXX BUGLY HW 524 */ 525 rw = get_accesstype(rp); 526 break; 527 528 case T_DATA_PROT + T_USER: 529 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 530 fault_type = F_PROT; 531 rw = S_WRITE; 532 break; 533 534 case T_WIN_OVERFLOW + T_USER: 535 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 536 fault_type = F_INVAL; 537 rw = S_WRITE; 538 break; 539 540 case T_WIN_UNDERFLOW + T_USER: 541 case T_SYS_RTT_PAGE + T_USER: 542 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK); 543 fault_type = F_INVAL; 544 rw = S_READ; 545 break; 546 547 default: 548 cmn_err(CE_PANIC, "trap: unknown type %x", type); 549 break; 550 } 551 552 /* 553 * If we are single stepping do not call pagefault 554 */ 555 if (stepped) { 556 res = FC_NOMAP; 557 } else { 558 caddr_t vaddr = addr; 559 size_t sz; 560 int ta; 561 562 ASSERT(!(curthread->t_flag & T_WATCHPT)); 563 watchpage = (pr_watch_active(p) && 564 type != T_WIN_OVERFLOW + T_USER && 565 type != T_WIN_UNDERFLOW + T_USER && 566 type != T_SYS_RTT_PAGE + T_USER && 567 pr_is_watchpage(addr, rw)); 568 569 if (!watchpage || 570 (sz = instr_size(rp, &vaddr, rw)) <= 0) 571 /* EMPTY */; 572 else if ((watchcode = pr_is_watchpoint(&vaddr, &ta, 573 sz, NULL, rw)) != 0) { 574 if (ta) { 575 do_watch_step(vaddr, sz, rw, 576 watchcode, rp->r_pc); 577 fault_type = F_INVAL; 578 } else { 579 bzero(&siginfo, sizeof (siginfo)); 580 siginfo.si_signo = SIGTRAP; 581 siginfo.si_code = watchcode; 582 siginfo.si_addr = vaddr; 583 siginfo.si_trapafter = 0; 584 siginfo.si_pc = (caddr_t)rp->r_pc; 585 fault = FLTWATCH; 586 break; 587 } 588 } else { 589 if (rw != S_EXEC && 590 pr_watch_emul(rp, vaddr, rw)) 591 goto out; 592 do_watch_step(vaddr, sz, rw, 0, 0); 593 fault_type = F_INVAL; 594 } 595 596 if (pr_watch_active(p) && 597 (type == T_WIN_OVERFLOW + T_USER || 598 type == T_WIN_UNDERFLOW + T_USER || 599 type == T_SYS_RTT_PAGE + T_USER)) { 600 int dotwo = (type == T_WIN_UNDERFLOW + T_USER); 601 if (copy_return_window(dotwo)) 602 goto out; 603 fault_type = F_INVAL; 604 } 605 606 res = pagefault(addr, fault_type, rw, 0); 607 608 /* 609 * If pagefault succeed, ok. 610 * Otherwise grow the stack automatically. 611 */ 612 if (res == 0 || 613 (res == FC_NOMAP && 614 type != T_INSTR_MMU_MISS + T_USER && 615 addr < p->p_usrstack && 616 grow(addr))) { 617 int ismem = prismember(&p->p_fltmask, FLTPAGE); 618 619 /* 620 * instr_size() is used to get the exact 621 * address of the fault, instead of the 622 * page of the fault. Unfortunately it is 623 * very slow, and this is an important 624 * code path. Don't call it unless 625 * correctness is needed. ie. if FLTPAGE 626 * is set, or we're profiling. 627 */ 628 629 if (curthread->t_rprof != NULL || ismem) 630 (void) instr_size(rp, &addr, rw); 631 632 lwp->lwp_lastfault = FLTPAGE; 633 lwp->lwp_lastfaddr = addr; 634 635 if (ismem) { 636 bzero(&siginfo, sizeof (siginfo)); 637 siginfo.si_addr = addr; 638 (void) stop_on_fault(FLTPAGE, &siginfo); 639 } 640 goto out; 641 } 642 643 if (type != (T_INSTR_MMU_MISS + T_USER)) { 644 /* 645 * check for non-faulting loads, also 646 * fetch the instruction to check for 647 * flush 648 */ 649 if (nfload(rp, &instr)) 650 goto out; 651 652 /* skip userland prefetch instructions */ 653 if (IS_PREFETCH(instr)) { 654 rp->r_pc = rp->r_npc; 655 rp->r_npc += 4; 656 goto out; 657 /*NOTREACHED*/ 658 } 659 660 /* 661 * check if the instruction was a 662 * flush. ABI allows users to specify 663 * an illegal address on the flush 664 * instruction so we simply return in 665 * this case. 666 * 667 * NB: the hardware should set a bit 668 * indicating this trap was caused by 669 * a flush instruction. Instruction 670 * decoding is bugly! 671 */ 672 if (IS_FLUSH(instr)) { 673 /* skip the flush instruction */ 674 rp->r_pc = rp->r_npc; 675 rp->r_npc += 4; 676 goto out; 677 /*NOTREACHED*/ 678 } 679 } else if (res == FC_PROT) { 680 report_stack_exec(p, addr); 681 } 682 683 if (tudebug) 684 showregs(type, rp, addr, 0); 685 } 686 687 /* 688 * In the case where both pagefault and grow fail, 689 * set the code to the value provided by pagefault. 690 */ 691 (void) instr_size(rp, &addr, rw); 692 bzero(&siginfo, sizeof (siginfo)); 693 siginfo.si_addr = addr; 694 if (FC_CODE(res) == FC_OBJERR) { 695 siginfo.si_errno = FC_ERRNO(res); 696 if (siginfo.si_errno != EINTR) { 697 siginfo.si_signo = SIGBUS; 698 siginfo.si_code = BUS_OBJERR; 699 fault = FLTACCESS; 700 } 701 } else { /* FC_NOMAP || FC_PROT */ 702 siginfo.si_signo = SIGSEGV; 703 siginfo.si_code = (res == FC_NOMAP) ? 704 SEGV_MAPERR : SEGV_ACCERR; 705 fault = FLTBOUNDS; 706 } 707 /* 708 * If this is the culmination of a single-step, 709 * reset the addr, code, signal and fault to 710 * indicate a hardware trace trap. 711 */ 712 if (stepped) { 713 pcb_t *pcb = &lwp->lwp_pcb; 714 715 siginfo.si_signo = 0; 716 fault = 0; 717 if (pcb->pcb_step == STEP_WASACTIVE) { 718 pcb->pcb_step = STEP_NONE; 719 pcb->pcb_tracepc = NULL; 720 oldpc = rp->r_pc - 4; 721 } 722 /* 723 * If both NORMAL_STEP and WATCH_STEP are in 724 * effect, give precedence to NORMAL_STEP. 725 * One or the other must be set at this point. 726 */ 727 ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP)); 728 if (pcb->pcb_flags & NORMAL_STEP) { 729 siginfo.si_signo = SIGTRAP; 730 siginfo.si_code = TRAP_TRACE; 731 siginfo.si_addr = (caddr_t)rp->r_pc; 732 fault = FLTTRACE; 733 if (pcb->pcb_flags & WATCH_STEP) 734 (void) undo_watch_step(NULL); 735 } else { 736 fault = undo_watch_step(&siginfo); 737 } 738 pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP); 739 } 740 break; 741 742 case T_DATA_EXCEPTION + T_USER: /* user data access exception */ 743 744 if (&vis1_partial_support != NULL) { 745 bzero(&siginfo, sizeof (siginfo)); 746 if (vis1_partial_support(rp, 747 &siginfo, &fault) == 0) 748 goto out; 749 } 750 751 if (nfload(rp, &instr)) 752 goto out; 753 if (IS_FLUSH(instr)) { 754 /* skip the flush instruction */ 755 rp->r_pc = rp->r_npc; 756 rp->r_npc += 4; 757 goto out; 758 /*NOTREACHED*/ 759 } 760 bzero(&siginfo, sizeof (siginfo)); 761 siginfo.si_addr = addr; 762 switch (X_FAULT_TYPE(mmu_fsr)) { 763 case FT_ATOMIC_NC: 764 if ((IS_SWAP(instr) && swap_nc(rp, instr)) || 765 (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) { 766 /* skip the atomic */ 767 rp->r_pc = rp->r_npc; 768 rp->r_npc += 4; 769 goto out; 770 } 771 /* fall into ... */ 772 case FT_PRIV: 773 siginfo.si_signo = SIGSEGV; 774 siginfo.si_code = SEGV_ACCERR; 775 fault = FLTBOUNDS; 776 break; 777 case FT_SPEC_LD: 778 case FT_ILL_ALT: 779 siginfo.si_signo = SIGILL; 780 siginfo.si_code = ILL_ILLADR; 781 fault = FLTILL; 782 break; 783 default: 784 siginfo.si_signo = SIGSEGV; 785 siginfo.si_code = SEGV_MAPERR; 786 fault = FLTBOUNDS; 787 break; 788 } 789 break; 790 791 case T_SYS_RTT_ALIGN + T_USER: /* user alignment error */ 792 case T_ALIGNMENT + T_USER: /* user alignment error */ 793 if (tudebug) 794 showregs(type, rp, addr, 0); 795 /* 796 * If the user has to do unaligned references 797 * the ugly stuff gets done here. 798 */ 799 alignfaults++; 800 if (&vis1_partial_support != NULL) { 801 bzero(&siginfo, sizeof (siginfo)); 802 if (vis1_partial_support(rp, 803 &siginfo, &fault) == 0) 804 goto out; 805 } 806 807 if (nfload(rp, NULL)) 808 goto out; 809 bzero(&siginfo, sizeof (siginfo)); 810 if (type == T_SYS_RTT_ALIGN + T_USER) { 811 /* 812 * Can't do unaligned stack access 813 */ 814 siginfo.si_signo = SIGBUS; 815 siginfo.si_code = BUS_ADRALN; 816 siginfo.si_addr = addr; 817 fault = FLTACCESS; 818 break; 819 } 820 if (p->p_fixalignment) { 821 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) { 822 rp->r_pc = rp->r_npc; 823 rp->r_npc += 4; 824 goto out; 825 } 826 siginfo.si_signo = SIGSEGV; 827 siginfo.si_code = SEGV_MAPERR; 828 siginfo.si_addr = badaddr; 829 fault = FLTBOUNDS; 830 } else { 831 siginfo.si_signo = SIGBUS; 832 siginfo.si_code = BUS_ADRALN; 833 if (rp->r_pc & 3) { /* offending address, if pc */ 834 siginfo.si_addr = (caddr_t)rp->r_pc; 835 } else { 836 if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN) 837 siginfo.si_addr = badaddr; 838 else 839 siginfo.si_addr = (caddr_t)rp->r_pc; 840 } 841 fault = FLTACCESS; 842 } 843 break; 844 845 case T_PRIV_INSTR + T_USER: /* privileged instruction fault */ 846 if (tudebug) 847 showregs(type, rp, (caddr_t)0, 0); 848 bzero(&siginfo, sizeof (siginfo)); 849 siginfo.si_signo = SIGILL; 850 siginfo.si_code = ILL_PRVOPC; 851 siginfo.si_addr = (caddr_t)rp->r_pc; 852 fault = FLTILL; 853 break; 854 855 case T_UNIMP_INSTR: /* priv illegal instruction fault */ 856 if (fpras_implemented) { 857 /* 858 * Call fpras_chktrap indicating that 859 * we've come from a trap handler and pass 860 * the regs. That function may choose to panic 861 * (in which case it won't return) or it may 862 * determine that a reboot is desired. In the 863 * latter case it must alter pc/npc to skip 864 * the illegal instruction and continue at 865 * a controlled address. 866 */ 867 if (&fpras_chktrap) { 868 if (fpras_chktrap(rp)) 869 goto cleanup; 870 } 871 } 872 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */ 873 instr = *(int *)rp->r_pc; 874 if ((instr & 0xc0000000) == 0x40000000) { 875 long pc; 876 877 rp->r_o7 = (long long)rp->r_pc; 878 pc = rp->r_pc + ((instr & 0x3fffffff) << 2); 879 rp->r_pc = rp->r_npc; 880 rp->r_npc = pc; 881 ill_calls++; 882 goto cleanup; 883 } 884 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */ 885 /* 886 * It's not an fpras failure and it's not SF_ERRATA_23 - die 887 */ 888 addr = (caddr_t)rp->r_pc; 889 (void) die(type, rp, addr, 0); 890 /*NOTREACHED*/ 891 892 case T_UNIMP_INSTR + T_USER: /* illegal instruction fault */ 893 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */ 894 instr = fetch_user_instr((caddr_t)rp->r_pc); 895 if ((instr & 0xc0000000) == 0x40000000) { 896 long pc; 897 898 rp->r_o7 = (long long)rp->r_pc; 899 pc = rp->r_pc + ((instr & 0x3fffffff) << 2); 900 rp->r_pc = rp->r_npc; 901 rp->r_npc = pc; 902 ill_calls++; 903 goto out; 904 } 905 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */ 906 if (tudebug) 907 showregs(type, rp, (caddr_t)0, 0); 908 bzero(&siginfo, sizeof (siginfo)); 909 /* 910 * Try to simulate the instruction. 911 */ 912 switch (simulate_unimp(rp, &badaddr)) { 913 case SIMU_RETRY: 914 goto out; /* regs are already set up */ 915 /*NOTREACHED*/ 916 917 case SIMU_SUCCESS: 918 /* skip the successfully simulated instruction */ 919 rp->r_pc = rp->r_npc; 920 rp->r_npc += 4; 921 goto out; 922 /*NOTREACHED*/ 923 924 case SIMU_FAULT: 925 siginfo.si_signo = SIGSEGV; 926 siginfo.si_code = SEGV_MAPERR; 927 siginfo.si_addr = badaddr; 928 fault = FLTBOUNDS; 929 break; 930 931 case SIMU_DZERO: 932 siginfo.si_signo = SIGFPE; 933 siginfo.si_code = FPE_INTDIV; 934 siginfo.si_addr = (caddr_t)rp->r_pc; 935 fault = FLTIZDIV; 936 break; 937 938 case SIMU_UNALIGN: 939 siginfo.si_signo = SIGBUS; 940 siginfo.si_code = BUS_ADRALN; 941 siginfo.si_addr = badaddr; 942 fault = FLTACCESS; 943 break; 944 945 case SIMU_ILLEGAL: 946 default: 947 siginfo.si_signo = SIGILL; 948 op3 = (instr >> 19) & 0x3F; 949 if ((IS_FLOAT(instr) && (op3 == IOP_V8_STQFA) || 950 (op3 == IOP_V8_STDFA))) 951 siginfo.si_code = ILL_ILLADR; 952 else 953 siginfo.si_code = ILL_ILLOPC; 954 siginfo.si_addr = (caddr_t)rp->r_pc; 955 fault = FLTILL; 956 break; 957 } 958 break; 959 960 case T_UNIMP_LDD + T_USER: 961 case T_UNIMP_STD + T_USER: 962 if (tudebug) 963 showregs(type, rp, (caddr_t)0, 0); 964 switch (simulate_lddstd(rp, &badaddr)) { 965 case SIMU_SUCCESS: 966 /* skip the successfully simulated instruction */ 967 rp->r_pc = rp->r_npc; 968 rp->r_npc += 4; 969 goto out; 970 /*NOTREACHED*/ 971 972 case SIMU_FAULT: 973 if (nfload(rp, NULL)) 974 goto out; 975 siginfo.si_signo = SIGSEGV; 976 siginfo.si_code = SEGV_MAPERR; 977 siginfo.si_addr = badaddr; 978 fault = FLTBOUNDS; 979 break; 980 981 case SIMU_UNALIGN: 982 if (nfload(rp, NULL)) 983 goto out; 984 siginfo.si_signo = SIGBUS; 985 siginfo.si_code = BUS_ADRALN; 986 siginfo.si_addr = badaddr; 987 fault = FLTACCESS; 988 break; 989 990 case SIMU_ILLEGAL: 991 default: 992 siginfo.si_signo = SIGILL; 993 siginfo.si_code = ILL_ILLOPC; 994 siginfo.si_addr = (caddr_t)rp->r_pc; 995 fault = FLTILL; 996 break; 997 } 998 break; 999 1000 case T_UNIMP_LDD: 1001 case T_UNIMP_STD: 1002 if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) { 1003 /* skip the successfully simulated instruction */ 1004 rp->r_pc = rp->r_npc; 1005 rp->r_npc += 4; 1006 goto cleanup; 1007 /*NOTREACHED*/ 1008 } 1009 /* 1010 * A third party driver executed an {LDD,STD,LDDA,STDA} 1011 * that we couldn't simulate. 1012 */ 1013 if (nfload(rp, NULL)) 1014 goto cleanup; 1015 1016 if (curthread->t_lofault) { 1017 if (lodebug) { 1018 showregs(type, rp, addr, 0); 1019 traceback((caddr_t)rp->r_sp); 1020 } 1021 rp->r_g1 = EFAULT; 1022 rp->r_pc = curthread->t_lofault; 1023 rp->r_npc = rp->r_pc + 4; 1024 goto cleanup; 1025 } 1026 (void) die(type, rp, addr, 0); 1027 /*NOTREACHED*/ 1028 1029 case T_IDIV0 + T_USER: /* integer divide by zero */ 1030 case T_DIV0 + T_USER: /* integer divide by zero */ 1031 if (tudebug && tudebugfpe) 1032 showregs(type, rp, (caddr_t)0, 0); 1033 bzero(&siginfo, sizeof (siginfo)); 1034 siginfo.si_signo = SIGFPE; 1035 siginfo.si_code = FPE_INTDIV; 1036 siginfo.si_addr = (caddr_t)rp->r_pc; 1037 fault = FLTIZDIV; 1038 break; 1039 1040 case T_INT_OVERFLOW + T_USER: /* integer overflow */ 1041 if (tudebug && tudebugfpe) 1042 showregs(type, rp, (caddr_t)0, 0); 1043 bzero(&siginfo, sizeof (siginfo)); 1044 siginfo.si_signo = SIGFPE; 1045 siginfo.si_code = FPE_INTOVF; 1046 siginfo.si_addr = (caddr_t)rp->r_pc; 1047 fault = FLTIOVF; 1048 break; 1049 1050 case T_BREAKPOINT + T_USER: /* breakpoint trap (t 1) */ 1051 if (tudebug && tudebugbpt) 1052 showregs(type, rp, (caddr_t)0, 0); 1053 bzero(&siginfo, sizeof (siginfo)); 1054 siginfo.si_signo = SIGTRAP; 1055 siginfo.si_code = TRAP_BRKPT; 1056 siginfo.si_addr = (caddr_t)rp->r_pc; 1057 fault = FLTBPT; 1058 break; 1059 1060 case T_TAG_OVERFLOW + T_USER: /* tag overflow (taddcctv, tsubcctv) */ 1061 if (tudebug) 1062 showregs(type, rp, (caddr_t)0, 0); 1063 bzero(&siginfo, sizeof (siginfo)); 1064 siginfo.si_signo = SIGEMT; 1065 siginfo.si_code = EMT_TAGOVF; 1066 siginfo.si_addr = (caddr_t)rp->r_pc; 1067 fault = FLTACCESS; 1068 break; 1069 1070 case T_FLUSH_PCB + T_USER: /* finish user window overflow */ 1071 case T_FLUSHW + T_USER: /* finish user window flush */ 1072 /* 1073 * This trap is entered from sys_rtt in locore.s when, 1074 * upon return to user is is found that there are user 1075 * windows in pcb_wbuf. This happens because they could 1076 * not be saved on the user stack, either because it 1077 * wasn't resident or because it was misaligned. 1078 */ 1079 { 1080 int error; 1081 caddr_t sp; 1082 1083 error = flush_user_windows_to_stack(&sp); 1084 /* 1085 * Possible errors: 1086 * error copying out 1087 * unaligned stack pointer 1088 * The first is given to us as the return value 1089 * from flush_user_windows_to_stack(). The second 1090 * results in residual windows in the pcb. 1091 */ 1092 if (error != 0) { 1093 /* 1094 * EINTR comes from a signal during copyout; 1095 * we should not post another signal. 1096 */ 1097 if (error != EINTR) { 1098 /* 1099 * Zap the process with a SIGSEGV - process 1100 * may be managing its own stack growth by 1101 * taking SIGSEGVs on a different signal stack. 1102 */ 1103 bzero(&siginfo, sizeof (siginfo)); 1104 siginfo.si_signo = SIGSEGV; 1105 siginfo.si_code = SEGV_MAPERR; 1106 siginfo.si_addr = sp; 1107 fault = FLTBOUNDS; 1108 } 1109 break; 1110 } else if (mpcb->mpcb_wbcnt) { 1111 bzero(&siginfo, sizeof (siginfo)); 1112 siginfo.si_signo = SIGILL; 1113 siginfo.si_code = ILL_BADSTK; 1114 siginfo.si_addr = (caddr_t)rp->r_pc; 1115 fault = FLTILL; 1116 break; 1117 } 1118 } 1119 1120 /* 1121 * T_FLUSHW is used when handling a ta 0x3 -- the old flush 1122 * window trap -- which is implemented by executing the 1123 * flushw instruction. The flushw can trap if any of the 1124 * stack pages are not writable for whatever reason. In this 1125 * case only, we advance the pc to the next instruction so 1126 * that the user thread doesn't needlessly execute the trap 1127 * again. Normally this wouldn't be a problem -- we'll 1128 * usually only end up here if this is the first touch to a 1129 * stack page -- since the second execution won't trap, but 1130 * if there's a watchpoint on the stack page the user thread 1131 * would spin, continuously executing the trap instruction. 1132 */ 1133 if (type == T_FLUSHW + T_USER) { 1134 rp->r_pc = rp->r_npc; 1135 rp->r_npc += 4; 1136 } 1137 goto out; 1138 1139 case T_AST + T_USER: /* profiling or resched pseudo trap */ 1140 if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) { 1141 lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW; 1142 if (kcpc_overflow_ast()) { 1143 /* 1144 * Signal performance counter overflow 1145 */ 1146 if (tudebug) 1147 showregs(type, rp, (caddr_t)0, 0); 1148 bzero(&siginfo, sizeof (siginfo)); 1149 siginfo.si_signo = SIGEMT; 1150 siginfo.si_code = EMT_CPCOVF; 1151 siginfo.si_addr = (caddr_t)rp->r_pc; 1152 /* for trap_cleanup(), below */ 1153 oldpc = rp->r_pc - 4; 1154 fault = FLTCPCOVF; 1155 } 1156 } 1157 1158 /* 1159 * The CPC_OVERFLOW check above may already have populated 1160 * siginfo and set fault, so the checks below must not 1161 * touch these and the functions they call must use 1162 * trapsig() directly. 1163 */ 1164 1165 if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) { 1166 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR; 1167 trap_async_hwerr(); 1168 } 1169 1170 if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) { 1171 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR; 1172 trap_async_berr_bto(ASYNC_BERR, rp); 1173 } 1174 1175 if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) { 1176 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO; 1177 trap_async_berr_bto(ASYNC_BTO, rp); 1178 } 1179 1180 break; 1181 } 1182 1183 trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc); 1184 1185 out: /* We can't get here from a system trap */ 1186 ASSERT(type & T_USER); 1187 trap_rtt(); 1188 (void) new_mstate(curthread, mstate); 1189 /* Kernel probe */ 1190 TNF_PROBE_1(thread_state, "thread", /* CSTYLED */, 1191 tnf_microstate, state, LMS_USER); 1192 1193 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit"); 1194 return; 1195 1196 cleanup: /* system traps end up here */ 1197 ASSERT(!(type & T_USER)); 1198 1199 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit"); 1200 } 1201 1202 void 1203 trap_cleanup( 1204 struct regs *rp, 1205 uint_t fault, 1206 k_siginfo_t *sip, 1207 int restartable) 1208 { 1209 extern void aio_cleanup(); 1210 proc_t *p = ttoproc(curthread); 1211 klwp_id_t lwp = ttolwp(curthread); 1212 1213 if (fault) { 1214 /* 1215 * Remember the fault and fault address 1216 * for real-time (SIGPROF) profiling. 1217 */ 1218 lwp->lwp_lastfault = fault; 1219 lwp->lwp_lastfaddr = sip->si_addr; 1220 1221 DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip); 1222 1223 /* 1224 * If a debugger has declared this fault to be an 1225 * event of interest, stop the lwp. Otherwise just 1226 * deliver the associated signal. 1227 */ 1228 if (sip->si_signo != SIGKILL && 1229 prismember(&p->p_fltmask, fault) && 1230 stop_on_fault(fault, sip) == 0) 1231 sip->si_signo = 0; 1232 } 1233 1234 if (sip->si_signo) 1235 trapsig(sip, restartable); 1236 1237 if (lwp->lwp_oweupc) 1238 profil_tick(rp->r_pc); 1239 1240 if (curthread->t_astflag | curthread->t_sig_check) { 1241 /* 1242 * Turn off the AST flag before checking all the conditions that 1243 * may have caused an AST. This flag is on whenever a signal or 1244 * unusual condition should be handled after the next trap or 1245 * syscall. 1246 */ 1247 astoff(curthread); 1248 curthread->t_sig_check = 0; 1249 1250 /* 1251 * The following check is legal for the following reasons: 1252 * 1) The thread we are checking, is ourselves, so there is 1253 * no way the proc can go away. 1254 * 2) The only time we need to be protected by the 1255 * lock is if the binding is changed. 1256 * 1257 * Note we will still take the lock and check the binding 1258 * if the condition was true without the lock held. This 1259 * prevents lock contention among threads owned by the 1260 * same proc. 1261 */ 1262 1263 if (curthread->t_proc_flag & TP_CHANGEBIND) { 1264 mutex_enter(&p->p_lock); 1265 if (curthread->t_proc_flag & TP_CHANGEBIND) { 1266 timer_lwpbind(); 1267 curthread->t_proc_flag &= ~TP_CHANGEBIND; 1268 } 1269 mutex_exit(&p->p_lock); 1270 } 1271 1272 /* 1273 * for kaio requests that are on the per-process poll queue, 1274 * aiop->aio_pollq, they're AIO_POLL bit is set, the kernel 1275 * should copyout their result_t to user memory. by copying 1276 * out the result_t, the user can poll on memory waiting 1277 * for the kaio request to complete. 1278 */ 1279 if (p->p_aio) 1280 aio_cleanup(0); 1281 1282 /* 1283 * If this LWP was asked to hold, call holdlwp(), which will 1284 * stop. holdlwps() sets this up and calls pokelwps() which 1285 * sets the AST flag. 1286 * 1287 * Also check TP_EXITLWP, since this is used by fresh new LWPs 1288 * through lwp_rtt(). That flag is set if the lwp_create(2) 1289 * syscall failed after creating the LWP. 1290 */ 1291 if (ISHOLD(p)) 1292 holdlwp(); 1293 1294 /* 1295 * All code that sets signals and makes ISSIG evaluate true must 1296 * set t_astflag afterwards. 1297 */ 1298 if (ISSIG_PENDING(curthread, lwp, p)) { 1299 if (issig(FORREAL)) 1300 psig(); 1301 curthread->t_sig_check = 1; 1302 } 1303 1304 if (curthread->t_rprof != NULL) { 1305 realsigprof(0, 0); 1306 curthread->t_sig_check = 1; 1307 } 1308 } 1309 } 1310 1311 /* 1312 * Called from fp_traps when a floating point trap occurs. 1313 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr), 1314 * because mmu_fsr (now changed to code) is always 0. 1315 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(), 1316 * because the simulator only simulates multiply and divide instructions, 1317 * which would not cause floating point traps in the first place. 1318 * XXX - Supervisor mode floating point traps? 1319 */ 1320 void 1321 fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code) 1322 { 1323 proc_t *p = ttoproc(curthread); 1324 klwp_id_t lwp = ttolwp(curthread); 1325 k_siginfo_t siginfo; 1326 uint_t op3, fault = 0; 1327 int mstate; 1328 char *badaddr; 1329 kfpu_t *fp; 1330 struct fpq *pfpq; 1331 uint32_t inst; 1332 utrap_handler_t *utrapp; 1333 1334 CPU_STATS_ADDQ(CPU, sys, trap, 1); 1335 1336 ASSERT(curthread->t_schedflag & TS_DONT_SWAP); 1337 1338 if (USERMODE(rp->r_tstate)) { 1339 /* 1340 * Set lwp_state before trying to acquire any 1341 * adaptive lock 1342 */ 1343 ASSERT(lwp != NULL); 1344 lwp->lwp_state = LWP_SYS; 1345 /* 1346 * Set up the current cred to use during this trap. u_cred 1347 * no longer exists. t_cred is used instead. 1348 * The current process credential applies to the thread for 1349 * the entire trap. If trapping from the kernel, this 1350 * should already be set up. 1351 */ 1352 if (curthread->t_cred != p->p_cred) { 1353 cred_t *oldcred = curthread->t_cred; 1354 /* 1355 * DTrace accesses t_cred in probe context. t_cred 1356 * must always be either NULL, or point to a valid, 1357 * allocated cred structure. 1358 */ 1359 curthread->t_cred = crgetcred(); 1360 crfree(oldcred); 1361 } 1362 ASSERT(lwp->lwp_regs == rp); 1363 mstate = new_mstate(curthread, LMS_TRAP); 1364 siginfo.si_signo = 0; 1365 type |= T_USER; 1366 } 1367 1368 TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER, 1369 "C_fpu_trap_handler_enter:type %x", type); 1370 1371 if (tudebug && tudebugfpe) 1372 showregs(type, rp, addr, 0); 1373 1374 bzero(&siginfo, sizeof (siginfo)); 1375 siginfo.si_code = code; 1376 siginfo.si_addr = addr; 1377 1378 switch (type) { 1379 1380 case T_FP_EXCEPTION_IEEE + T_USER: /* FPU arithmetic exception */ 1381 /* 1382 * FPU arithmetic exception - fake up a fpq if we 1383 * came here directly from _fp_ieee_exception, 1384 * which is indicated by a zero fpu_qcnt. 1385 */ 1386 fp = lwptofpu(curthread->t_lwp); 1387 utrapp = curthread->t_procp->p_utraps; 1388 if (fp->fpu_qcnt == 0) { 1389 inst = fetch_user_instr((caddr_t)rp->r_pc); 1390 lwp->lwp_state = LWP_SYS; 1391 pfpq = &fp->fpu_q->FQu.fpq; 1392 pfpq->fpq_addr = (uint32_t *)rp->r_pc; 1393 pfpq->fpq_instr = inst; 1394 fp->fpu_qcnt = 1; 1395 fp->fpu_q_entrysize = sizeof (struct fpq); 1396 #ifdef SF_V9_TABLE_28 1397 /* 1398 * Spitfire and blackbird followed the SPARC V9 manual 1399 * paragraph 3 of section 5.1.7.9 FSR_current_exception 1400 * (cexc) for setting fsr.cexc bits on underflow and 1401 * overflow traps when the fsr.tem.inexact bit is set, 1402 * instead of following Table 28. Bugid 1263234. 1403 */ 1404 { 1405 extern int spitfire_bb_fsr_bug; 1406 1407 if (spitfire_bb_fsr_bug && 1408 (fp->fpu_fsr & FSR_TEM_NX)) { 1409 if (((fp->fpu_fsr & FSR_TEM_OF) == 0) && 1410 (fp->fpu_fsr & FSR_CEXC_OF)) { 1411 fp->fpu_fsr &= ~FSR_CEXC_OF; 1412 fp->fpu_fsr |= FSR_CEXC_NX; 1413 _fp_write_pfsr(&fp->fpu_fsr); 1414 siginfo.si_code = FPE_FLTRES; 1415 } 1416 if (((fp->fpu_fsr & FSR_TEM_UF) == 0) && 1417 (fp->fpu_fsr & FSR_CEXC_UF)) { 1418 fp->fpu_fsr &= ~FSR_CEXC_UF; 1419 fp->fpu_fsr |= FSR_CEXC_NX; 1420 _fp_write_pfsr(&fp->fpu_fsr); 1421 siginfo.si_code = FPE_FLTRES; 1422 } 1423 } 1424 } 1425 #endif /* SF_V9_TABLE_28 */ 1426 rp->r_pc = rp->r_npc; 1427 rp->r_npc += 4; 1428 } else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) { 1429 /* 1430 * The user had a trap handler installed. Jump to 1431 * the trap handler instead of signalling the process. 1432 */ 1433 rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754]; 1434 rp->r_npc = rp->r_pc + 4; 1435 break; 1436 } 1437 siginfo.si_signo = SIGFPE; 1438 fault = FLTFPE; 1439 break; 1440 1441 case T_DATA_EXCEPTION + T_USER: /* user data access exception */ 1442 siginfo.si_signo = SIGSEGV; 1443 fault = FLTBOUNDS; 1444 break; 1445 1446 case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */ 1447 case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */ 1448 alignfaults++; 1449 lwp->lwp_state = LWP_SYS; 1450 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) { 1451 rp->r_pc = rp->r_npc; 1452 rp->r_npc += 4; 1453 goto out; 1454 } 1455 fp = lwptofpu(curthread->t_lwp); 1456 fp->fpu_qcnt = 0; 1457 siginfo.si_signo = SIGSEGV; 1458 siginfo.si_code = SEGV_MAPERR; 1459 siginfo.si_addr = badaddr; 1460 fault = FLTBOUNDS; 1461 break; 1462 1463 case T_ALIGNMENT + T_USER: /* user alignment error */ 1464 /* 1465 * If the user has to do unaligned references 1466 * the ugly stuff gets done here. 1467 * Only handles vanilla loads and stores. 1468 */ 1469 alignfaults++; 1470 if (p->p_fixalignment) { 1471 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) { 1472 rp->r_pc = rp->r_npc; 1473 rp->r_npc += 4; 1474 goto out; 1475 } 1476 siginfo.si_signo = SIGSEGV; 1477 siginfo.si_code = SEGV_MAPERR; 1478 siginfo.si_addr = badaddr; 1479 fault = FLTBOUNDS; 1480 } else { 1481 siginfo.si_signo = SIGBUS; 1482 siginfo.si_code = BUS_ADRALN; 1483 if (rp->r_pc & 3) { /* offending address, if pc */ 1484 siginfo.si_addr = (caddr_t)rp->r_pc; 1485 } else { 1486 if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN) 1487 siginfo.si_addr = badaddr; 1488 else 1489 siginfo.si_addr = (caddr_t)rp->r_pc; 1490 } 1491 fault = FLTACCESS; 1492 } 1493 break; 1494 1495 case T_UNIMP_INSTR + T_USER: /* illegal instruction fault */ 1496 siginfo.si_signo = SIGILL; 1497 inst = fetch_user_instr((caddr_t)rp->r_pc); 1498 op3 = (inst >> 19) & 0x3F; 1499 if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA)) 1500 siginfo.si_code = ILL_ILLADR; 1501 else 1502 siginfo.si_code = ILL_ILLTRP; 1503 fault = FLTILL; 1504 break; 1505 1506 default: 1507 (void) die(type, rp, addr, 0); 1508 /*NOTREACHED*/ 1509 } 1510 1511 /* 1512 * We can't get here from a system trap 1513 * Never restart any instruction which got here from an fp trap. 1514 */ 1515 ASSERT(type & T_USER); 1516 1517 trap_cleanup(rp, fault, &siginfo, 0); 1518 out: 1519 trap_rtt(); 1520 (void) new_mstate(curthread, mstate); 1521 } 1522 1523 void 1524 trap_rtt(void) 1525 { 1526 klwp_id_t lwp = ttolwp(curthread); 1527 1528 /* 1529 * Restore register window if a debugger modified it. 1530 * Set up to perform a single-step if a debugger requested it. 1531 */ 1532 if (lwp->lwp_pcb.pcb_xregstat != XREGNONE) 1533 xregrestore(lwp, 0); 1534 1535 /* 1536 * Set state to LWP_USER here so preempt won't give us a kernel 1537 * priority if it occurs after this point. Call CL_TRAPRET() to 1538 * restore the user-level priority. 1539 * 1540 * It is important that no locks (other than spinlocks) be entered 1541 * after this point before returning to user mode (unless lwp_state 1542 * is set back to LWP_SYS). 1543 */ 1544 lwp->lwp_state = LWP_USER; 1545 if (curthread->t_trapret) { 1546 curthread->t_trapret = 0; 1547 thread_lock(curthread); 1548 CL_TRAPRET(curthread); 1549 thread_unlock(curthread); 1550 } 1551 if (CPU->cpu_runrun) 1552 preempt(); 1553 if (lwp->lwp_pcb.pcb_step != STEP_NONE) 1554 prdostep(); 1555 1556 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit"); 1557 } 1558 1559 #define IS_LDASI(o) \ 1560 ((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \ 1561 (o) == (uint32_t)0xC1800000) 1562 #define IS_IMM_ASI(i) (((i) & 0x2000) == 0) 1563 #define IS_ASINF(a) (((a) & 0xF6) == 0x82) 1564 #define IS_LDDA(i) (((i) & 0xC1F80000) == 0xC0980000) 1565 1566 static int 1567 nfload(struct regs *rp, int *instrp) 1568 { 1569 uint_t instr, asi, op3, rd; 1570 size_t len; 1571 struct as *as; 1572 caddr_t addr; 1573 FPU_DREGS_TYPE zero; 1574 extern int segnf_create(); 1575 1576 if (USERMODE(rp->r_tstate)) 1577 instr = fetch_user_instr((caddr_t)rp->r_pc); 1578 else 1579 instr = *(int *)rp->r_pc; 1580 1581 if (instrp) 1582 *instrp = instr; 1583 1584 op3 = (uint_t)(instr & 0xC1E00000); 1585 if (!IS_LDASI(op3)) 1586 return (0); 1587 if (IS_IMM_ASI(instr)) 1588 asi = (instr & 0x1FE0) >> 5; 1589 else 1590 asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) & 1591 TSTATE_ASI_MASK); 1592 if (!IS_ASINF(asi)) 1593 return (0); 1594 if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) { 1595 len = 1; 1596 as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas; 1597 as_rangelock(as); 1598 if (as_gap(as, len, &addr, &len, 0, addr) == 0) 1599 (void) as_map(as, addr, len, segnf_create, NULL); 1600 as_rangeunlock(as); 1601 } 1602 zero = 0; 1603 rd = (instr >> 25) & 0x1f; 1604 if (IS_FLOAT(instr)) { 1605 uint_t dbflg = ((instr >> 19) & 3) == 3; 1606 1607 if (dbflg) { /* clever v9 reg encoding */ 1608 if (rd & 1) 1609 rd = (rd & 0x1e) | 0x20; 1610 rd >>= 1; 1611 } 1612 if (fpu_exists) { 1613 if (!(_fp_read_fprs() & FPRS_FEF)) 1614 fp_enable(); 1615 1616 if (dbflg) 1617 _fp_write_pdreg(&zero, rd); 1618 else 1619 _fp_write_pfreg((uint_t *)&zero, rd); 1620 } else { 1621 kfpu_t *fp = lwptofpu(curthread->t_lwp); 1622 1623 if (!fp->fpu_en) 1624 fp_enable(); 1625 1626 if (dbflg) 1627 fp->fpu_fr.fpu_dregs[rd] = zero; 1628 else 1629 fp->fpu_fr.fpu_regs[rd] = 0; 1630 } 1631 } else { 1632 (void) putreg(&zero, rp, rd, &addr); 1633 if (IS_LDDA(instr)) 1634 (void) putreg(&zero, rp, rd + 1, &addr); 1635 } 1636 rp->r_pc = rp->r_npc; 1637 rp->r_npc += 4; 1638 return (1); 1639 } 1640 1641 kmutex_t atomic_nc_mutex; 1642 1643 /* 1644 * The following couple of routines are for userland drivers which 1645 * do atomics to noncached addresses. This sort of worked on previous 1646 * platforms -- the operation really wasn't atomic, but it didn't generate 1647 * a trap as sun4u systems do. 1648 */ 1649 static int 1650 swap_nc(struct regs *rp, int instr) 1651 { 1652 uint64_t rdata, mdata; 1653 caddr_t addr, badaddr; 1654 uint_t tmp, rd; 1655 1656 (void) flush_user_windows_to_stack(NULL); 1657 rd = (instr >> 25) & 0x1f; 1658 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS) 1659 return (0); 1660 if (getreg(rp, rd, &rdata, &badaddr)) 1661 return (0); 1662 mutex_enter(&atomic_nc_mutex); 1663 if (fuword32(addr, &tmp) == -1) { 1664 mutex_exit(&atomic_nc_mutex); 1665 return (0); 1666 } 1667 mdata = (u_longlong_t)tmp; 1668 if (suword32(addr, (uint32_t)rdata) == -1) { 1669 mutex_exit(&atomic_nc_mutex); 1670 return (0); 1671 } 1672 (void) putreg(&mdata, rp, rd, &badaddr); 1673 mutex_exit(&atomic_nc_mutex); 1674 return (1); 1675 } 1676 1677 static int 1678 ldstub_nc(struct regs *rp, int instr) 1679 { 1680 uint64_t mdata; 1681 caddr_t addr, badaddr; 1682 uint_t rd; 1683 uint8_t tmp; 1684 1685 (void) flush_user_windows_to_stack(NULL); 1686 rd = (instr >> 25) & 0x1f; 1687 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS) 1688 return (0); 1689 mutex_enter(&atomic_nc_mutex); 1690 if (fuword8(addr, &tmp) == -1) { 1691 mutex_exit(&atomic_nc_mutex); 1692 return (0); 1693 } 1694 mdata = (u_longlong_t)tmp; 1695 if (suword8(addr, (uint8_t)0xff) == -1) { 1696 mutex_exit(&atomic_nc_mutex); 1697 return (0); 1698 } 1699 (void) putreg(&mdata, rp, rd, &badaddr); 1700 mutex_exit(&atomic_nc_mutex); 1701 return (1); 1702 } 1703 1704 /* 1705 * This function helps instr_size() determine the operand size. 1706 * It is called for the extended ldda/stda asi's. 1707 */ 1708 int 1709 extended_asi_size(int asi) 1710 { 1711 switch (asi) { 1712 case ASI_PST8_P: 1713 case ASI_PST8_S: 1714 case ASI_PST16_P: 1715 case ASI_PST16_S: 1716 case ASI_PST32_P: 1717 case ASI_PST32_S: 1718 case ASI_PST8_PL: 1719 case ASI_PST8_SL: 1720 case ASI_PST16_PL: 1721 case ASI_PST16_SL: 1722 case ASI_PST32_PL: 1723 case ASI_PST32_SL: 1724 return (8); 1725 case ASI_FL8_P: 1726 case ASI_FL8_S: 1727 case ASI_FL8_PL: 1728 case ASI_FL8_SL: 1729 return (1); 1730 case ASI_FL16_P: 1731 case ASI_FL16_S: 1732 case ASI_FL16_PL: 1733 case ASI_FL16_SL: 1734 return (2); 1735 case ASI_BLK_P: 1736 case ASI_BLK_S: 1737 case ASI_BLK_PL: 1738 case ASI_BLK_SL: 1739 case ASI_BLK_COMMIT_P: 1740 case ASI_BLK_COMMIT_S: 1741 return (64); 1742 } 1743 1744 return (0); 1745 } 1746 1747 /* 1748 * Patch non-zero to disable preemption of threads in the kernel. 1749 */ 1750 int IGNORE_KERNEL_PREEMPTION = 0; /* XXX - delete this someday */ 1751 1752 struct kpreempt_cnts { /* kernel preemption statistics */ 1753 int kpc_idle; /* executing idle thread */ 1754 int kpc_intr; /* executing interrupt thread */ 1755 int kpc_clock; /* executing clock thread */ 1756 int kpc_blocked; /* thread has blocked preemption (t_preempt) */ 1757 int kpc_notonproc; /* thread is surrendering processor */ 1758 int kpc_inswtch; /* thread has ratified scheduling decision */ 1759 int kpc_prilevel; /* processor interrupt level is too high */ 1760 int kpc_apreempt; /* asynchronous preemption */ 1761 int kpc_spreempt; /* synchronous preemption */ 1762 } kpreempt_cnts; 1763 1764 /* 1765 * kernel preemption: forced rescheduling 1766 * preempt the running kernel thread. 1767 */ 1768 void 1769 kpreempt(int asyncspl) 1770 { 1771 if (IGNORE_KERNEL_PREEMPTION) { 1772 aston(CPU->cpu_dispthread); 1773 return; 1774 } 1775 /* 1776 * Check that conditions are right for kernel preemption 1777 */ 1778 do { 1779 if (curthread->t_preempt) { 1780 /* 1781 * either a privileged thread (idle, panic, interrupt) 1782 * or will check when t_preempt is lowered 1783 */ 1784 if (curthread->t_pri < 0) 1785 kpreempt_cnts.kpc_idle++; 1786 else if (curthread->t_flag & T_INTR_THREAD) { 1787 kpreempt_cnts.kpc_intr++; 1788 if (curthread->t_pil == CLOCK_LEVEL) 1789 kpreempt_cnts.kpc_clock++; 1790 } else 1791 kpreempt_cnts.kpc_blocked++; 1792 aston(CPU->cpu_dispthread); 1793 return; 1794 } 1795 if (curthread->t_state != TS_ONPROC || 1796 curthread->t_disp_queue != CPU->cpu_disp) { 1797 /* this thread will be calling swtch() shortly */ 1798 kpreempt_cnts.kpc_notonproc++; 1799 if (CPU->cpu_thread != CPU->cpu_dispthread) { 1800 /* already in swtch(), force another */ 1801 kpreempt_cnts.kpc_inswtch++; 1802 siron(); 1803 } 1804 return; 1805 } 1806 1807 if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) : 1808 getpil()) >= DISP_LEVEL) { 1809 /* 1810 * We can't preempt this thread if it is at 1811 * a PIL >= DISP_LEVEL since it may be holding 1812 * a spin lock (like sched_lock). 1813 */ 1814 siron(); /* check back later */ 1815 kpreempt_cnts.kpc_prilevel++; 1816 return; 1817 } 1818 1819 /* 1820 * block preemption so we don't have multiple preemptions 1821 * pending on the interrupt stack 1822 */ 1823 curthread->t_preempt++; 1824 if (asyncspl != KPREEMPT_SYNC) { 1825 splx(asyncspl); 1826 kpreempt_cnts.kpc_apreempt++; 1827 } else 1828 kpreempt_cnts.kpc_spreempt++; 1829 1830 preempt(); 1831 curthread->t_preempt--; 1832 } while (CPU->cpu_kprunrun); 1833 } 1834 1835 static enum seg_rw 1836 get_accesstype(struct regs *rp) 1837 { 1838 uint32_t instr; 1839 1840 if (USERMODE(rp->r_tstate)) 1841 instr = fetch_user_instr((caddr_t)rp->r_pc); 1842 else 1843 instr = *(uint32_t *)rp->r_pc; 1844 1845 if (IS_FLUSH(instr)) 1846 return (S_OTHER); 1847 1848 if (IS_STORE(instr)) 1849 return (S_WRITE); 1850 else 1851 return (S_READ); 1852 } 1853