/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

static int
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}

	return (0);	/* avoid optimization of restore in call's delay slot */
}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)
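/*
 * Editorial note (a hedged illustration, not part of the original source):
 * the masks above select the SPARC format-3 "op" field (bits 31:30) and
 * "op3" field (bits 24:19).  IS_FLUSH, for instance, tests op == 2 and
 * op3 == 0x3b (FLUSH); IS_SWAP and IS_LDSTUB test op == 3 with op3 0x0f
 * and 0x0d; IS_PREFETCH leaves two op3 bits unmasked so that both
 * PREFETCH (op3 0x2d) and PREFETCHA (op3 0x3d) match.  IS_STORE's bit 21
 * is op3 bit 2, which roughly distinguishes stores from loads.  A worked
 * example, assuming the standard V9 encoding of "flush %g1" (op=2, rd=0,
 * op3=0x3b, rs1=1, i=0, rs2=0):
 */
#if 0
	uint32_t w = (2u << 30) | (0x3b << 19) | (1 << 14);	/* 0x81d84000 */
	ASSERT(IS_FLUSH(w));		/* (w & 0xc1f80000) == 0x81d80000 */
	ASSERT(!IS_STORE(w));		/* bit 21 (op3 bit 2) is clear */
#endif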

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() has been called recursively and may
			 * have stopped the process, so do single step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
	case T_DATA_MMU_MISS:
	case T_INSTR_MMU_MISS + T_USER:
	case T_DATA_MMU_MISS + T_USER:
	case T_DATA_PROT + T_USER:
	case T_AST + T_USER:
	case T_SYS_RTT_PAGE + T_USER:
	case T_FLUSH_PCB + T_USER:
	case T_FLUSHW + T_USER:
		break;

	default:
		FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
		    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
		break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_ILLTRP;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type & ~T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to the T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault
			 * if we're doing a copyin/copyout from a kernel
			 * thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:	/* system data mmu miss */
	case T_DATA_PROT:	/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
		lofault = curthread->t_lofault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses, so it is not easy to find out whether
			 * the access was a read or a write; we need
			 * to decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER:	/* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses,
			 * so it is not easy to find out whether the access
			 * was a read or a write; we need to decode the
			 * actual instruction.
			 * XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault succeeds, ok.
			 * Otherwise grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault.  Unfortunately it is
				 * very slow, and this is an important
				 * code path.  Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set, or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * check if the instruction was a
				 * flush.  ABI allows users to specify
				 * an illegal address on the flush
				 * instruction so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to NORMAL_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if (pcb->pcb_flags & NORMAL_STEP) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
				if (pcb->pcb_flags & WATCH_STEP)
					(void) undo_watch_step(NULL);
			} else {
				fault = undo_watch_step(&siginfo);
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
			    (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user, it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
		{
			int error;
			caddr_t sp;

			error = flush_user_windows_to_stack(&sp);
			/*
			 * Possible errors:
			 *	error copying out
			 *	unaligned stack pointer
			 * The first is given to us as the return value
			 * from flush_user_windows_to_stack().  The second
			 * results in residual windows in the pcb.
			 */
			if (error != 0) {
				/*
				 * EINTR comes from a signal during copyout;
				 * we should not post another signal.
				 */
				if (error != EINTR) {
					/*
					 * Zap the process with a SIGSEGV - the
					 * process may be managing its own stack
					 * growth by taking SIGSEGVs on a
					 * different signal stack.
					 */
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGSEGV;
					siginfo.si_code = SEGV_MAPERR;
					siginfo.si_addr = sp;
					fault = FLTBOUNDS;
				}
				break;
			} else if (mpcb->mpcb_wbcnt) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADSTK;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTILL;
				break;
			}
		}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction.  The flushw can trap if any of the
		 * stack pages are not writable for whatever reason.
		 * In this case only, we advance the pc to the next
		 * instruction so that the user thread doesn't needlessly
		 * execute the trap again.  Normally this wouldn't be a
		 * problem -- we'll usually only end up here if this is the
		 * first touch to a stack page -- since the second execution
		 * won't trap, but if there's a watchpoint on the stack page
		 * the user thread would spin, continuously executing the
		 * trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:	/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
	    tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}
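/*
 * Editorial aside (a hedged sketch, not part of the original source):
 * the kernel-mode T_DATA_MMU_MISS path above resolves copy faults
 * through two hooks.  A caller expecting a possible data access fault
 * can register on_trap() protection; trap() then redirects the pc to
 * the registered ot_trampoline instead of panicking.  The usual
 * consumer pattern, assuming the <sys/ontrap.h> interfaces behave like
 * setjmp()/longjmp() (peek_word is illustrative):
 */
#if 0
static int
peek_word(uint32_t *p, uint32_t *valp)
{
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS)) {
		/* arrived here via the ot_trampoline reset in trap() */
		no_trap();
		return (-1);
	}
	*valp = *p;		/* may fault; protected by t_ontrap */
	no_trap();
	return (0);
}
#endif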

void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions
		 * that may have caused an AST.  This flag is on whenever a
		 * signal or unusual condition should be handled after the
		 * next trap or syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there
		 *	   is no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq, their AIO_POLL bit is set; the kernel
		 * should copyout their result_t to user memory.  By copying
		 * out the result_t, the user can poll on memory waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true
		 * must set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0);
			curthread->t_sig_check = 1;
		}
	}
}
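/*
 * Editorial aside (a hedged sketch, not part of the original source):
 * the stop_on_fault() test above fires when a debugger has added the
 * fault to the process's p_fltmask through /proc.  A userland tracer
 * might declare FLTPAGE "an event of interest" roughly as below,
 * assuming the proc(4) PCSFAULT control message and the fltset_t
 * macros from <sys/procfs.h>; the ctlfd argument is illustrative:
 */
#if 0
#include <sys/procfs.h>
#include <unistd.h>

void
watch_pagefaults(int ctlfd)		/* fd of /proc/<pid>/ctl */
{
	struct {
		long cmd;
		fltset_t set;
	} ctl;

	ctl.cmd = PCSFAULT;
	premptyset(&ctl.set);
	praddset(&ctl.set, FLTPAGE);	/* stop the lwp on page faults */
	(void) write(ctlfd, &ctl, sizeof (ctl));
}
#endif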

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 * came here directly from _fp_ieee_exception,
		 * which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28.  Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER:	/* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER:	/* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:	/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap.
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}
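/*
 * Editorial aside (a hedged sketch, not part of the original source):
 * the UT_FP_EXCEPTION_IEEE_754 branch in fpu_trap() vectors to a
 * handler the process registered itself.  A userland program would
 * typically install one with install_utrap(2), roughly as below
 * (assuming the <sys/utrap.h> interface; fpe_handler is a hypothetical
 * handler entry point):
 */
#if 0
#include <sys/utrap.h>

extern void fpe_handler(void);		/* hypothetical handler entry */

void
catch_fp_traps(void)
{
	(void) install_utrap(UT_FP_EXCEPTION_IEEE_754,
	    (utrap_handler_t)fpe_handler, NULL);
}
#endif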

void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun)
		preempt();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)
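/*
 * Editorial note (a hedged illustration, not part of the original source):
 * IS_ASINF masks out bits 0 and 3 of the ASI number, so the single
 * compare against 0x82 accepts what should be the four SPARC V9
 * no-fault ASIs: primary/secondary no-fault (0x82/0x83) and their
 * little-endian variants (0x8a/0x8b).  For example:
 */
#if 0
	ASSERT(IS_ASINF(0x82) && IS_ASINF(0x83));	/* ASI_PNF, ASI_SNF */
	ASSERT(IS_ASINF(0x8a) && IS_ASINF(0x8b));	/* ASI_PNFL, ASI_SNFL */
	ASSERT(!IS_ASINF(0x80));			/* ASI_PRIMARY faults */
#endif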

static int
nfload(struct regs *rp, int *instrp)
{
	uint_t instr, asi, op3, rd;
	size_t len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
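/*
 * Editorial aside (a hedged sketch, not part of the original source):
 * swap_nc() and ldstub_nc() below emulate the memory half of the two
 * trapped atomics under atomic_nc_mutex.  The architectural semantics
 * being modeled are, in effect:
 */
#if 0
	/* SWAP [addr], %rd: exchange register with the 32-bit word */
	uint32_t old = *(uint32_t *)addr;
	*(uint32_t *)addr = (uint32_t)reg_rd;
	reg_rd = old;

	/* LDSTUB [addr], %rd: load the byte, then store all-ones */
	uint8_t b = *(uint8_t *)addr;
	*(uint8_t *)addr = 0xff;
	reg_rd = b;
#endif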
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {		/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * either a privileged thread (idle, panic, interrupt)
			 * or will check when t_preempt is lowered
			 */
			if (curthread->t_pri < 0)
				kpreempt_cnts.kpc_idle++;
			else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else
				kpreempt_cnts.kpc_blocked++;
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}