/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void trap_rtt(void);

static int
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}

	return (0);	/* avoid optimization of restore in call's delay slot */
}

#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif
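
/*
 * The IS_* predicates below decode a SPARC instruction word by masking
 * its fixed opcode fields and comparing against the expected pattern.
 * As a worked example, FLUSH is op=2 (bits <31:30>) with op3=0x3b
 * (bits <24:19>), so IS_FLUSH masks those fields with 0xc1f80000 and
 * compares against 0x81d80000; the unmasked register fields don't
 * participate in the comparison.
 */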

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
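		/*
		 * If the lwp was single-stepping on behalf of /proc,
		 * undo the step: prundostep() returns non-zero if it
		 * rolled the lwp back, and the fault counts as the
		 * culmination of the step only if the faulting address
		 * is on the same page as the stepped instruction.
		 */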
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() may have been called recursively and may
			 * have stopped the process, so do single step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
	case T_DATA_MMU_MISS:
	case T_INSTR_MMU_MISS + T_USER:
	case T_DATA_MMU_MISS + T_USER:
	case T_DATA_PROT + T_USER:
	case T_AST + T_USER:
	case T_SYS_RTT_PAGE + T_USER:
	case T_FLUSH_PCB + T_USER:
	case T_FLUSHW + T_USER:
		break;

	default:
		FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
		    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
		break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_ILLTRP;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault
			 * if we're doing a copyin/copyout from a kernel
			 * thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:		/* system data mmu miss */
	case T_DATA_PROT:		/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
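		/*
		 * A minimal sketch of the consumer side, assuming the
		 * usual on_trap()/no_trap() pattern from <sys/ontrap.h>
		 * (on_trap() returns non-zero when we unwind to it via
		 * the trampoline below):
		 *
		 *	on_trap_data_t otd;
		 *
		 *	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
		 *		no_trap();
		 *		return (EFAULT);
		 *	}
		 *	... access that may fault ...
		 *	no_trap();
		 */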
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
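		/*
		 * Otherwise fall back to the t_lofault protocol used by
		 * copyin/copyout: save and clear t_lofault around the
		 * pagefault, and on an unresolved fault branch to the
		 * saved lofault pc with the error number in %g1.
		 */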
		lofault = curthread->t_lofault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses so it is not easy to find out whether
			 * the access was a read or a write so we need
			 * to decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER:	/* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;
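
	/*
	 * For the user faults below, addr arrives as the MMU tag access
	 * register (except for instruction misses, which use %pc), so
	 * each case first extracts the virtual address with
	 * TAGACC_VADDR_MASK.
	 */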
	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses
			 * so it is not easy to find out whether the access
			 * was a read or a write so we need to decode the
			 * actual instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

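			/*
			 * Try to resolve the fault.  A FC_NOMAP result
			 * below the stack base may just mean the stack
			 * needs to grow, which grow() handles.
			 */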
			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeds, ok.
			 * Otherwise grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault.  Unfortunately it is
				 * very slow, and this is an important
				 * code path.  Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set, or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * check if the instruction was a
				 * flush.  ABI allows users to specify
				 * an illegal address on the flush
				 * instruction so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to NORMAL_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if (pcb->pcb_flags & NORMAL_STEP) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
				if (pcb->pcb_flags & WATCH_STEP)
					(void) undo_watch_step(NULL);
			} else {
				fault = undo_watch_step(&siginfo);
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
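		/*
		 * FT_ATOMIC_NC is an atomic (swap/ldstub) to a
		 * non-cacheable page; emulate it via swap_nc()/ldstub_nc()
		 * below rather than signalling, for userland drivers that
		 * do atomics to device memory.
		 */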
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, NULL))
			goto out;
		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
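		/*
		 * A SPARC CALL is op=1 with a 30-bit word displacement:
		 * the errata workaround below recognizes a trapped call
		 * by (instr & 0xc0000000) == 0x40000000 and recomputes
		 * its target as pc + (disp30 << 2).
		 */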
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
			    (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
		{
			int error;
			caddr_t sp;

			error = flush_user_windows_to_stack(&sp);
			/*
			 * Possible errors:
			 *	error copying out
			 *	unaligned stack pointer
			 * The first is given to us as the return value
			 * from flush_user_windows_to_stack().  The second
			 * results in residual windows in the pcb.
			 */
			if (error != 0) {
				/*
				 * EINTR comes from a signal during copyout;
				 * we should not post another signal.
				 */
				if (error != EINTR) {
					/*
					 * Zap the process with a SIGSEGV -
					 * process may be managing its own
					 * stack growth by taking SIGSEGVs
					 * on a different signal stack.
					 */
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGSEGV;
					siginfo.si_code = SEGV_MAPERR;
					siginfo.si_addr = sp;
					fault = FLTBOUNDS;
				}
				break;
			} else if (mpcb->mpcb_wbcnt) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADSTK;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTILL;
				break;
			}
		}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction.  The flushw can trap if any of the
		 * stack pages are not writable for whatever reason.  In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again.  Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:		/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
	    tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}
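
/*
 * Common post-trap processing for user traps: record the fault for
 * /proc and real-time profiling, post any pending signal, and handle
 * ASTs.  "restartable" tells trapsig() whether the faulting instruction
 * can safely be restarted.
 */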
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);
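
	/*
	 * lwp_oweupc is set when a profil(2) tick arrived while the lwp
	 * could not be profiled directly; charge the tick to the user
	 * pc now.
	 */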
	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions
		 * that may have caused an AST.  This flag is on whenever a
		 * signal or unusual condition should be handled after the
		 * next trap or syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		mutex_enter(&p->p_lock);
		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			timer_lwpbind();
			curthread->t_proc_flag &= ~TP_CHANGEBIND;
		}
		mutex_exit(&p->p_lock);

		/*
		 * For kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq, whose AIO_POLL bit is set, the kernel
		 * should copyout their result_t to user memory.  By copying
		 * out the result_t, the user can poll on memory waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true
		 * must set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;
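
	/*
	 * For IEEE FP exceptions, the process may have installed its own
	 * user trap handler (p_utraps[UT_FP_EXCEPTION_IEEE_754], see
	 * utrap_install(2)); if one is present and there is a queued
	 * exception, we vector there below instead of posting SIGFPE.
	 */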
	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 * came here directly from _fp_ieee_exception,
		 * which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28.  Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER:	/* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER:	/* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:	/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}
void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun)
		preempt();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)
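
/*
 * Emulate a non-faulting load (an ld[d]a from one of the non-faulting
 * asi's matched by IS_ASINF above) that trapped: map a dummy non-fault
 * segment over the offending address if there is a hole there, return
 * zero in the destination register(s), and skip the instruction.
 * Returns 1 if the trap was handled here, 0 if it was not a
 * non-faulting load.
 */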
static int
nfload(struct regs *rp, int *instrp)
{
	uint_t instr, asi, op3, rd;
	size_t len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
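/*
 * Note that the emulations below are only atomic with respect to other
 * emulated atomics: atomic_nc_mutex serializes the fuword/suword pairs
 * against each other, not against hardware access to the location.
 */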
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * either a privileged thread (idle, panic, interrupt)
			 * or will check when t_preempt is lowered
			 */
			if (curthread->t_pri < 0)
				kpreempt_cnts.kpc_idle++;
			else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else
				kpreempt_cnts.kpc_blocked++;
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}
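
/*
 * Decide whether a faulting access was a read, a write, or a flush by
 * decoding the instruction at %pc, since the hardware doesn't record
 * the access type in the sfsr on an mmu miss.
 */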
static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}