1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright (c) 2012 Joyent, Inc. All rights reserved.
29 */
30
31 #include <sys/mmu.h>
32 #include <sys/systm.h>
33 #include <sys/trap.h>
34 #include <sys/machtrap.h>
35 #include <sys/vtrace.h>
36 #include <sys/prsystm.h>
37 #include <sys/archsystm.h>
38 #include <sys/machsystm.h>
39 #include <sys/fpu/fpusystm.h>
40 #include <sys/simulate.h>
41 #include <sys/ftrace.h>
42 #include <sys/ontrap.h>
43 #include <sys/kcpc.h>
44 #include <sys/kobj.h>
45 #include <sys/procfs.h>
46 #include <sys/sun4asi.h>
47 #include <sys/sdt.h>
48 #include <sys/fpras.h>
49 #include <sys/contract/process_impl.h>
50
51 #ifdef TRAPTRACE
52 #include <sys/traptrace.h>
53 #endif
54
55 int tudebug = 0;
56 static int tudebugbpt = 0;
57 static int tudebugfpe = 0;
58
59 static int alignfaults = 0;
60
61 #if defined(TRAPDEBUG) || defined(lint)
62 static int lodebug = 0;
63 #else
64 #define lodebug 0
65 #endif /* defined(TRAPDEBUG) || defined(lint) */
66
67
68 int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
69 #pragma weak vis1_partial_support
70
71 void showregs(unsigned, struct regs *, caddr_t, uint_t);
72 #pragma weak showregs
73
74 void trap_async_hwerr(void);
75 #pragma weak trap_async_hwerr
76
77 void trap_async_berr_bto(int, struct regs *);
78 #pragma weak trap_async_berr_bto
79
80 static enum seg_rw get_accesstype(struct regs *);
81 static int nfload(struct regs *, int *);
82 static int swap_nc(struct regs *, int);
83 static int ldstub_nc(struct regs *, int);
84 void trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
85 void trap_rtt(void);
86
87 static int __NORETURN
88 die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
89 {
90 struct panic_trap_info ti;
91
92 #ifdef TRAPTRACE
93 TRAPTRACE_FREEZE;
94 #endif
95
96 ti.trap_regs = rp;
97 ti.trap_type = type;
98 ti.trap_addr = addr;
99 ti.trap_mmu_fsr = mmu_fsr;
100
101 curthread->t_panic_trap = &ti;
102
103 if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
104 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
105 "occurred in module \"%s\" due to %s",
106 type, (void *)rp, (void *)addr, mmu_fsr,
107 mod_containing_pc((caddr_t)rp->r_pc),
108 addr < (caddr_t)PAGESIZE ?
109 "a NULL pointer dereference" :
110 "an illegal access to a user address");
111 } else {
112 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
113 type, (void *)rp, (void *)addr, mmu_fsr);
114 }
115 }
116
117 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
118 int ill_calls;
119 #endif
120
121 /*
122 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
123 * are the "strong" prefetches (fcn=20-23). But we check for all flavors of
124 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
125 */
126 #define IS_PREFETCH(i) (((i) & 0xc1780000) == 0xc1680000)
127
128 #define IS_FLUSH(i) (((i) & 0xc1f80000) == 0x81d80000)
129 #define IS_SWAP(i) (((i) & 0xc1f80000) == 0xc0780000)
130 #define IS_LDSTUB(i) (((i) & 0xc1f80000) == 0xc0680000)
131 #define IS_FLOAT(i) (((i) & 0x1000000) != 0)
132 #define IS_STORE(i) (((i) >> 21) & 1)
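/*
 * Decode notes for the masks above -- a sketch, assuming the standard
 * SPARC V9 format-3 encodings.  Each mask keeps the op field
 * (bits 31:30) plus some or all of the op3 field (bits 24:19):
 *
 * IS_PREFETCH ignores op3<4>, so it matches both PREFETCH (op3 0x2d)
 * and PREFETCHA (op3 0x3d).  For example, "prefetch [%g1 + 4], #n_reads"
 * encodes as 0xc1686004, and 0xc1686004 & 0xc1780000 == 0xc1680000.
 *
 * IS_FLUSH matches FLUSH (op 2, op3 0x3b); IS_SWAP matches SWAP
 * (op 3, op3 0x0f) but not SWAPA; IS_LDSTUB matches LDSTUB (op 3,
 * op3 0x0d).  IS_FLOAT tests op3<5> (bit 24), the bit that puts a
 * load/store op3 into the 0x20-0x3f (floating-point and related)
 * range, and IS_STORE tests op3<2> (bit 21), the store bit of the
 * load/store op3 group.
 */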
133
134 /*
135 * Called from the trap handler when a processor trap occurs.
136 */
137 /*VARARGS2*/
138 void
139 trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
140 {
141 proc_t *p = ttoproc(curthread);
142 klwp_id_t lwp = ttolwp(curthread);
143 struct machpcb *mpcb = NULL;
144 k_siginfo_t siginfo;
145 uint_t op3, fault = 0;
146 int stepped = 0;
147 greg_t oldpc;
148 int mstate;
149 char *badaddr;
150 faultcode_t res;
151 enum fault_type fault_type;
152 enum seg_rw rw;
153 uintptr_t lofault;
154 label_t *onfault;
155 int instr;
156 int iskernel;
157 int watchcode;
158 int watchpage;
159 extern faultcode_t pagefault(caddr_t, enum fault_type,
160 enum seg_rw, int);
161 #ifdef sun4v
162 extern boolean_t tick_stick_emulation_active;
163 #endif /* sun4v */
164
165 CPU_STATS_ADDQ(CPU, sys, trap, 1);
166
167 #ifdef SF_ERRATA_23 /* call causes illegal-insn */
168 ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
169 (type == T_UNIMP_INSTR));
170 #else
171 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
172 #endif /* SF_ERRATA_23 */
173
174 if (USERMODE(rp->r_tstate) || (type & T_USER)) {
175 /*
176 * Set lwp_state before trying to acquire any
177 * adaptive lock
178 */
179 ASSERT(lwp != NULL);
180 lwp->lwp_state = LWP_SYS;
181 /*
182 * Set up the current cred to use during this trap. u_cred
183 * no longer exists. t_cred is used instead.
184 * The current process credential applies to the thread for
185 * the entire trap. If trapping from the kernel, this
186 * should already be set up.
187 */
188 if (curthread->t_cred != p->p_cred) {
189 cred_t *oldcred = curthread->t_cred;
190 /*
191 * DTrace accesses t_cred in probe context. t_cred
192 * must always be either NULL, or point to a valid,
193 * allocated cred structure.
194 */
195 curthread->t_cred = crgetcred();
196 crfree(oldcred);
197 }
198 type |= T_USER;
199 ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
200 (type == (T_SYS_RTT_ALIGN | T_USER)) ||
201 lwp->lwp_regs == rp);
202 mpcb = lwptompcb(lwp);
203 switch (type) {
204 case T_WIN_OVERFLOW + T_USER:
205 case T_WIN_UNDERFLOW + T_USER:
206 case T_SYS_RTT_PAGE + T_USER:
207 case T_DATA_MMU_MISS + T_USER:
208 mstate = LMS_DFAULT;
209 break;
210 case T_INSTR_MMU_MISS + T_USER:
211 mstate = LMS_TFAULT;
212 break;
213 default:
214 mstate = LMS_TRAP;
215 break;
216 }
217 mstate = new_mstate(curthread, mstate);
218 siginfo.si_signo = 0;
219 stepped =
220 lwp->lwp_pcb.pcb_step != STEP_NONE &&
221 ((oldpc = rp->r_pc), prundostep()) &&
222 mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
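/*
 * Note that "stepped" is nonzero only when a single-step was
 * pending, prundostep() actually undid it, and the fault address
 * falls on the same page as the instruction being stepped --
 * i.e. the trap is a side effect of single-stepping rather than
 * a genuine fault.
 */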
223 /* this assignment must not precede call to prundostep() */
224 oldpc = rp->r_pc;
225 }
226
227 TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
228 "C_trap_handler_enter:type %x", type);
229
230 #ifdef F_DEFERRED
231 /*
232 * Take any pending floating point exceptions now.
233 * If the floating point unit has an exception to handle,
234 * just return to user-level to let the signal handler run.
235 * The instruction that got us to trap() will be reexecuted on
236 * return from the signal handler and we will trap to here again.
237 * This is necessary to disambiguate simultaneous traps which
238 * happen when a floating-point exception is pending and a
239 * machine fault is incurred.
240 */
241 if (type & T_USER) {
242 /*
243 * FP_TRAPPED is set only by sendsig() when it copies
244 * out the floating-point queue for the signal handler.
245 * It is set there so we can test it here and in syscall().
246 */
247 mpcb->mpcb_flags &= ~FP_TRAPPED;
248 syncfpu();
249 if (mpcb->mpcb_flags & FP_TRAPPED) {
250 /*
251 * trap() has been called recursively and may
252 * have stopped the process, so do single step
253 * support for /proc.
254 */
255 mpcb->mpcb_flags &= ~FP_TRAPPED;
256 goto out;
257 }
258 }
259 #endif
260 switch (type) {
261 case T_DATA_MMU_MISS:
262 case T_INSTR_MMU_MISS + T_USER:
263 case T_DATA_MMU_MISS + T_USER:
264 case T_DATA_PROT + T_USER:
265 case T_AST + T_USER:
266 case T_SYS_RTT_PAGE + T_USER:
267 case T_FLUSH_PCB + T_USER:
268 case T_FLUSHW + T_USER:
269 break;
270
271 default:
272 FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
273 (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
274 break;
275 }
276
277 switch (type) {
278
279 default:
280 /*
281 * Check for user software trap.
282 */
283 if (type & T_USER) {
284 if (tudebug)
285 showregs(type, rp, (caddr_t)0, 0);
286 if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
287 bzero(&siginfo, sizeof (siginfo));
288 siginfo.si_signo = SIGILL;
289 siginfo.si_code = ILL_ILLTRP;
290 siginfo.si_addr = (caddr_t)rp->r_pc;
291 siginfo.si_trapno = type & ~T_USER;
292 fault = FLTILL;
293 break;
294 }
295 }
296 addr = (caddr_t)rp->r_pc;
297 (void) die(type, rp, addr, 0);
298 /*NOTREACHED*/
299
300 case T_ALIGNMENT: /* supv alignment error */
301 if (nfload(rp, NULL))
302 goto cleanup;
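/*
 * The t_lofault protocol (as used by the copyin()/copyout()
 * family): a protected routine installs the address of a
 * recovery label in t_lofault.  If it faults, the handler
 * stuffs EFAULT into %g1 and redirects pc/npc to that label,
 * so the routine unwinds and returns failure instead of
 * panicking.
 */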
303
304 if (curthread->t_lofault) {
305 if (lodebug) {
306 showregs(type, rp, addr, 0);
307 traceback((caddr_t)rp->r_sp);
308 }
309 rp->r_g1 = EFAULT;
310 rp->r_pc = curthread->t_lofault;
311 rp->r_npc = rp->r_pc + 4;
312 goto cleanup;
313 }
314 (void) die(type, rp, addr, 0);
315 /*NOTREACHED*/
316
317 case T_INSTR_EXCEPTION: /* sys instruction access exception */
318 addr = (caddr_t)rp->r_pc;
319 (void) die(type, rp, addr, mmu_fsr);
320 /*NOTREACHED*/
321
322 case T_INSTR_MMU_MISS: /* sys instruction mmu miss */
323 addr = (caddr_t)rp->r_pc;
324 (void) die(type, rp, addr, 0);
325 /*NOTREACHED*/
326
327 case T_DATA_EXCEPTION: /* system data access exception */
328 switch (X_FAULT_TYPE(mmu_fsr)) {
329 case FT_RANGE:
330 /*
331 * This happens when we attempt to dereference an
332 * address in the address hole. If t_ontrap is set,
333 * then break and fall through to T_DATA_MMU_MISS /
334 * T_DATA_PROT case below. If lofault is set, then
335 * honour it (perhaps the user gave us a bogus
336 * address in the hole to copyin from or copyout to?)
337 */
338
339 if (curthread->t_ontrap != NULL)
340 break;
341
342 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
343 if (curthread->t_lofault) {
344 if (lodebug) {
345 showregs(type, rp, addr, 0);
346 traceback((caddr_t)rp->r_sp);
347 }
348 rp->r_g1 = EFAULT;
349 rp->r_pc = curthread->t_lofault;
350 rp->r_npc = rp->r_pc + 4;
351 goto cleanup;
352 }
353 (void) die(type, rp, addr, mmu_fsr);
354 /*NOTREACHED*/
355
356 case FT_PRIV:
357 /*
358 * This can happen if we access ASI_USER from a kernel
359 * thread. To support pxfs, we need to honor lofault if
360 * we're doing a copyin/copyout from a kernel thread.
361 */
362
363 if (nfload(rp, NULL))
364 goto cleanup;
365 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
366 if (curthread->t_lofault) {
367 if (lodebug) {
368 showregs(type, rp, addr, 0);
369 traceback((caddr_t)rp->r_sp);
370 }
371 rp->r_g1 = EFAULT;
372 rp->r_pc = curthread->t_lofault;
373 rp->r_npc = rp->r_pc + 4;
374 goto cleanup;
375 }
376 (void) die(type, rp, addr, mmu_fsr);
377 /*NOTREACHED*/
378
379 default:
380 if (nfload(rp, NULL))
381 goto cleanup;
382 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
383 (void) die(type, rp, addr, mmu_fsr);
384 /*NOTREACHED*/
385
386 case FT_NFO:
387 break;
388 }
389 /* fall into ... */
390
391 case T_DATA_MMU_MISS: /* system data mmu miss */
392 case T_DATA_PROT: /* system data protection fault */
393 if (nfload(rp, &instr))
394 goto cleanup;
395
396 /*
397 * If we're under on_trap() protection (see <sys/ontrap.h>),
398 * set ot_trap and return from the trap to the trampoline.
399 */
400 if (curthread->t_ontrap != NULL) {
401 on_trap_data_t *otp = curthread->t_ontrap;
402
403 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
404 "C_trap_handler_exit");
405 TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");
406
407 if (otp->ot_prot & OT_DATA_ACCESS) {
408 otp->ot_trap |= OT_DATA_ACCESS;
409 rp->r_pc = otp->ot_trampoline;
410 rp->r_npc = rp->r_pc + 4;
411 goto cleanup;
412 }
413 }
414 lofault = curthread->t_lofault;
415 onfault = curthread->t_onfault;
416 curthread->t_lofault = 0;
417
418 mstate = new_mstate(curthread, LMS_KFAULT);
419
420 switch (type) {
421 case T_DATA_PROT:
422 fault_type = F_PROT;
423 rw = S_WRITE;
424 break;
425 case T_INSTR_MMU_MISS:
426 fault_type = F_INVAL;
427 rw = S_EXEC;
428 break;
429 case T_DATA_MMU_MISS:
430 case T_DATA_EXCEPTION:
431 /*
432 * The hardware doesn't update the sfsr on mmu
433 * misses, so it is not easy to tell whether the
434 * access was a read or a write; we need to
435 * decode the actual instruction.
436 */
437 fault_type = F_INVAL;
438 rw = get_accesstype(rp);
439 break;
440 default:
441 cmn_err(CE_PANIC, "trap: unknown type %x", type);
442 break;
443 }
444 /*
445 * We determine if access was done to kernel or user
446 * address space. The addr passed into trap is really the
447 * tag access register.
448 */
449 iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
450 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
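/*
 * A sketch of the tag access register layout assumed here (the
 * usual sun4u arrangement): the faulting context number lives in
 * the low bits covered by TAGACC_CTX_MASK and the virtual address
 * in the high bits covered by TAGACC_VADDR_MASK, so the two masks
 * split "addr" into "which context faulted" and "at what address".
 */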
451
452 res = pagefault(addr, fault_type, rw, iskernel);
453 if (!iskernel && res == FC_NOMAP &&
454 addr < p->p_usrstack && grow(addr))
455 res = 0;
456
457 (void) new_mstate(curthread, mstate);
458
459 /*
460 * Restore lofault and onfault. If we resolved the fault, exit.
461 * If we didn't and lofault wasn't set, die.
462 */
463 curthread->t_lofault = lofault;
464 curthread->t_onfault = onfault;
465
466 if (res == 0)
467 goto cleanup;
468
469 if (IS_PREFETCH(instr)) {
470 /* skip prefetch instructions in kernel-land */
471 rp->r_pc = rp->r_npc;
472 rp->r_npc += 4;
473 goto cleanup;
474 }
475
476 if ((lofault == 0 || lodebug) &&
477 (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
478 addr = badaddr;
479 if (lofault == 0)
480 (void) die(type, rp, addr, 0);
481 /*
482 * Cannot resolve fault. Return to lofault.
483 */
484 if (lodebug) {
485 showregs(type, rp, addr, 0);
486 traceback((caddr_t)rp->r_sp);
487 }
488 if (FC_CODE(res) == FC_OBJERR)
489 res = FC_ERRNO(res);
490 else
491 res = EFAULT;
492 rp->r_g1 = res;
493 rp->r_pc = curthread->t_lofault;
494 rp->r_npc = curthread->t_lofault + 4;
495 goto cleanup;
496
497 case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
498 bzero(&siginfo, sizeof (siginfo));
499 siginfo.si_addr = (caddr_t)rp->r_pc;
500 siginfo.si_signo = SIGSEGV;
501 siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
502 SEGV_ACCERR : SEGV_MAPERR;
503 fault = FLTBOUNDS;
504 break;
505
506 case T_WIN_OVERFLOW + T_USER: /* window overflow in ??? */
507 case T_WIN_UNDERFLOW + T_USER: /* window underflow in ??? */
508 case T_SYS_RTT_PAGE + T_USER: /* window underflow in user_rtt */
509 case T_INSTR_MMU_MISS + T_USER: /* user instruction mmu miss */
510 case T_DATA_MMU_MISS + T_USER: /* user data mmu miss */
511 case T_DATA_PROT + T_USER: /* user data protection fault */
512 switch (type) {
513 case T_INSTR_MMU_MISS + T_USER:
514 addr = (caddr_t)rp->r_pc;
515 fault_type = F_INVAL;
516 rw = S_EXEC;
517 break;
518
519 case T_DATA_MMU_MISS + T_USER:
520 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
521 fault_type = F_INVAL;
522 /*
523 * The hardware doesn't update the sfsr on mmu misses,
524 * so it is not easy to tell whether the access was a
525 * read or a write; we need to decode the actual
526 * instruction. XXX BUGLY HW
527 */
528 rw = get_accesstype(rp);
529 break;
530
531 case T_DATA_PROT + T_USER:
532 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
533 fault_type = F_PROT;
534 rw = S_WRITE;
535 break;
536
537 case T_WIN_OVERFLOW + T_USER:
538 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
539 fault_type = F_INVAL;
540 rw = S_WRITE;
541 break;
542
543 case T_WIN_UNDERFLOW + T_USER:
544 case T_SYS_RTT_PAGE + T_USER:
545 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
546 fault_type = F_INVAL;
547 rw = S_READ;
548 break;
549
550 default:
551 cmn_err(CE_PANIC, "trap: unknown type %x", type);
552 break;
553 }
554
555 /*
556 * If we are single stepping, do not call pagefault()
557 */
558 if (stepped) {
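/*
 * FC_NOMAP here is just a placeholder so that control
 * reaches the "stepped" fixup below, which rewrites
 * siginfo as a TRAP_TRACE trace trap rather than a fault.
 */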
559 res = FC_NOMAP;
560 } else {
561 caddr_t vaddr = addr;
562 size_t sz;
563 int ta;
564
565 ASSERT(!(curthread->t_flag & T_WATCHPT));
566 watchpage = (pr_watch_active(p) &&
567 type != T_WIN_OVERFLOW + T_USER &&
568 type != T_WIN_UNDERFLOW + T_USER &&
569 type != T_SYS_RTT_PAGE + T_USER &&
570 pr_is_watchpage(addr, rw));
571
572 if (!watchpage ||
573 (sz = instr_size(rp, &vaddr, rw)) <= 0)
574 /* EMPTY */;
575 else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
576 sz, NULL, rw)) != 0) {
577 if (ta) {
578 do_watch_step(vaddr, sz, rw,
579 watchcode, rp->r_pc);
580 fault_type = F_INVAL;
581 } else {
582 bzero(&siginfo, sizeof (siginfo));
583 siginfo.si_signo = SIGTRAP;
584 siginfo.si_code = watchcode;
585 siginfo.si_addr = vaddr;
586 siginfo.si_trapafter = 0;
587 siginfo.si_pc = (caddr_t)rp->r_pc;
588 fault = FLTWATCH;
589 break;
590 }
591 } else {
592 if (rw != S_EXEC &&
593 pr_watch_emul(rp, vaddr, rw))
594 goto out;
595 do_watch_step(vaddr, sz, rw, 0, 0);
596 fault_type = F_INVAL;
597 }
598
599 if (pr_watch_active(p) &&
600 (type == T_WIN_OVERFLOW + T_USER ||
601 type == T_WIN_UNDERFLOW + T_USER ||
602 type == T_SYS_RTT_PAGE + T_USER)) {
603 int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
604 if (copy_return_window(dotwo))
605 goto out;
606 fault_type = F_INVAL;
607 }
608
609 res = pagefault(addr, fault_type, rw, 0);
610
611 /*
612 * If pagefault() succeeds, we're done.
613 * Otherwise try to grow the stack automatically.
614 */
615 if (res == 0 ||
616 (res == FC_NOMAP &&
617 type != T_INSTR_MMU_MISS + T_USER &&
618 addr < p->p_usrstack &&
619 grow(addr))) {
620 int ismem = prismember(&p->p_fltmask, FLTPAGE);
621
622 /*
623 * instr_size() is used to get the exact
624 * address of the fault, instead of the
625 * page of the fault. Unfortunately it is
626 * very slow, and this is an important
627 * code path. Don't call it unless
628 * correctness is needed. ie. if FLTPAGE
629 * is set, or we're profiling.
630 */
631
632 if (curthread->t_rprof != NULL || ismem)
633 (void) instr_size(rp, &addr, rw);
634
635 lwp->lwp_lastfault = FLTPAGE;
636 lwp->lwp_lastfaddr = addr;
637
638 if (ismem) {
639 bzero(&siginfo, sizeof (siginfo));
640 siginfo.si_addr = addr;
641 (void) stop_on_fault(FLTPAGE, &siginfo);
642 }
643 goto out;
644 }
645
646 if (type != (T_INSTR_MMU_MISS + T_USER)) {
647 /*
648 * check for non-faulting loads, also
649 * fetch the instruction to check for
650 * flush
651 */
652 if (nfload(rp, &instr))
653 goto out;
654
655 /* skip userland prefetch instructions */
656 if (IS_PREFETCH(instr)) {
657 rp->r_pc = rp->r_npc;
658 rp->r_npc += 4;
659 goto out;
660 /*NOTREACHED*/
661 }
662
663 /*
664 * check if the instruction was a
665 * flush. ABI allows users to specify
666 * an illegal address on the flush
667 * instruction so we simply return in
668 * this case.
669 *
670 * NB: the hardware should set a bit
671 * indicating this trap was caused by
672 * a flush instruction. Instruction
673 * decoding is bugly!
674 */
675 if (IS_FLUSH(instr)) {
676 /* skip the flush instruction */
677 rp->r_pc = rp->r_npc;
678 rp->r_npc += 4;
679 goto out;
680 /*NOTREACHED*/
681 }
682 } else if (res == FC_PROT) {
683 report_stack_exec(p, addr);
684 }
685
686 if (tudebug)
687 showregs(type, rp, addr, 0);
688 }
689
690 /*
691 * In the case where both pagefault and grow fail,
692 * set the code to the value provided by pagefault.
693 */
694 (void) instr_size(rp, &addr, rw);
695 bzero(&siginfo, sizeof (siginfo));
696 siginfo.si_addr = addr;
697 if (FC_CODE(res) == FC_OBJERR) {
698 siginfo.si_errno = FC_ERRNO(res);
699 if (siginfo.si_errno != EINTR) {
700 siginfo.si_signo = SIGBUS;
701 siginfo.si_code = BUS_OBJERR;
702 fault = FLTACCESS;
703 }
704 } else { /* FC_NOMAP || FC_PROT */
705 siginfo.si_signo = SIGSEGV;
706 siginfo.si_code = (res == FC_NOMAP) ?
707 SEGV_MAPERR : SEGV_ACCERR;
708 fault = FLTBOUNDS;
709 }
710 /*
711 * If this is the culmination of a single-step,
712 * reset the addr, code, signal and fault to
713 * indicate a hardware trace trap.
714 */
715 if (stepped) {
716 pcb_t *pcb = &lwp->lwp_pcb;
717
718 siginfo.si_signo = 0;
719 fault = 0;
720 if (pcb->pcb_step == STEP_WASACTIVE) {
721 pcb->pcb_step = STEP_NONE;
722 pcb->pcb_tracepc = NULL;
723 oldpc = rp->r_pc - 4;
724 }
725 /*
726 * If both NORMAL_STEP and WATCH_STEP are in
727 * effect, give precedence to WATCH_STEP.
728 * One or the other must be set at this point.
729 */
730 ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
731 if ((fault = undo_watch_step(&siginfo)) == 0 &&
732 (pcb->pcb_flags & NORMAL_STEP)) {
733 siginfo.si_signo = SIGTRAP;
734 siginfo.si_code = TRAP_TRACE;
735 siginfo.si_addr = (caddr_t)rp->r_pc;
736 fault = FLTTRACE;
737 }
738 pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
739 }
740 break;
741
742 case T_DATA_EXCEPTION + T_USER: /* user data access exception */
743
744 if (&vis1_partial_support != NULL) {
745 bzero(&siginfo, sizeof (siginfo));
746 if (vis1_partial_support(rp,
747 &siginfo, &fault) == 0)
748 goto out;
749 }
750
751 if (nfload(rp, &instr))
752 goto out;
753 if (IS_FLUSH(instr)) {
754 /* skip the flush instruction */
755 rp->r_pc = rp->r_npc;
756 rp->r_npc += 4;
757 goto out;
758 /*NOTREACHED*/
759 }
760 bzero(&siginfo, sizeof (siginfo));
761 siginfo.si_addr = addr;
762 switch (X_FAULT_TYPE(mmu_fsr)) {
763 case FT_ATOMIC_NC:
764 if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
765 (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
766 /* skip the atomic */
767 rp->r_pc = rp->r_npc;
768 rp->r_npc += 4;
769 goto out;
770 }
771 /* FALLTHROUGH */
772 case FT_PRIV:
773 siginfo.si_signo = SIGSEGV;
774 siginfo.si_code = SEGV_ACCERR;
775 fault = FLTBOUNDS;
776 break;
777 case FT_SPEC_LD:
778 case FT_ILL_ALT:
779 siginfo.si_signo = SIGILL;
780 siginfo.si_code = ILL_ILLADR;
781 fault = FLTILL;
782 break;
783 default:
784 siginfo.si_signo = SIGSEGV;
785 siginfo.si_code = SEGV_MAPERR;
786 fault = FLTBOUNDS;
787 break;
788 }
789 break;
790
791 case T_SYS_RTT_ALIGN + T_USER: /* user alignment error */
792 case T_ALIGNMENT + T_USER: /* user alignment error */
793 if (tudebug)
794 showregs(type, rp, addr, 0);
795 /*
796 * If the user has to do unaligned references
797 * the ugly stuff gets done here.
798 */
799 alignfaults++;
800 if (&vis1_partial_support != NULL) {
801 bzero(&siginfo, sizeof (siginfo));
802 if (vis1_partial_support(rp,
803 &siginfo, &fault) == 0)
804 goto out;
805 }
806
807 bzero(&siginfo, sizeof (siginfo));
808 if (type == T_SYS_RTT_ALIGN + T_USER) {
809 if (nfload(rp, NULL))
810 goto out;
811 /*
812 * Can't do unaligned stack access
813 */
814 siginfo.si_signo = SIGBUS;
815 siginfo.si_code = BUS_ADRALN;
816 siginfo.si_addr = addr;
817 fault = FLTACCESS;
818 break;
819 }
820
821 /*
822 * Try to fix alignment before non-faulting load test.
823 */
824 if (p->p_fixalignment) {
825 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
826 rp->r_pc = rp->r_npc;
827 rp->r_npc += 4;
828 goto out;
829 }
830 if (nfload(rp, NULL))
831 goto out;
832 siginfo.si_signo = SIGSEGV;
833 siginfo.si_code = SEGV_MAPERR;
834 siginfo.si_addr = badaddr;
835 fault = FLTBOUNDS;
836 } else {
837 if (nfload(rp, NULL))
838 goto out;
839 siginfo.si_signo = SIGBUS;
840 siginfo.si_code = BUS_ADRALN;
841 if (rp->r_pc & 3) { /* offending address, if pc */
842 siginfo.si_addr = (caddr_t)rp->r_pc;
843 } else {
844 if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
845 siginfo.si_addr = badaddr;
846 else
847 siginfo.si_addr = (caddr_t)rp->r_pc;
848 }
849 fault = FLTACCESS;
850 }
851 break;
852
853 case T_PRIV_INSTR + T_USER: /* privileged instruction fault */
854 if (tudebug)
855 showregs(type, rp, (caddr_t)0, 0);
856
857 bzero(&siginfo, sizeof (siginfo));
858 #ifdef sun4v
859 /*
860 * If this instruction fault is a non-privileged %tick
861 * or %stick trap, and %tick/%stick user emulation is
862 * enabled as a result of an OS suspend, then simulate
863 * the register read. We rely on simulate_rdtick to fail
864 * if the instruction is not a %tick or %stick read,
865 * causing us to fall through to the normal privileged
866 * instruction handling.
867 */
868 if (tick_stick_emulation_active &&
869 (X_FAULT_TYPE(mmu_fsr) == FT_NEW_PRVACT) &&
870 simulate_rdtick(rp) == SIMU_SUCCESS) {
871 /* skip the successfully simulated instruction */
872 rp->r_pc = rp->r_npc;
873 rp->r_npc += 4;
874 goto out;
875 }
876 #endif
877 siginfo.si_signo = SIGILL;
878 siginfo.si_code = ILL_PRVOPC;
879 siginfo.si_addr = (caddr_t)rp->r_pc;
880 fault = FLTILL;
881 break;
882
883 case T_UNIMP_INSTR: /* priv illegal instruction fault */
884 if (fpras_implemented) {
885 /*
886 * Call fpras_chktrap indicating that
887 * we've come from a trap handler and pass
888 * the regs. That function may choose to panic
889 * (in which case it won't return) or it may
890 * determine that a reboot is desired. In the
891 * latter case it must alter pc/npc to skip
892 * the illegal instruction and continue at
893 * a controlled address.
894 */
895 if (&fpras_chktrap) {
896 if (fpras_chktrap(rp))
897 goto cleanup;
898 }
899 }
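/*
 * The test below matches the CALL instruction format (op bits
 * 31:30 == 01, the remaining 30 bits a word displacement).  For
 * the affected errata the workaround emulates the call by hand:
 * %o7 gets the return address, the target is computed from the
 * 30-bit word displacement, and pc/npc are advanced so the delay
 * slot executes before the target.
 */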
900 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
901 instr = *(int *)rp->r_pc;
902 if ((instr & 0xc0000000) == 0x40000000) {
903 long pc;
904
905 rp->r_o7 = (long long)rp->r_pc;
906 pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
907 rp->r_pc = rp->r_npc;
908 rp->r_npc = pc;
909 ill_calls++;
910 goto cleanup;
911 }
912 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
913 /*
914 * It's not an fpras failure and it's not SF_ERRATA_23 - die
915 */
916 addr = (caddr_t)rp->r_pc;
917 (void) die(type, rp, addr, 0);
918 /*NOTREACHED*/
919
920 case T_UNIMP_INSTR + T_USER: /* illegal instruction fault */
921 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
922 instr = fetch_user_instr((caddr_t)rp->r_pc);
923 if ((instr & 0xc0000000) == 0x40000000) {
924 long pc;
925
926 rp->r_o7 = (long long)rp->r_pc;
927 pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
928 rp->r_pc = rp->r_npc;
929 rp->r_npc = pc;
930 ill_calls++;
931 goto out;
932 }
933 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
934 if (tudebug)
935 showregs(type, rp, (caddr_t)0, 0);
936 bzero(&siginfo, sizeof (siginfo));
937 /*
938 * Try to simulate the instruction.
939 */
940 switch (simulate_unimp(rp, &badaddr)) {
941 case SIMU_RETRY:
942 goto out; /* regs are already set up */
943 /*NOTREACHED*/
944
945 case SIMU_SUCCESS:
946 /* skip the successfully simulated instruction */
947 rp->r_pc = rp->r_npc;
948 rp->r_npc += 4;
949 goto out;
950 /*NOTREACHED*/
951
952 case SIMU_FAULT:
953 siginfo.si_signo = SIGSEGV;
954 siginfo.si_code = SEGV_MAPERR;
955 siginfo.si_addr = badaddr;
956 fault = FLTBOUNDS;
957 break;
958
959 case SIMU_DZERO:
960 siginfo.si_signo = SIGFPE;
961 siginfo.si_code = FPE_INTDIV;
962 siginfo.si_addr = (caddr_t)rp->r_pc;
963 fault = FLTIZDIV;
964 break;
965
966 case SIMU_UNALIGN:
967 siginfo.si_signo = SIGBUS;
968 siginfo.si_code = BUS_ADRALN;
969 siginfo.si_addr = badaddr;
970 fault = FLTACCESS;
971 break;
972
973 case SIMU_ILLEGAL:
974 default:
975 siginfo.si_signo = SIGILL;
976 op3 = (instr >> 19) & 0x3F;
977 if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
978 (op3 == IOP_V8_STDFA)))
979 siginfo.si_code = ILL_ILLADR;
980 else
981 siginfo.si_code = ILL_ILLOPC;
982 siginfo.si_addr = (caddr_t)rp->r_pc;
983 fault = FLTILL;
984 break;
985 }
986 break;
987
988 case T_UNIMP_LDD + T_USER:
989 case T_UNIMP_STD + T_USER:
990 if (tudebug)
991 showregs(type, rp, (caddr_t)0, 0);
992 switch (simulate_lddstd(rp, &badaddr)) {
993 case SIMU_SUCCESS:
994 /* skip the successfully simulated instruction */
995 rp->r_pc = rp->r_npc;
996 rp->r_npc += 4;
997 goto out;
998 /*NOTREACHED*/
999
1000 case SIMU_FAULT:
1001 if (nfload(rp, NULL))
1002 goto out;
1003 siginfo.si_signo = SIGSEGV;
1004 siginfo.si_code = SEGV_MAPERR;
1005 siginfo.si_addr = badaddr;
1006 fault = FLTBOUNDS;
1007 break;
1008
1009 case SIMU_UNALIGN:
1010 if (nfload(rp, NULL))
1011 goto out;
1012 siginfo.si_signo = SIGBUS;
1013 siginfo.si_code = BUS_ADRALN;
1014 siginfo.si_addr = badaddr;
1015 fault = FLTACCESS;
1016 break;
1017
1018 case SIMU_ILLEGAL:
1019 default:
1020 siginfo.si_signo = SIGILL;
1021 siginfo.si_code = ILL_ILLOPC;
1022 siginfo.si_addr = (caddr_t)rp->r_pc;
1023 fault = FLTILL;
1024 break;
1025 }
1026 break;
1027
1028 case T_UNIMP_LDD:
1029 case T_UNIMP_STD:
1030 if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
1031 /* skip the successfully simulated instruction */
1032 rp->r_pc = rp->r_npc;
1033 rp->r_npc += 4;
1034 goto cleanup;
1035 /*NOTREACHED*/
1036 }
1037 /*
1038 * A third party driver executed an {LDD,STD,LDDA,STDA}
1039 * that we couldn't simulate.
1040 */
1041 if (nfload(rp, NULL))
1042 goto cleanup;
1043
1044 if (curthread->t_lofault) {
1045 if (lodebug) {
1046 showregs(type, rp, addr, 0);
1047 traceback((caddr_t)rp->r_sp);
1048 }
1049 rp->r_g1 = EFAULT;
1050 rp->r_pc = curthread->t_lofault;
1051 rp->r_npc = rp->r_pc + 4;
1052 goto cleanup;
1053 }
1054 (void) die(type, rp, addr, 0);
1055 /*NOTREACHED*/
1056
1057 case T_IDIV0 + T_USER: /* integer divide by zero */
1058 case T_DIV0 + T_USER: /* integer divide by zero */
1059 if (tudebug && tudebugfpe)
1060 showregs(type, rp, (caddr_t)0, 0);
1061 bzero(&siginfo, sizeof (siginfo));
1062 siginfo.si_signo = SIGFPE;
1063 siginfo.si_code = FPE_INTDIV;
1064 siginfo.si_addr = (caddr_t)rp->r_pc;
1065 fault = FLTIZDIV;
1066 break;
1067
1068 case T_INT_OVERFLOW + T_USER: /* integer overflow */
1069 if (tudebug && tudebugfpe)
1070 showregs(type, rp, (caddr_t)0, 0);
1071 bzero(&siginfo, sizeof (siginfo));
1072 siginfo.si_signo = SIGFPE;
1073 siginfo.si_code = FPE_INTOVF;
1074 siginfo.si_addr = (caddr_t)rp->r_pc;
1075 fault = FLTIOVF;
1076 break;
1077
1078 case T_BREAKPOINT + T_USER: /* breakpoint trap (t 1) */
1079 if (tudebug && tudebugbpt)
1080 showregs(type, rp, (caddr_t)0, 0);
1081 bzero(&siginfo, sizeof (siginfo));
1082 siginfo.si_signo = SIGTRAP;
1083 siginfo.si_code = TRAP_BRKPT;
1084 siginfo.si_addr = (caddr_t)rp->r_pc;
1085 fault = FLTBPT;
1086 break;
1087
1088 case T_TAG_OVERFLOW + T_USER: /* tag overflow (taddcctv, tsubcctv) */
1089 if (tudebug)
1090 showregs(type, rp, (caddr_t)0, 0);
1091 bzero(&siginfo, sizeof (siginfo));
1092 siginfo.si_signo = SIGEMT;
1093 siginfo.si_code = EMT_TAGOVF;
1094 siginfo.si_addr = (caddr_t)rp->r_pc;
1095 fault = FLTACCESS;
1096 break;
1097
1098 case T_FLUSH_PCB + T_USER: /* finish user window overflow */
1099 case T_FLUSHW + T_USER: /* finish user window flush */
1100 /*
1101 * This trap is entered from sys_rtt in locore.s when,
1102 * upon return to user it is found that there are user
1103 * windows in pcb_wbuf. This happens because they could
1104 * not be saved on the user stack, either because it
1105 * wasn't resident or because it was misaligned.
1106 */
1107 {
1108 int error;
1109 caddr_t sp;
1110
1111 error = flush_user_windows_to_stack(&sp);
1112 /*
1113 * Possible errors:
1114 * error copying out
1115 * unaligned stack pointer
1116 * The first is given to us as the return value
1117 * from flush_user_windows_to_stack(). The second
1118 * results in residual windows in the pcb.
1119 */
1120 if (error != 0) {
1121 /*
1122 * EINTR comes from a signal during copyout;
1123 * we should not post another signal.
1124 */
1125 if (error != EINTR) {
1126 /*
1127 * Zap the process with a SIGSEGV - process
1128 * may be managing its own stack growth by
1129 * taking SIGSEGVs on a different signal stack.
1130 */
1131 bzero(&siginfo, sizeof (siginfo));
1132 siginfo.si_signo = SIGSEGV;
1133 siginfo.si_code = SEGV_MAPERR;
1134 siginfo.si_addr = sp;
1135 fault = FLTBOUNDS;
1136 }
1137 break;
1138 } else if (mpcb->mpcb_wbcnt) {
1139 bzero(&siginfo, sizeof (siginfo));
1140 siginfo.si_signo = SIGILL;
1141 siginfo.si_code = ILL_BADSTK;
1142 siginfo.si_addr = (caddr_t)rp->r_pc;
1143 fault = FLTILL;
1144 break;
1145 }
1146 }
1147
1148 /*
1149 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
1150 * window trap -- which is implemented by executing the
1151 * flushw instruction. The flushw can trap if any of the
1152 * stack pages are not writable for whatever reason. In this
1153 * case only, we advance the pc to the next instruction so
1154 * that the user thread doesn't needlessly execute the trap
1155 * again. Normally this wouldn't be a problem -- we'll
1156 * usually only end up here if this is the first touch to a
1157 * stack page -- since the second execution won't trap, but
1158 * if there's a watchpoint on the stack page the user thread
1159 * would spin, continuously executing the trap instruction.
1160 */
1161 if (type == T_FLUSHW + T_USER) {
1162 rp->r_pc = rp->r_npc;
1163 rp->r_npc += 4;
1164 }
1165 goto out;
1166
1167 case T_AST + T_USER: /* profiling or resched pseudo trap */
1168 if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
1169 lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
1170 if (kcpc_overflow_ast()) {
1171 /*
1172 * Signal performance counter overflow
1173 */
1174 if (tudebug)
1175 showregs(type, rp, (caddr_t)0, 0);
1176 bzero(&siginfo, sizeof (siginfo));
1177 siginfo.si_signo = SIGEMT;
1178 siginfo.si_code = EMT_CPCOVF;
1179 siginfo.si_addr = (caddr_t)rp->r_pc;
1180 /* for trap_cleanup(), below */
1181 oldpc = rp->r_pc - 4;
1182 fault = FLTCPCOVF;
1183 }
1184 }
1185
1186 /*
1187 * The CPC_OVERFLOW check above may already have populated
1188 * siginfo and set fault, so the checks below must not
1189 * touch these and the functions they call must use
1190 * trapsig() directly.
1191 */
1192
1193 if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1194 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1195 trap_async_hwerr();
1196 }
1197
1198 if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
1199 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
1200 trap_async_berr_bto(ASYNC_BERR, rp);
1201 }
1202
1203 if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
1204 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
1205 trap_async_berr_bto(ASYNC_BTO, rp);
1206 }
1207
1208 break;
1209 }
1210
1211 if (fault) {
1212 /* We took a fault so abort single step. */
1213 lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
1214 }
1215 trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);
1216
1217 out: /* We can't get here from a system trap */
1218 ASSERT(type & T_USER);
1219 trap_rtt();
1220 (void) new_mstate(curthread, mstate);
1221
1222 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1223 return;
1224
1225 cleanup: /* system traps end up here */
1226 ASSERT(!(type & T_USER));
1227
1228 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1229 }
1230
1231 void
1232 trap_cleanup(
1233 struct regs *rp,
1234 uint_t fault,
1235 k_siginfo_t *sip,
1236 int restartable)
1237 {
1238 extern void aio_cleanup();
1239 proc_t *p = ttoproc(curthread);
1240 klwp_id_t lwp = ttolwp(curthread);
1241
1242 if (fault) {
1243 /*
1244 * Remember the fault and fault address
1245 * for real-time (SIGPROF) profiling.
1246 */
1247 lwp->lwp_lastfault = fault;
1248 lwp->lwp_lastfaddr = sip->si_addr;
1249
1250 DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);
1251
1252 /*
1253 * If a debugger has declared this fault to be an
1254 * event of interest, stop the lwp. Otherwise just
1255 * deliver the associated signal.
1256 */
1257 if (sip->si_signo != SIGKILL &&
1258 prismember(&p->p_fltmask, fault) &&
1259 stop_on_fault(fault, sip) == 0)
1260 sip->si_signo = 0;
1261 }
1262
1263 if (sip->si_signo)
1264 trapsig(sip, restartable);
1265
1266 if (lwp->lwp_oweupc)
1267 profil_tick(rp->r_pc);
1268
1269 if (curthread->t_astflag | curthread->t_sig_check) {
1270 /*
1271 * Turn off the AST flag before checking all the conditions that
1272 * may have caused an AST. This flag is on whenever a signal or
1273 * unusual condition should be handled after the next trap or
1274 * syscall.
1275 */
1276 astoff(curthread);
1277 curthread->t_sig_check = 0;
1278
1279 /*
1280 * The following check is legal for the following reasons:
1281 * 1) The thread we are checking, is ourselves, so there is
1282 * no way the proc can go away.
1283 * 2) The only time we need to be protected by the
1284 * lock is if the binding is changed.
1285 *
1286 * Note we will still take the lock and check the binding
1287 * if the condition was true without the lock held. This
1288 * prevents lock contention among threads owned by the
1289 * same proc.
1290 */
1291
1292 if (curthread->t_proc_flag & TP_CHANGEBIND) {
1293 mutex_enter(&p->p_lock);
1294 if (curthread->t_proc_flag & TP_CHANGEBIND) {
1295 timer_lwpbind();
1296 curthread->t_proc_flag &= ~TP_CHANGEBIND;
1297 }
1298 mutex_exit(&p->p_lock);
1299 }
1300
1301 /*
1302 * for kaio requests that are on the per-process poll queue,
1303 * aiop->aio_pollq, whose AIO_POLL bit is set, the kernel
1304 * should copyout their result_t to user memory. By copying
1305 * out the result_t, the user can poll on memory waiting
1306 * for the kaio request to complete.
1307 */
1308 if (p->p_aio)
1309 aio_cleanup(0);
1310
1311 /*
1312 * If this LWP was asked to hold, call holdlwp(), which will
1313 * stop. holdlwps() sets this up and calls pokelwps() which
1314 * sets the AST flag.
1315 *
1316 * Also check TP_EXITLWP, since this is used by fresh new LWPs
1317 * through lwp_rtt(). That flag is set if the lwp_create(2)
1318 * syscall failed after creating the LWP.
1319 */
1320 if (ISHOLD(p))
1321 holdlwp();
1322
1323 /*
1324 * All code that sets signals and makes ISSIG evaluate true must
1325 * set t_astflag afterwards.
1326 */
1327 if (ISSIG_PENDING(curthread, lwp, p)) {
1328 if (issig(FORREAL))
1329 psig();
1330 curthread->t_sig_check = 1;
1331 }
1332
1333 if (curthread->t_rprof != NULL) {
1334 realsigprof(0, 0, 0);
1335 curthread->t_sig_check = 1;
1336 }
1337 }
1338 }
1339
1340 /*
1341 * Called from fp_traps when a floating point trap occurs.
1342 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
1343 * because mmu_fsr (now changed to code) is always 0.
1344 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
1345 * because the simulator only simulates multiply and divide instructions,
1346 * which would not cause floating point traps in the first place.
1347 * XXX - Supervisor mode floating point traps?
1348 */
1349 void
1350 fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
1351 {
1352 proc_t *p = ttoproc(curthread);
1353 klwp_id_t lwp = ttolwp(curthread);
1354 k_siginfo_t siginfo;
1355 uint_t op3, fault = 0;
1356 int mstate;
1357 char *badaddr;
1358 kfpu_t *fp;
1359 struct _fpq *pfpq;
1360 uint32_t inst;
1361 utrap_handler_t *utrapp;
1362
1363 CPU_STATS_ADDQ(CPU, sys, trap, 1);
1364
1365 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
1366
1367 if (USERMODE(rp->r_tstate)) {
1368 /*
1369 * Set lwp_state before trying to acquire any
1370 * adaptive lock
1371 */
1372 ASSERT(lwp != NULL);
1373 lwp->lwp_state = LWP_SYS;
1374 /*
1375 * Set up the current cred to use during this trap. u_cred
1376 * no longer exists. t_cred is used instead.
1377 * The current process credential applies to the thread for
1378 * the entire trap. If trapping from the kernel, this
1379 * should already be set up.
1380 */
1381 if (curthread->t_cred != p->p_cred) {
1382 cred_t *oldcred = curthread->t_cred;
1383 /*
1384 * DTrace accesses t_cred in probe context. t_cred
1385 * must always be either NULL, or point to a valid,
1386 * allocated cred structure.
1387 */
1388 curthread->t_cred = crgetcred();
1389 crfree(oldcred);
1390 }
1391 ASSERT(lwp->lwp_regs == rp);
1392 mstate = new_mstate(curthread, LMS_TRAP);
1393 siginfo.si_signo = 0;
1394 type |= T_USER;
1395 }
1396
1397 TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
1398 "C_fpu_trap_handler_enter:type %x", type);
1399
1400 if (tudebug && tudebugfpe)
1401 showregs(type, rp, addr, 0);
1402
1403 bzero(&siginfo, sizeof (siginfo));
1404 siginfo.si_code = code;
1405 siginfo.si_addr = addr;
1406
1407 switch (type) {
1408
1409 case T_FP_EXCEPTION_IEEE + T_USER: /* FPU arithmetic exception */
1410 /*
1411 * FPU arithmetic exception - fake up a fpq if we
1412 * came here directly from _fp_ieee_exception,
1413 * which is indicated by a zero fpu_qcnt.
1414 */
1415 fp = lwptofpu(curthread->t_lwp);
1416 utrapp = curthread->t_procp->p_utraps;
1417 if (fp->fpu_qcnt == 0) {
1418 inst = fetch_user_instr((caddr_t)rp->r_pc);
1419 lwp->lwp_state = LWP_SYS;
1420 pfpq = &fp->fpu_q->FQu.fpq;
1421 pfpq->fpq_addr = (uint32_t *)rp->r_pc;
1422 pfpq->fpq_instr = inst;
1423 fp->fpu_qcnt = 1;
1424 fp->fpu_q_entrysize = sizeof (struct _fpq);
1425 #ifdef SF_V9_TABLE_28
1426 /*
1427 * Spitfire and blackbird followed the SPARC V9 manual
1428 * paragraph 3 of section 5.1.7.9 FSR_current_exception
1429 * (cexc) for setting fsr.cexc bits on underflow and
1430 * overflow traps when the fsr.tem.inexact bit is set,
1431 * instead of following Table 28. Bugid 1263234.
1432 */
1433 {
1434 extern int spitfire_bb_fsr_bug;
1435
1436 if (spitfire_bb_fsr_bug &&
1437 (fp->fpu_fsr & FSR_TEM_NX)) {
1438 if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
1439 (fp->fpu_fsr & FSR_CEXC_OF)) {
1440 fp->fpu_fsr &= ~FSR_CEXC_OF;
1441 fp->fpu_fsr |= FSR_CEXC_NX;
1442 _fp_write_pfsr(&fp->fpu_fsr);
1443 siginfo.si_code = FPE_FLTRES;
1444 }
1445 if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
1446 (fp->fpu_fsr & FSR_CEXC_UF)) {
1447 fp->fpu_fsr &= ~FSR_CEXC_UF;
1448 fp->fpu_fsr |= FSR_CEXC_NX;
1449 _fp_write_pfsr(&fp->fpu_fsr);
1450 siginfo.si_code = FPE_FLTRES;
1451 }
1452 }
1453 }
1454 #endif /* SF_V9_TABLE_28 */
1455 rp->r_pc = rp->r_npc;
1456 rp->r_npc += 4;
1457 } else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
1458 /*
1459 * The user had a trap handler installed. Jump to
1460 * the trap handler instead of signalling the process.
1461 */
1462 rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
1463 rp->r_npc = rp->r_pc + 4;
1464 break;
1465 }
1466 siginfo.si_signo = SIGFPE;
1467 fault = FLTFPE;
1468 break;
1469
1470 case T_DATA_EXCEPTION + T_USER: /* user data access exception */
1471 siginfo.si_signo = SIGSEGV;
1472 fault = FLTBOUNDS;
1473 break;
1474
1475 case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
1476 case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
1477 alignfaults++;
1478 lwp->lwp_state = LWP_SYS;
1479 if (&vis1_partial_support != NULL) {
1480 bzero(&siginfo, sizeof (siginfo));
1481 if (vis1_partial_support(rp,
1482 &siginfo, &fault) == 0)
1483 goto out;
1484 }
1485 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
1486 rp->r_pc = rp->r_npc;
1487 rp->r_npc += 4;
1488 goto out;
1489 }
1490 fp = lwptofpu(curthread->t_lwp);
1491 fp->fpu_qcnt = 0;
1492 siginfo.si_signo = SIGSEGV;
1493 siginfo.si_code = SEGV_MAPERR;
1494 siginfo.si_addr = badaddr;
1495 fault = FLTBOUNDS;
1496 break;
1497
1498 case T_ALIGNMENT + T_USER: /* user alignment error */
1499 /*
1500 * If the user has to do unaligned references
1501 * the ugly stuff gets done here.
1502 * Only handles vanilla loads and stores.
1503 */
1504 alignfaults++;
1505 if (p->p_fixalignment) {
1506 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
1507 rp->r_pc = rp->r_npc;
1508 rp->r_npc += 4;
1509 goto out;
1510 }
1511 siginfo.si_signo = SIGSEGV;
1512 siginfo.si_code = SEGV_MAPERR;
1513 siginfo.si_addr = badaddr;
1514 fault = FLTBOUNDS;
1515 } else {
1516 siginfo.si_signo = SIGBUS;
1517 siginfo.si_code = BUS_ADRALN;
1518 if (rp->r_pc & 3) { /* offending address, if pc */
1519 siginfo.si_addr = (caddr_t)rp->r_pc;
1520 } else {
1521 if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
1522 siginfo.si_addr = badaddr;
1523 else
1524 siginfo.si_addr = (caddr_t)rp->r_pc;
1525 }
1526 fault = FLTACCESS;
1527 }
1528 break;
1529
1530 case T_UNIMP_INSTR + T_USER: /* illegal instruction fault */
1531 siginfo.si_signo = SIGILL;
1532 inst = fetch_user_instr((caddr_t)rp->r_pc);
1533 op3 = (inst >> 19) & 0x3F;
1534 if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
1535 siginfo.si_code = ILL_ILLADR;
1536 else
1537 siginfo.si_code = ILL_ILLTRP;
1538 fault = FLTILL;
1539 break;
1540
1541 default:
1542 (void) die(type, rp, addr, 0);
1543 /*NOTREACHED*/
1544 }
1545
1546 /*
1547 * We can't get here from a system trap
1548 * Never restart any instruction which got here from an fp trap.
1549 */
1550 ASSERT(type & T_USER);
1551
1552 trap_cleanup(rp, fault, &siginfo, 0);
1553 out:
1554 trap_rtt();
1555 (void) new_mstate(curthread, mstate);
1556 }
1557
1558 void
1559 trap_rtt(void)
1560 {
1561 klwp_id_t lwp = ttolwp(curthread);
1562
1563 /*
1564 * Restore register window if a debugger modified it.
1565 * Set up to perform a single-step if a debugger requested it.
1566 */
1567 if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
1568 xregrestore(lwp, 0);
1569
1570 /*
1571 * Set state to LWP_USER here so preempt won't give us a kernel
1572 * priority if it occurs after this point. Call CL_TRAPRET() to
1573 * restore the user-level priority.
1574 *
1575 * It is important that no locks (other than spinlocks) be entered
1576 * after this point before returning to user mode (unless lwp_state
1577 * is set back to LWP_SYS).
1578 */
1579 lwp->lwp_state = LWP_USER;
1580 if (curthread->t_trapret) {
1581 curthread->t_trapret = 0;
1582 thread_lock(curthread);
1583 CL_TRAPRET(curthread);
1584 thread_unlock(curthread);
1585 }
1586 if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
1587 preempt();
1588 prunstop();
1589 if (lwp->lwp_pcb.pcb_step != STEP_NONE)
1590 prdostep();
1591
1592 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1593 }
1594
1595 #define IS_LDASI(o) \
1596 ((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \
1597 (o) == (uint32_t)0xC1800000)
1598 #define IS_IMM_ASI(i) (((i) & 0x2000) == 0)
1599 #define IS_ASINF(a) (((a) & 0xF6) == 0x82)
1600 #define IS_LDDA(i) (((i) & 0xC1F80000) == 0xC0980000)
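/*
 * Decode notes (assuming the standard SPARC V9 encodings):
 * IS_LDASI keeps op and the op3 bits that distinguish the
 * alternate-space load forms; IS_IMM_ASI tests the i bit
 * (bit 13) -- when it is clear the ASI comes from the
 * instruction's asi field (bits 12:5), otherwise from the %asi
 * value saved in TSTATE.  IS_ASINF masks the ASI with 0xF6,
 * which matches exactly the four non-faulting ASIs 0x82, 0x83,
 * 0x8A and 0x8B (primary/secondary, big/little endian).
 */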
1601
1602 static int
1603 nfload(struct regs *rp, int *instrp)
1604 {
1605 uint_t instr, asi, op3, rd;
1606 size_t len;
1607 struct as *as;
1608 caddr_t addr;
1609 FPU_DREGS_TYPE zero;
1610 extern int segnf_create();
1611
1612 if (USERMODE(rp->r_tstate))
1613 instr = fetch_user_instr((caddr_t)rp->r_pc);
1614 else
1615 instr = *(int *)rp->r_pc;
1616
1617 if (instrp)
1618 *instrp = instr;
1619
1620 op3 = (uint_t)(instr & 0xC1E00000);
1621 if (!IS_LDASI(op3))
1622 return (0);
1623 if (IS_IMM_ASI(instr))
1624 asi = (instr & 0x1FE0) >> 5;
1625 else
1626 asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
1627 TSTATE_ASI_MASK);
1628 if (!IS_ASINF(asi))
1629 return (0);
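/*
 * At this point we know the instruction is a load from a
 * non-faulting ASI.  The architected behavior is that such a
 * load from an unmapped address returns zero rather than
 * faulting, so the code below (a) maps a non-fault segment
 * (segnf) over the hole if there is one, (b) writes zero into
 * the destination register (integer or FP), and (c) steps the
 * pc past the load.
 */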
1630 if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
1631 len = 1;
1632 as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
1633 as_rangelock(as);
1634 if (as_gap(as, len, &addr, &len, 0, addr) == 0)
1635 (void) as_map(as, addr, len, segnf_create, NULL);
1636 as_rangeunlock(as);
1637 }
1638 zero = 0;
1639 rd = (instr >> 25) & 0x1f;
1640 if (IS_FLOAT(instr)) {
1641 uint_t dbflg = ((instr >> 19) & 3) == 3;
1642
1643 if (dbflg) { /* clever v9 reg encoding */
1644 if (rd & 1)
1645 rd = (rd & 0x1e) | 0x20;
1646 rd >>= 1;
1647 }
1648 if (fpu_exists) {
1649 if (!(_fp_read_fprs() & FPRS_FEF))
1650 fp_enable();
1651
1652 if (dbflg)
1653 _fp_write_pdreg(&zero, rd);
1654 else
1655 _fp_write_pfreg((uint_t *)&zero, rd);
1656 } else {
1657 kfpu_t *fp = lwptofpu(curthread->t_lwp);
1658
1659 if (!fp->fpu_en)
1660 fp_enable();
1661
1662 if (dbflg)
1663 fp->fpu_fr.fpu_dregs[rd] = zero;
1664 else
1665 fp->fpu_fr.fpu_regs[rd] = 0;
1666 }
1667 } else {
1668 (void) putreg(&zero, rp, rd, &addr);
1669 if (IS_LDDA(instr))
1670 (void) putreg(&zero, rp, rd + 1, &addr);
1671 }
1672 rp->r_pc = rp->r_npc;
1673 rp->r_npc += 4;
1674 return (1);
1675 }
1676
1677 kmutex_t atomic_nc_mutex;
1678
1679 /*
1680 * The following couple of routines are for userland drivers which
1681 * do atomics to noncached addresses. This sort of worked on previous
1682 * platforms -- the operation really wasn't atomic, but it didn't generate
1683 * a trap as sun4u systems do.
1684 */
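/*
 * Emulation sketch: both routines serialize on atomic_nc_mutex,
 * fetch the old memory value with fuword, store the new value
 * (the register contents for swap, 0xff for ldstub) with suword,
 * and then deposit the old value in the destination register.
 * The result is atomic only with respect to other emulated
 * accesses that take the same mutex, which matches the "sort of
 * worked" semantics described above.
 */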
1685 static int
1686 swap_nc(struct regs *rp, int instr)
1687 {
1688 uint64_t rdata, mdata;
1689 caddr_t addr, badaddr;
1690 uint_t tmp, rd;
1691
1692 (void) flush_user_windows_to_stack(NULL);
1693 rd = (instr >> 25) & 0x1f;
1694 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1695 return (0);
1696 if (getreg(rp, rd, &rdata, &badaddr))
1697 return (0);
1698 mutex_enter(&atomic_nc_mutex);
1699 if (fuword32(addr, &tmp) == -1) {
1700 mutex_exit(&atomic_nc_mutex);
1701 return (0);
1702 }
1703 mdata = (u_longlong_t)tmp;
1704 if (suword32(addr, (uint32_t)rdata) == -1) {
1705 mutex_exit(&atomic_nc_mutex);
1706 return (0);
1707 }
1708 (void) putreg(&mdata, rp, rd, &badaddr);
1709 mutex_exit(&atomic_nc_mutex);
1710 return (1);
1711 }
1712
1713 static int
1714 ldstub_nc(struct regs *rp, int instr)
1715 {
1716 uint64_t mdata;
1717 caddr_t addr, badaddr;
1718 uint_t rd;
1719 uint8_t tmp;
1720
1721 (void) flush_user_windows_to_stack(NULL);
1722 rd = (instr >> 25) & 0x1f;
1723 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1724 return (0);
1725 mutex_enter(&atomic_nc_mutex);
1726 if (fuword8(addr, &tmp) == -1) {
1727 mutex_exit(&atomic_nc_mutex);
1728 return (0);
1729 }
1730 mdata = (u_longlong_t)tmp;
1731 if (suword8(addr, (uint8_t)0xff) == -1) {
1732 mutex_exit(&atomic_nc_mutex);
1733 return (0);
1734 }
1735 (void) putreg(&mdata, rp, rd, &badaddr);
1736 mutex_exit(&atomic_nc_mutex);
1737 return (1);
1738 }
1739
1740 /*
1741 * This function helps instr_size() determine the operand size.
1742 * It is called for the extended ldda/stda asi's.
1743 */
1744 int
1745 extended_asi_size(int asi)
1746 {
1747 switch (asi) {
1748 case ASI_PST8_P:
1749 case ASI_PST8_S:
1750 case ASI_PST16_P:
1751 case ASI_PST16_S:
1752 case ASI_PST32_P:
1753 case ASI_PST32_S:
1754 case ASI_PST8_PL:
1755 case ASI_PST8_SL:
1756 case ASI_PST16_PL:
1757 case ASI_PST16_SL:
1758 case ASI_PST32_PL:
1759 case ASI_PST32_SL:
1760 return (8);
1761 case ASI_FL8_P:
1762 case ASI_FL8_S:
1763 case ASI_FL8_PL:
1764 case ASI_FL8_SL:
1765 return (1);
1766 case ASI_FL16_P:
1767 case ASI_FL16_S:
1768 case ASI_FL16_PL:
1769 case ASI_FL16_SL:
1770 return (2);
1771 case ASI_BLK_P:
1772 case ASI_BLK_S:
1773 case ASI_BLK_PL:
1774 case ASI_BLK_SL:
1775 case ASI_BLK_COMMIT_P:
1776 case ASI_BLK_COMMIT_S:
1777 return (64);
1778 }
1779
1780 return (0);
1781 }
1782
1783 /*
1784 * Patch non-zero to disable preemption of threads in the kernel.
1785 */
1786 int IGNORE_KERNEL_PREEMPTION = 0; /* XXX - delete this someday */
1787
1788 struct kpreempt_cnts { /* kernel preemption statistics */
1789 int kpc_idle; /* executing idle thread */
1790 int kpc_intr; /* executing interrupt thread */
1791 int kpc_clock; /* executing clock thread */
1792 int kpc_blocked; /* thread has blocked preemption (t_preempt) */
1793 int kpc_notonproc; /* thread is surrendering processor */
1794 int kpc_inswtch; /* thread has ratified scheduling decision */
1795 int kpc_prilevel; /* processor interrupt level is too high */
1796 int kpc_apreempt; /* asynchronous preemption */
1797 int kpc_spreempt; /* synchronous preemption */
1798 } kpreempt_cnts;
1799
1800 /*
1801 * kernel preemption: forced rescheduling
1802 * preempt the running kernel thread.
1803 */
1804 void
1805 kpreempt(int asyncspl)
1806 {
1807 if (IGNORE_KERNEL_PREEMPTION) {
1808 aston(CPU->cpu_dispthread);
1809 return;
1810 }
1811 /*
1812 * Check that conditions are right for kernel preemption
1813 */
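/*
 * The do/while re-tests cpu_kprunrun after each preempt()
 * because another preemption request may have arrived while
 * we were switched out; we loop until no request is pending.
 */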
1814 do {
1815 if (curthread->t_preempt) {
1816 /*
1817 * Either this is a privileged thread (idle, panic,
1818 * interrupt) or it will check again when t_preempt is lowered.
1819 * We need to specifically handle the case where
1820 * the thread is in the middle of swtch (resume has
1821 * been called) and has its t_preempt set
1822 * [idle thread and a thread which is in kpreempt
1823 * already] and then a high priority thread is
1824 * available in the local dispatch queue.
1825 * In this case the resumed thread needs to take a
1826 * trap so that it can call kpreempt. We achieve
1827 * this by using siron().
1828 * How do we detect this condition:
1829 * idle thread is running and is in the midst of
1830 * resume: curthread->t_pri == -1 && CPU->dispthread
1831 * != CPU->thread
1832 * Need to ensure that this happens only at high pil
1833 * resume is called at high pil
1834 * Only in resume_from_idle is the pil changed.
1835 */
1836 if (curthread->t_pri < 0) {
1837 kpreempt_cnts.kpc_idle++;
1838 if (CPU->cpu_dispthread != CPU->cpu_thread)
1839 siron();
1840 } else if (curthread->t_flag & T_INTR_THREAD) {
1841 kpreempt_cnts.kpc_intr++;
1842 if (curthread->t_pil == CLOCK_LEVEL)
1843 kpreempt_cnts.kpc_clock++;
1844 } else {
1845 kpreempt_cnts.kpc_blocked++;
1846 if (CPU->cpu_dispthread != CPU->cpu_thread)
1847 siron();
1848 }
1849 aston(CPU->cpu_dispthread);
1850 return;
1851 }
1852 if (curthread->t_state != TS_ONPROC ||
1853 curthread->t_disp_queue != CPU->cpu_disp) {
1854 /* this thread will be calling swtch() shortly */
1855 kpreempt_cnts.kpc_notonproc++;
1856 if (CPU->cpu_thread != CPU->cpu_dispthread) {
1857 /* already in swtch(), force another */
1858 kpreempt_cnts.kpc_inswtch++;
1859 siron();
1860 }
1861 return;
1862 }
1863
1864 if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
1865 getpil()) >= DISP_LEVEL) {
1866 /*
1867 * We can't preempt this thread if it is at
1868 * a PIL >= DISP_LEVEL since it may be holding
1869 * a spin lock (like sched_lock).
1870 */
1871 siron(); /* check back later */
1872 kpreempt_cnts.kpc_prilevel++;
1873 return;
1874 }
1875
1876 /*
1877 * block preemption so we don't have multiple preemptions
1878 * pending on the interrupt stack
1879 */
1880 curthread->t_preempt++;
1881 if (asyncspl != KPREEMPT_SYNC) {
1882 splx(asyncspl);
1883 kpreempt_cnts.kpc_apreempt++;
1884 } else
1885 kpreempt_cnts.kpc_spreempt++;
1886
1887 preempt();
1888 curthread->t_preempt--;
1889 } while (CPU->cpu_kprunrun);
1890 }
1891
1892 static enum seg_rw
1893 get_accesstype(struct regs *rp)
1894 {
1895 uint32_t instr;
1896
1897 if (USERMODE(rp->r_tstate))
1898 instr = fetch_user_instr((caddr_t)rp->r_pc);
1899 else
1900 instr = *(uint32_t *)rp->r_pc;
1901
1902 if (IS_FLUSH(instr))
1903 return (S_OTHER);
1904
1905 if (IS_STORE(instr))
1906 return (S_WRITE);
1907 else
1908 return (S_READ);
1909 }
1910
1911 /*
1912 * Handle an asynchronous hardware error.
1913 * The policy is currently to send a hardware error contract event to
1914 * the process's process contract and to kill the process. Eventually
1915 * we may want to instead send a special signal whose default
1916 * disposition is to generate the contract event.
1917 */
1918 void
1919 trap_async_hwerr(void)
1920 {
1921 k_siginfo_t si;
1922 proc_t *p = ttoproc(curthread);
1923 extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);
1924
1925 errorq_drain(ue_queue); /* flush pending async error messages */
1926
1927 print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);
1928
1929 contract_process_hwerr(p->p_ct_process, p);
1930
1931 bzero(&si, sizeof (k_siginfo_t));
1932 si.si_signo = SIGKILL;
1933 si.si_code = SI_NOINFO;
1934 trapsig(&si, 1);
1935 }
1936
1937 /*
1938 * Handle bus error and bus timeout for a user process by sending SIGBUS
1939 * The type is either ASYNC_BERR or ASYNC_BTO.
1940 */
1941 void
1942 trap_async_berr_bto(int type, struct regs *rp)
1943 {
1944 k_siginfo_t si;
1945
1946 errorq_drain(ue_queue); /* flush pending async error messages */
1947 bzero(&si, sizeof (k_siginfo_t));
1948
1949 si.si_signo = SIGBUS;
1950 si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
1951 si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
1952 si.si_errno = ENXIO;
1953
1954 trapsig(&si, 1);
1955 }
1956