xref: /titanic_51/usr/src/uts/sun4/os/trap.c (revision 791a814c934fcd4deb13b26c1f116ff283272a0d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <sys/mmu.h>
29 #include <sys/systm.h>
30 #include <sys/trap.h>
31 #include <sys/machtrap.h>
32 #include <sys/vtrace.h>
33 #include <sys/prsystm.h>
34 #include <sys/archsystm.h>
35 #include <sys/machsystm.h>
36 #include <sys/fpu/fpusystm.h>
37 #include <sys/tnf.h>
38 #include <sys/tnf_probe.h>
39 #include <sys/simulate.h>
40 #include <sys/ftrace.h>
41 #include <sys/ontrap.h>
42 #include <sys/kcpc.h>
43 #include <sys/kobj.h>
44 #include <sys/procfs.h>
45 #include <sys/sun4asi.h>
46 #include <sys/sdt.h>
47 #include <sys/fpras.h>
48 #include <sys/contract/process_impl.h>
49 
50 #ifdef  TRAPTRACE
51 #include <sys/traptrace.h>
52 #endif
53 
54 int tudebug = 0;
55 static int tudebugbpt = 0;
56 static int tudebugfpe = 0;
57 
58 static int alignfaults = 0;
59 
60 #if defined(TRAPDEBUG) || defined(lint)
61 static int lodebug = 0;
62 #else
63 #define	lodebug	0
64 #endif /* defined(TRAPDEBUG) || defined(lint) */
65 
66 
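/*
 * The following routines are optional.  They are declared weak so that
 * the kernel can test at runtime whether an implementation is present
 * (e.g. &vis1_partial_support != NULL) before calling them.
 */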
67 int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
68 #pragma weak vis1_partial_support
69 
70 void showregs(unsigned, struct regs *, caddr_t, uint_t);
71 #pragma weak showregs
72 
73 void trap_async_hwerr(void);
74 #pragma weak trap_async_hwerr
75 
76 void trap_async_berr_bto(int, struct regs *);
77 #pragma weak trap_async_berr_bto
78 
79 static enum seg_rw get_accesstype(struct regs *);
80 static int nfload(struct regs *, int *);
81 static int swap_nc(struct regs *, int);
82 static int ldstub_nc(struct regs *, int);
83 void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
84 void	trap_rtt(void);
85 
86 static int
87 die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
88 {
89 	struct panic_trap_info ti;
90 
91 #ifdef TRAPTRACE
92 	TRAPTRACE_FREEZE;
93 #endif
94 
95 	ti.trap_regs = rp;
96 	ti.trap_type = type;
97 	ti.trap_addr = addr;
98 	ti.trap_mmu_fsr = mmu_fsr;
99 
100 	curthread->t_panic_trap = &ti;
101 
102 	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
103 		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
104 		    "occurred in module \"%s\" due to %s",
105 		    type, (void *)rp, (void *)addr, mmu_fsr,
106 		    mod_containing_pc((caddr_t)rp->r_pc),
107 		    addr < (caddr_t)PAGESIZE ?
108 		    "a NULL pointer dereference" :
109 		    "an illegal access to a user address");
110 	} else {
111 		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
112 		    type, (void *)rp, (void *)addr, mmu_fsr);
113 	}
114 
115 	return (0);	/* avoid optimization of restore in call's delay slot */
116 }
117 
118 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
119 int	ill_calls;
120 #endif
121 
122 /*
123  * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
124  * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
125  * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
126  */
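/*
 * These predicates match the op (bits 31:30) and op3 (bits 24:19) fields
 * of a SPARC format-3 instruction.  The PREFETCH mask deliberately leaves
 * op3 bit 4 unmasked so that both PREFETCH (op3 = 0x2d) and PREFETCHA
 * (op3 = 0x3d) match.  IS_FLOAT tests the high op3 bit that marks
 * floating-point loads/stores, and IS_STORE tests the op3 bit that
 * distinguishes stores from loads.
 */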
127 #define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)
128 
129 #define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
130 #define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
131 #define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
132 #define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
133 #define	IS_STORE(i)	(((i) >> 21) & 1)
134 
135 /*
136  * Called from the trap handler when a processor trap occurs.
137  */
138 /*VARARGS2*/
139 void
140 trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
141 {
142 	proc_t *p = ttoproc(curthread);
143 	klwp_id_t lwp = ttolwp(curthread);
144 	struct machpcb *mpcb = NULL;
145 	k_siginfo_t siginfo;
146 	uint_t op3, fault = 0;
147 	int stepped = 0;
148 	greg_t oldpc;
149 	int mstate;
150 	char *badaddr;
151 	faultcode_t res;
152 	enum fault_type fault_type;
153 	enum seg_rw rw;
154 	uintptr_t lofault;
155 	int instr;
156 	int iskernel;
157 	int watchcode;
158 	int watchpage;
159 	extern faultcode_t pagefault(caddr_t, enum fault_type,
160 	    enum seg_rw, int);
161 
162 	CPU_STATS_ADDQ(CPU, sys, trap, 1);
163 
164 #ifdef SF_ERRATA_23 /* call causes illegal-insn */
165 	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
166 	    (type == T_UNIMP_INSTR));
167 #else
168 	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
169 #endif /* SF_ERRATA_23 */
170 
171 	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
172 		/*
173 		 * Set lwp_state before trying to acquire any
174 		 * adaptive lock
175 		 */
176 		ASSERT(lwp != NULL);
177 		lwp->lwp_state = LWP_SYS;
178 		/*
179 		 * Set up the current cred to use during this trap. u_cred
180 		 * no longer exists.  t_cred is used instead.
181 		 * The current process credential applies to the thread for
182 		 * the entire trap.  If trapping from the kernel, this
183 		 * should already be set up.
184 		 */
185 		if (curthread->t_cred != p->p_cred) {
186 			cred_t *oldcred = curthread->t_cred;
187 			/*
188 			 * DTrace accesses t_cred in probe context.  t_cred
189 			 * must always be either NULL, or point to a valid,
190 			 * allocated cred structure.
191 			 */
192 			curthread->t_cred = crgetcred();
193 			crfree(oldcred);
194 		}
195 		type |= T_USER;
196 		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
197 		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
198 		    lwp->lwp_regs == rp);
199 		mpcb = lwptompcb(lwp);
200 		switch (type) {
201 		case T_WIN_OVERFLOW + T_USER:
202 		case T_WIN_UNDERFLOW + T_USER:
203 		case T_SYS_RTT_PAGE + T_USER:
204 		case T_DATA_MMU_MISS + T_USER:
205 			mstate = LMS_DFAULT;
206 			break;
207 		case T_INSTR_MMU_MISS + T_USER:
208 			mstate = LMS_TFAULT;
209 			break;
210 		default:
211 			mstate = LMS_TRAP;
212 			break;
213 		}
214 		/* Kernel probe */
215 		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
216 		    tnf_microstate, state, (char)mstate);
217 		mstate = new_mstate(curthread, mstate);
218 		siginfo.si_signo = 0;
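		/*
		 * "stepped" is nonzero only if we were single-stepping
		 * the lwp (pcb_step active), prundostep() actually undid
		 * the step, and the faulting address lies on the same
		 * page as the trapping pc.
		 */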
219 		stepped =
220 		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
221 		    ((oldpc = rp->r_pc), prundostep()) &&
222 		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
223 		/* this assignment must not precede call to prundostep() */
224 		oldpc = rp->r_pc;
225 	}
226 
227 	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
228 	    "C_trap_handler_enter:type %x", type);
229 
230 #ifdef	F_DEFERRED
231 	/*
232 	 * Take any pending floating point exceptions now.
233 	 * If the floating point unit has an exception to handle,
234 	 * just return to user-level to let the signal handler run.
235 	 * The instruction that got us to trap() will be reexecuted on
236 	 * return from the signal handler and we will trap to here again.
237 	 * This is necessary to disambiguate simultaneous traps which
238 	 * happen when a floating-point exception is pending and a
239 	 * machine fault is incurred.
240 	 */
241 	if (type & T_USER) {
242 		/*
243 		 * FP_TRAPPED is set only by sendsig() when it copies
244 		 * out the floating-point queue for the signal handler.
245 		 * It is set there so we can test it here and in syscall().
246 		 */
247 		mpcb->mpcb_flags &= ~FP_TRAPPED;
248 		syncfpu();
249 		if (mpcb->mpcb_flags & FP_TRAPPED) {
250 			/*
251 			 * trap() may have been called recursively and may
252 			 * have stopped the process, so do single step
253 			 * support for /proc.
254 			 */
255 			mpcb->mpcb_flags &= ~FP_TRAPPED;
256 			goto out;
257 		}
258 	}
259 #endif
260 	switch (type) {
261 	case T_DATA_MMU_MISS:
262 	case T_INSTR_MMU_MISS + T_USER:
263 	case T_DATA_MMU_MISS + T_USER:
264 	case T_DATA_PROT + T_USER:
265 	case T_AST + T_USER:
266 	case T_SYS_RTT_PAGE + T_USER:
267 	case T_FLUSH_PCB + T_USER:
268 	case T_FLUSHW + T_USER:
269 		break;
270 
271 	default:
272 		FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
273 		    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
274 		break;
275 	}
276 
277 	switch (type) {
278 
279 	default:
280 		/*
281 		 * Check for user software trap.
282 		 */
283 		if (type & T_USER) {
284 			if (tudebug)
285 				showregs(type, rp, (caddr_t)0, 0);
286 			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
287 				bzero(&siginfo, sizeof (siginfo));
288 				siginfo.si_signo = SIGILL;
289 				siginfo.si_code  = ILL_ILLTRP;
290 				siginfo.si_addr  = (caddr_t)rp->r_pc;
291 				siginfo.si_trapno = type &~ T_USER;
292 				fault = FLTILL;
293 				break;
294 			}
295 		}
296 		addr = (caddr_t)rp->r_pc;
297 		(void) die(type, rp, addr, 0);
298 		/*NOTREACHED*/
299 
300 	case T_ALIGNMENT:	/* supv alignment error */
301 		if (nfload(rp, NULL))
302 			goto cleanup;
303 
304 		if (curthread->t_lofault) {
305 			if (lodebug) {
306 				showregs(type, rp, addr, 0);
307 				traceback((caddr_t)rp->r_sp);
308 			}
309 			rp->r_g1 = EFAULT;
310 			rp->r_pc = curthread->t_lofault;
311 			rp->r_npc = rp->r_pc + 4;
312 			goto cleanup;
313 		}
314 		(void) die(type, rp, addr, 0);
315 		/*NOTREACHED*/
316 
317 	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
318 		addr = (caddr_t)rp->r_pc;
319 		(void) die(type, rp, addr, mmu_fsr);
320 		/*NOTREACHED*/
321 
322 	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
323 		addr = (caddr_t)rp->r_pc;
324 		(void) die(type, rp, addr, 0);
325 		/*NOTREACHED*/
326 
327 	case T_DATA_EXCEPTION:		/* system data access exception */
328 		switch (X_FAULT_TYPE(mmu_fsr)) {
329 		case FT_RANGE:
330 			/*
331 			 * This happens when we attempt to dereference an
332 			 * address in the address hole.  If t_ontrap is set,
333 			 * then break and fall through to T_DATA_MMU_MISS /
334 			 * T_DATA_PROT case below.  If lofault is set, then
335 			 * honour it (perhaps the user gave us a bogus
336 			 * address in the hole to copyin from or copyout to?)
337 			 */
338 
339 			if (curthread->t_ontrap != NULL)
340 				break;
341 
342 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
343 			if (curthread->t_lofault) {
344 				if (lodebug) {
345 					showregs(type, rp, addr, 0);
346 					traceback((caddr_t)rp->r_sp);
347 				}
348 				rp->r_g1 = EFAULT;
349 				rp->r_pc = curthread->t_lofault;
350 				rp->r_npc = rp->r_pc + 4;
351 				goto cleanup;
352 			}
353 			(void) die(type, rp, addr, mmu_fsr);
354 			/*NOTREACHED*/
355 
356 		case FT_PRIV:
357 			/*
358 			 * This can happen if we access ASI_USER from a kernel
359 			 * thread.  To support pxfs, we need to honor lofault if
360 			 * we're doing a copyin/copyout from a kernel thread.
361 			 */
362 
363 			if (nfload(rp, NULL))
364 				goto cleanup;
365 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
366 			if (curthread->t_lofault) {
367 				if (lodebug) {
368 					showregs(type, rp, addr, 0);
369 					traceback((caddr_t)rp->r_sp);
370 				}
371 				rp->r_g1 = EFAULT;
372 				rp->r_pc = curthread->t_lofault;
373 				rp->r_npc = rp->r_pc + 4;
374 				goto cleanup;
375 			}
376 			(void) die(type, rp, addr, mmu_fsr);
377 			/*NOTREACHED*/
378 
379 		default:
380 			if (nfload(rp, NULL))
381 				goto cleanup;
382 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
383 			(void) die(type, rp, addr, mmu_fsr);
384 			/*NOTREACHED*/
385 
386 		case FT_NFO:
387 			break;
388 		}
389 		/* fall into ... */
390 
391 	case T_DATA_MMU_MISS:		/* system data mmu miss */
392 	case T_DATA_PROT:		/* system data protection fault */
393 		if (nfload(rp, &instr))
394 			goto cleanup;
395 
396 		/*
397 		 * If we're under on_trap() protection (see <sys/ontrap.h>),
398 		 * set ot_trap and return from the trap to the trampoline.
399 		 */
400 		if (curthread->t_ontrap != NULL) {
401 			on_trap_data_t *otp = curthread->t_ontrap;
402 
403 			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
404 			    "C_trap_handler_exit");
405 			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");
406 
407 			if (otp->ot_prot & OT_DATA_ACCESS) {
408 				otp->ot_trap |= OT_DATA_ACCESS;
409 				rp->r_pc = otp->ot_trampoline;
410 				rp->r_npc = rp->r_pc + 4;
411 				goto cleanup;
412 			}
413 		}
414 		lofault = curthread->t_lofault;
415 		curthread->t_lofault = 0;
416 
417 		mstate = new_mstate(curthread, LMS_KFAULT);
418 
419 		switch (type) {
420 		case T_DATA_PROT:
421 			fault_type = F_PROT;
422 			rw = S_WRITE;
423 			break;
424 		case T_INSTR_MMU_MISS:
425 			fault_type = F_INVAL;
426 			rw = S_EXEC;
427 			break;
428 		case T_DATA_MMU_MISS:
429 		case T_DATA_EXCEPTION:
430 			 * The hardware doesn't update the sfsr on mmu
431 			 * misses, so it is not easy to tell whether the
432 			 * access was a read or a write; we need to decode
433 			 * the actual instruction.
434 			 * to decode the actual instruction.
435 			 */
436 			fault_type = F_INVAL;
437 			rw = get_accesstype(rp);
438 			break;
439 		default:
440 			cmn_err(CE_PANIC, "trap: unknown type %x", type);
441 			break;
442 		}
443 		/*
444 		 * Determine whether the access was to kernel or user
445 		 * address space.  The addr passed into trap() is really
446 		 * the tag access register.
447 		 */
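		/*
		 * The tag access register packs the faulting virtual
		 * address in its upper bits and the MMU context number
		 * in its low-order bits, hence the separate context and
		 * vaddr masks applied here.
		 */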
448 		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
449 		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
450 
451 		res = pagefault(addr, fault_type, rw, iskernel);
452 		if (!iskernel && res == FC_NOMAP &&
453 		    addr < p->p_usrstack && grow(addr))
454 			res = 0;
455 
456 		(void) new_mstate(curthread, mstate);
457 
458 		/*
459 		 * Restore lofault.  If we resolved the fault, exit.
460 		 * If we didn't and lofault wasn't set, die.
461 		 */
462 		curthread->t_lofault = lofault;
463 
464 		if (res == 0)
465 			goto cleanup;
466 
467 		if (IS_PREFETCH(instr)) {
468 			/* skip prefetch instructions in kernel-land */
469 			rp->r_pc = rp->r_npc;
470 			rp->r_npc += 4;
471 			goto cleanup;
472 		}
473 
474 		if ((lofault == 0 || lodebug) &&
475 		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
476 			addr = badaddr;
477 		if (lofault == 0)
478 			(void) die(type, rp, addr, 0);
479 		/*
480 		 * Cannot resolve fault.  Return to lofault.
481 		 */
482 		if (lodebug) {
483 			showregs(type, rp, addr, 0);
484 			traceback((caddr_t)rp->r_sp);
485 		}
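		/*
		 * Convert the fault code into the errno handed to the
		 * lofault handler in %g1: FC_OBJERR carries an errno from
		 * the underlying object, anything else becomes EFAULT.
		 */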
486 		if (FC_CODE(res) == FC_OBJERR)
487 			res = FC_ERRNO(res);
488 		else
489 			res = EFAULT;
490 		rp->r_g1 = res;
491 		rp->r_pc = curthread->t_lofault;
492 		rp->r_npc = curthread->t_lofault + 4;
493 		goto cleanup;
494 
495 	case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
496 		bzero(&siginfo, sizeof (siginfo));
497 		siginfo.si_addr = (caddr_t)rp->r_pc;
498 		siginfo.si_signo = SIGSEGV;
499 		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
500 		    SEGV_ACCERR : SEGV_MAPERR;
501 		fault = FLTBOUNDS;
502 		break;
503 
504 	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
505 	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
506 	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
507 	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
508 	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
509 	case T_DATA_PROT + T_USER:	/* user data protection fault */
510 		switch (type) {
511 		case T_INSTR_MMU_MISS + T_USER:
512 			addr = (caddr_t)rp->r_pc;
513 			fault_type = F_INVAL;
514 			rw = S_EXEC;
515 			break;
516 
517 		case T_DATA_MMU_MISS + T_USER:
518 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
519 			fault_type = F_INVAL;
520 			/*
521 			 * The hardware doesn't update the sfsr on mmu misses,
522 			 * so it is not easy to tell whether the access was a
523 			 * read or a write; we need to decode the actual
524 			 * instruction.  XXX BUGLY HW
525 			 */
526 			rw = get_accesstype(rp);
527 			break;
528 
529 		case T_DATA_PROT + T_USER:
530 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
531 			fault_type = F_PROT;
532 			rw = S_WRITE;
533 			break;
534 
535 		case T_WIN_OVERFLOW + T_USER:
536 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
537 			fault_type = F_INVAL;
538 			rw = S_WRITE;
539 			break;
540 
541 		case T_WIN_UNDERFLOW + T_USER:
542 		case T_SYS_RTT_PAGE + T_USER:
543 			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
544 			fault_type = F_INVAL;
545 			rw = S_READ;
546 			break;
547 
548 		default:
549 			cmn_err(CE_PANIC, "trap: unknown type %x", type);
550 			break;
551 		}
552 
553 		/*
554 		 * If we are single stepping do not call pagefault
555 		 */
556 		if (stepped) {
557 			res = FC_NOMAP;
558 		} else {
559 			caddr_t vaddr = addr;
560 			size_t sz;
561 			int ta;
562 
563 			ASSERT(!(curthread->t_flag & T_WATCHPT));
564 			watchpage = (pr_watch_active(p) &&
565 			    type != T_WIN_OVERFLOW + T_USER &&
566 			    type != T_WIN_UNDERFLOW + T_USER &&
567 			    type != T_SYS_RTT_PAGE + T_USER &&
568 			    pr_is_watchpage(addr, rw));
569 
570 			if (!watchpage ||
571 			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
572 				/* EMPTY */;
573 			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
574 			    sz, NULL, rw)) != 0) {
575 				if (ta) {
576 					do_watch_step(vaddr, sz, rw,
577 					    watchcode, rp->r_pc);
578 					fault_type = F_INVAL;
579 				} else {
580 					bzero(&siginfo,	sizeof (siginfo));
581 					siginfo.si_signo = SIGTRAP;
582 					siginfo.si_code = watchcode;
583 					siginfo.si_addr = vaddr;
584 					siginfo.si_trapafter = 0;
585 					siginfo.si_pc = (caddr_t)rp->r_pc;
586 					fault = FLTWATCH;
587 					break;
588 				}
589 			} else {
590 				if (rw != S_EXEC &&
591 				    pr_watch_emul(rp, vaddr, rw))
592 					goto out;
593 				do_watch_step(vaddr, sz, rw, 0, 0);
594 				fault_type = F_INVAL;
595 			}
596 
597 			if (pr_watch_active(p) &&
598 			    (type == T_WIN_OVERFLOW + T_USER ||
599 			    type == T_WIN_UNDERFLOW + T_USER ||
600 			    type == T_SYS_RTT_PAGE + T_USER)) {
601 				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
602 				if (copy_return_window(dotwo))
603 					goto out;
604 				fault_type = F_INVAL;
605 			}
606 
607 			res = pagefault(addr, fault_type, rw, 0);
608 
609 			/*
610 			 * If pagefault() succeeded, we're done.
611 			 * Otherwise, try to grow the stack automatically.
612 			 */
613 			if (res == 0 ||
614 			    (res == FC_NOMAP &&
615 			    type != T_INSTR_MMU_MISS + T_USER &&
616 			    addr < p->p_usrstack &&
617 			    grow(addr))) {
618 				int ismem = prismember(&p->p_fltmask, FLTPAGE);
619 
620 				/*
621 				 * instr_size() is used to get the exact
622 				 * address of the fault, instead of the
623 				 * page of the fault. Unfortunately it is
624 				 * very slow, and this is an important
625 				 * code path. Don't call it unless
626 				 * correctness is needed, i.e. if FLTPAGE
627 				 * is set or we're profiling.
628 				 */
629 
630 				if (curthread->t_rprof != NULL || ismem)
631 					(void) instr_size(rp, &addr, rw);
632 
633 				lwp->lwp_lastfault = FLTPAGE;
634 				lwp->lwp_lastfaddr = addr;
635 
636 				if (ismem) {
637 					bzero(&siginfo, sizeof (siginfo));
638 					siginfo.si_addr = addr;
639 					(void) stop_on_fault(FLTPAGE, &siginfo);
640 				}
641 				goto out;
642 			}
643 
644 			if (type != (T_INSTR_MMU_MISS + T_USER)) {
645 				/*
646 				 * check for non-faulting loads, also
647 				 * fetch the instruction to check for
648 				 * flush
649 				 */
650 				if (nfload(rp, &instr))
651 					goto out;
652 
653 				/* skip userland prefetch instructions */
654 				if (IS_PREFETCH(instr)) {
655 					rp->r_pc = rp->r_npc;
656 					rp->r_npc += 4;
657 					goto out;
658 					/*NOTREACHED*/
659 				}
660 
661 				/*
662 				 * check if the instruction was a
663 				 * flush.  ABI allows users to specify
664 				 * an illegal address on the flush
665 				 * instruction so we simply return in
666 				 * this case.
667 				 *
668 				 * NB: the hardware should set a bit
669 				 * indicating this trap was caused by
670 				 * a flush instruction.  Instruction
671 				 * decoding is bugly!
672 				 */
673 				if (IS_FLUSH(instr)) {
674 					/* skip the flush instruction */
675 					rp->r_pc = rp->r_npc;
676 					rp->r_npc += 4;
677 					goto out;
678 					/*NOTREACHED*/
679 				}
680 			} else if (res == FC_PROT) {
681 				report_stack_exec(p, addr);
682 			}
683 
684 			if (tudebug)
685 				showregs(type, rp, addr, 0);
686 		}
687 
688 		/*
689 		 * In the case where both pagefault and grow fail,
690 		 * set the code to the value provided by pagefault.
691 		 */
692 		(void) instr_size(rp, &addr, rw);
693 		bzero(&siginfo, sizeof (siginfo));
694 		siginfo.si_addr = addr;
695 		if (FC_CODE(res) == FC_OBJERR) {
696 			siginfo.si_errno = FC_ERRNO(res);
697 			if (siginfo.si_errno != EINTR) {
698 				siginfo.si_signo = SIGBUS;
699 				siginfo.si_code = BUS_OBJERR;
700 				fault = FLTACCESS;
701 			}
702 		} else { /* FC_NOMAP || FC_PROT */
703 			siginfo.si_signo = SIGSEGV;
704 			siginfo.si_code = (res == FC_NOMAP) ?
705 			    SEGV_MAPERR : SEGV_ACCERR;
706 			fault = FLTBOUNDS;
707 		}
708 		/*
709 		 * If this is the culmination of a single-step,
710 		 * reset the addr, code, signal and fault to
711 		 * indicate a hardware trace trap.
712 		 */
713 		if (stepped) {
714 			pcb_t *pcb = &lwp->lwp_pcb;
715 
716 			siginfo.si_signo = 0;
717 			fault = 0;
718 			if (pcb->pcb_step == STEP_WASACTIVE) {
719 				pcb->pcb_step = STEP_NONE;
720 				pcb->pcb_tracepc = NULL;
721 				oldpc = rp->r_pc - 4;
722 			}
723 			/*
724 			 * If both NORMAL_STEP and WATCH_STEP are in
725 			 * effect, give precedence to WATCH_STEP.
726 			 * One or the other must be set at this point.
727 			 */
728 			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
729 			if ((fault = undo_watch_step(&siginfo)) == 0 &&
730 			    (pcb->pcb_flags & NORMAL_STEP)) {
731 				siginfo.si_signo = SIGTRAP;
732 				siginfo.si_code = TRAP_TRACE;
733 				siginfo.si_addr = (caddr_t)rp->r_pc;
734 				fault = FLTTRACE;
735 			}
736 			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
737 		}
738 		break;
739 
740 	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */
741 
742 		if (&vis1_partial_support != NULL) {
743 			bzero(&siginfo, sizeof (siginfo));
744 			if (vis1_partial_support(rp,
745 			    &siginfo, &fault) == 0)
746 				goto out;
747 		}
748 
749 		if (nfload(rp, &instr))
750 			goto out;
751 		if (IS_FLUSH(instr)) {
752 			/* skip the flush instruction */
753 			rp->r_pc = rp->r_npc;
754 			rp->r_npc += 4;
755 			goto out;
756 			/*NOTREACHED*/
757 		}
758 		bzero(&siginfo, sizeof (siginfo));
759 		siginfo.si_addr = addr;
760 		switch (X_FAULT_TYPE(mmu_fsr)) {
761 		case FT_ATOMIC_NC:
762 			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
763 			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
764 				/* skip the atomic */
765 				rp->r_pc = rp->r_npc;
766 				rp->r_npc += 4;
767 				goto out;
768 			}
769 			/* fall into ... */
770 		case FT_PRIV:
771 			siginfo.si_signo = SIGSEGV;
772 			siginfo.si_code = SEGV_ACCERR;
773 			fault = FLTBOUNDS;
774 			break;
775 		case FT_SPEC_LD:
776 		case FT_ILL_ALT:
777 			siginfo.si_signo = SIGILL;
778 			siginfo.si_code = ILL_ILLADR;
779 			fault = FLTILL;
780 			break;
781 		default:
782 			siginfo.si_signo = SIGSEGV;
783 			siginfo.si_code = SEGV_MAPERR;
784 			fault = FLTBOUNDS;
785 			break;
786 		}
787 		break;
788 
789 	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
790 	case T_ALIGNMENT + T_USER:	/* user alignment error */
791 		if (tudebug)
792 			showregs(type, rp, addr, 0);
793 		/*
794 		 * If the user has to do unaligned references,
795 		 * the ugly stuff gets done here.
796 		 */
797 		alignfaults++;
798 		if (&vis1_partial_support != NULL) {
799 			bzero(&siginfo, sizeof (siginfo));
800 			if (vis1_partial_support(rp,
801 			    &siginfo, &fault) == 0)
802 				goto out;
803 		}
804 
805 		bzero(&siginfo, sizeof (siginfo));
806 		if (type == T_SYS_RTT_ALIGN + T_USER) {
807 			if (nfload(rp, NULL))
808 				goto out;
809 			/*
810 			 * Can't do unaligned stack access
811 			 */
812 			siginfo.si_signo = SIGBUS;
813 			siginfo.si_code = BUS_ADRALN;
814 			siginfo.si_addr = addr;
815 			fault = FLTACCESS;
816 			break;
817 		}
818 
819 		/*
820 		 * Try to fix alignment before non-faulting load test.
821 		 */
822 		if (p->p_fixalignment) {
823 			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
824 				rp->r_pc = rp->r_npc;
825 				rp->r_npc += 4;
826 				goto out;
827 			}
828 			if (nfload(rp, NULL))
829 				goto out;
830 			siginfo.si_signo = SIGSEGV;
831 			siginfo.si_code = SEGV_MAPERR;
832 			siginfo.si_addr = badaddr;
833 			fault = FLTBOUNDS;
834 		} else {
835 			if (nfload(rp, NULL))
836 				goto out;
837 			siginfo.si_signo = SIGBUS;
838 			siginfo.si_code = BUS_ADRALN;
839 			if (rp->r_pc & 3) {	/* offending address, if pc */
840 				siginfo.si_addr = (caddr_t)rp->r_pc;
841 			} else {
842 				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
843 					siginfo.si_addr = badaddr;
844 				else
845 					siginfo.si_addr = (caddr_t)rp->r_pc;
846 			}
847 			fault = FLTACCESS;
848 		}
849 		break;
850 
851 	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
852 		if (tudebug)
853 			showregs(type, rp, (caddr_t)0, 0);
854 		bzero(&siginfo, sizeof (siginfo));
855 		siginfo.si_signo = SIGILL;
856 		siginfo.si_code = ILL_PRVOPC;
857 		siginfo.si_addr = (caddr_t)rp->r_pc;
858 		fault = FLTILL;
859 		break;
860 
861 	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
862 		if (fpras_implemented) {
863 			/*
864 			 * Call fpras_chktrap indicating that
865 			 * we've come from a trap handler and pass
866 			 * the regs.  That function may choose to panic
867 			 * (in which case it won't return) or it may
868 			 * determine that a reboot is desired.  In the
869 			 * latter case it must alter pc/npc to skip
870 			 * the illegal instruction and continue at
871 			 * a controlled address.
872 			 */
873 			if (&fpras_chktrap) {
874 				if (fpras_chktrap(rp))
875 					goto cleanup;
876 			}
877 		}
878 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
879 		instr = *(int *)rp->r_pc;
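		/*
		 * An op field of 01 identifies a CALL; emulate it by
		 * hand, saving the return address in %o7 and redirecting
		 * pc/npc to the call target (pc + disp30 * 4).
		 */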
880 		if ((instr & 0xc0000000) == 0x40000000) {
881 			long pc;
882 
883 			rp->r_o7 = (long long)rp->r_pc;
884 			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
885 			rp->r_pc = rp->r_npc;
886 			rp->r_npc = pc;
887 			ill_calls++;
888 			goto cleanup;
889 		}
890 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
891 		/*
892 		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
893 		 */
894 		addr = (caddr_t)rp->r_pc;
895 		(void) die(type, rp, addr, 0);
896 		/*NOTREACHED*/
897 
898 	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
899 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
900 		instr = fetch_user_instr((caddr_t)rp->r_pc);
901 		if ((instr & 0xc0000000) == 0x40000000) {
902 			long pc;
903 
904 			rp->r_o7 = (long long)rp->r_pc;
905 			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
906 			rp->r_pc = rp->r_npc;
907 			rp->r_npc = pc;
908 			ill_calls++;
909 			goto out;
910 		}
911 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
912 		if (tudebug)
913 			showregs(type, rp, (caddr_t)0, 0);
914 		bzero(&siginfo, sizeof (siginfo));
915 		/*
916 		 * Try to simulate the instruction.
917 		 */
918 		switch (simulate_unimp(rp, &badaddr)) {
919 		case SIMU_RETRY:
920 			goto out;	/* regs are already set up */
921 			/*NOTREACHED*/
922 
923 		case SIMU_SUCCESS:
924 			/* skip the successfully simulated instruction */
925 			rp->r_pc = rp->r_npc;
926 			rp->r_npc += 4;
927 			goto out;
928 			/*NOTREACHED*/
929 
930 		case SIMU_FAULT:
931 			siginfo.si_signo = SIGSEGV;
932 			siginfo.si_code = SEGV_MAPERR;
933 			siginfo.si_addr = badaddr;
934 			fault = FLTBOUNDS;
935 			break;
936 
937 		case SIMU_DZERO:
938 			siginfo.si_signo = SIGFPE;
939 			siginfo.si_code = FPE_INTDIV;
940 			siginfo.si_addr = (caddr_t)rp->r_pc;
941 			fault = FLTIZDIV;
942 			break;
943 
944 		case SIMU_UNALIGN:
945 			siginfo.si_signo = SIGBUS;
946 			siginfo.si_code = BUS_ADRALN;
947 			siginfo.si_addr = badaddr;
948 			fault = FLTACCESS;
949 			break;
950 
951 		case SIMU_ILLEGAL:
952 		default:
953 			siginfo.si_signo = SIGILL;
954 			op3 = (instr >> 19) & 0x3F;
955 			if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
956 			    (op3 == IOP_V8_STDFA)))
957 				siginfo.si_code = ILL_ILLADR;
958 			else
959 				siginfo.si_code = ILL_ILLOPC;
960 			siginfo.si_addr = (caddr_t)rp->r_pc;
961 			fault = FLTILL;
962 			break;
963 		}
964 		break;
965 
966 	case T_UNIMP_LDD + T_USER:
967 	case T_UNIMP_STD + T_USER:
968 		if (tudebug)
969 			showregs(type, rp, (caddr_t)0, 0);
970 		switch (simulate_lddstd(rp, &badaddr)) {
971 		case SIMU_SUCCESS:
972 			/* skip the successfully simulated instruction */
973 			rp->r_pc = rp->r_npc;
974 			rp->r_npc += 4;
975 			goto out;
976 			/*NOTREACHED*/
977 
978 		case SIMU_FAULT:
979 			if (nfload(rp, NULL))
980 				goto out;
981 			siginfo.si_signo = SIGSEGV;
982 			siginfo.si_code = SEGV_MAPERR;
983 			siginfo.si_addr = badaddr;
984 			fault = FLTBOUNDS;
985 			break;
986 
987 		case SIMU_UNALIGN:
988 			if (nfload(rp, NULL))
989 				goto out;
990 			siginfo.si_signo = SIGBUS;
991 			siginfo.si_code = BUS_ADRALN;
992 			siginfo.si_addr = badaddr;
993 			fault = FLTACCESS;
994 			break;
995 
996 		case SIMU_ILLEGAL:
997 		default:
998 			siginfo.si_signo = SIGILL;
999 			siginfo.si_code = ILL_ILLOPC;
1000 			siginfo.si_addr = (caddr_t)rp->r_pc;
1001 			fault = FLTILL;
1002 			break;
1003 		}
1004 		break;
1005 
1006 	case T_UNIMP_LDD:
1007 	case T_UNIMP_STD:
1008 		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
1009 			/* skip the successfully simulated instruction */
1010 			rp->r_pc = rp->r_npc;
1011 			rp->r_npc += 4;
1012 			goto cleanup;
1013 			/*NOTREACHED*/
1014 		}
1015 		/*
1016 		 * A third party driver executed an {LDD,STD,LDDA,STDA}
1017 		 * that we couldn't simulate.
1018 		 */
1019 		if (nfload(rp, NULL))
1020 			goto cleanup;
1021 
1022 		if (curthread->t_lofault) {
1023 			if (lodebug) {
1024 				showregs(type, rp, addr, 0);
1025 				traceback((caddr_t)rp->r_sp);
1026 			}
1027 			rp->r_g1 = EFAULT;
1028 			rp->r_pc = curthread->t_lofault;
1029 			rp->r_npc = rp->r_pc + 4;
1030 			goto cleanup;
1031 		}
1032 		(void) die(type, rp, addr, 0);
1033 		/*NOTREACHED*/
1034 
1035 	case T_IDIV0 + T_USER:		/* integer divide by zero */
1036 	case T_DIV0 + T_USER:		/* integer divide by zero */
1037 		if (tudebug && tudebugfpe)
1038 			showregs(type, rp, (caddr_t)0, 0);
1039 		bzero(&siginfo, sizeof (siginfo));
1040 		siginfo.si_signo = SIGFPE;
1041 		siginfo.si_code = FPE_INTDIV;
1042 		siginfo.si_addr = (caddr_t)rp->r_pc;
1043 		fault = FLTIZDIV;
1044 		break;
1045 
1046 	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
1047 		if (tudebug && tudebugfpe)
1048 			showregs(type, rp, (caddr_t)0, 0);
1049 		bzero(&siginfo, sizeof (siginfo));
1050 		siginfo.si_signo = SIGFPE;
1051 		siginfo.si_code  = FPE_INTOVF;
1052 		siginfo.si_addr  = (caddr_t)rp->r_pc;
1053 		fault = FLTIOVF;
1054 		break;
1055 
1056 	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
1057 		if (tudebug && tudebugbpt)
1058 			showregs(type, rp, (caddr_t)0, 0);
1059 		bzero(&siginfo, sizeof (siginfo));
1060 		siginfo.si_signo = SIGTRAP;
1061 		siginfo.si_code = TRAP_BRKPT;
1062 		siginfo.si_addr = (caddr_t)rp->r_pc;
1063 		fault = FLTBPT;
1064 		break;
1065 
1066 	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
1067 		if (tudebug)
1068 			showregs(type, rp, (caddr_t)0, 0);
1069 		bzero(&siginfo, sizeof (siginfo));
1070 		siginfo.si_signo = SIGEMT;
1071 		siginfo.si_code = EMT_TAGOVF;
1072 		siginfo.si_addr = (caddr_t)rp->r_pc;
1073 		fault = FLTACCESS;
1074 		break;
1075 
1076 	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
1077 	case T_FLUSHW + T_USER:		/* finish user window flush */
1078 		/*
1079 		 * This trap is entered from sys_rtt in locore.s when,
1080 		 * upon return to user it is found that there are user
1081 		 * windows in pcb_wbuf.  This happens because they could
1082 		 * not be saved on the user stack, either because it
1083 		 * wasn't resident or because it was misaligned.
1084 		 */
1085 	{
1086 		int error;
1087 		caddr_t sp;
1088 
1089 		error = flush_user_windows_to_stack(&sp);
1090 		/*
1091 		 * Possible errors:
1092 		 *	error copying out
1093 		 *	unaligned stack pointer
1094 		 * The first is given to us as the return value
1095 		 * from flush_user_windows_to_stack().  The second
1096 		 * results in residual windows in the pcb.
1097 		 */
1098 		if (error != 0) {
1099 			/*
1100 			 * EINTR comes from a signal during copyout;
1101 			 * we should not post another signal.
1102 			 */
1103 			if (error != EINTR) {
1104 				/*
1105 				 * Zap the process with a SIGSEGV - process
1106 				 * may be managing its own stack growth by
1107 				 * taking SIGSEGVs on a different signal stack.
1108 				 */
1109 				bzero(&siginfo, sizeof (siginfo));
1110 				siginfo.si_signo = SIGSEGV;
1111 				siginfo.si_code  = SEGV_MAPERR;
1112 				siginfo.si_addr  = sp;
1113 				fault = FLTBOUNDS;
1114 			}
1115 			break;
1116 		} else if (mpcb->mpcb_wbcnt) {
1117 			bzero(&siginfo, sizeof (siginfo));
1118 			siginfo.si_signo = SIGILL;
1119 			siginfo.si_code  = ILL_BADSTK;
1120 			siginfo.si_addr  = (caddr_t)rp->r_pc;
1121 			fault = FLTILL;
1122 			break;
1123 		}
1124 	}
1125 
1126 		/*
1127 		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
1128 		 * window trap -- which is implemented by executing the
1129 		 * flushw instruction. The flushw can trap if any of the
1130 		 * stack pages are not writable for whatever reason. In this
1131 		 * case only, we advance the pc to the next instruction so
1132 		 * that the user thread doesn't needlessly execute the trap
1133 		 * again. Normally this wouldn't be a problem -- we'll
1134 		 * usually only end up here if this is the first touch to a
1135 		 * stack page -- since the second execution won't trap, but
1136 		 * if there's a watchpoint on the stack page the user thread
1137 		 * would spin, continuously executing the trap instruction.
1138 		 */
1139 		if (type == T_FLUSHW + T_USER) {
1140 			rp->r_pc = rp->r_npc;
1141 			rp->r_npc += 4;
1142 		}
1143 		goto out;
1144 
1145 	case T_AST + T_USER:		/* profiling or resched pseudo trap */
1146 		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
1147 			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
1148 			if (kcpc_overflow_ast()) {
1149 				/*
1150 				 * Signal performance counter overflow
1151 				 */
1152 				if (tudebug)
1153 					showregs(type, rp, (caddr_t)0, 0);
1154 				bzero(&siginfo, sizeof (siginfo));
1155 				siginfo.si_signo = SIGEMT;
1156 				siginfo.si_code = EMT_CPCOVF;
1157 				siginfo.si_addr = (caddr_t)rp->r_pc;
1158 				/* for trap_cleanup(), below */
1159 				oldpc = rp->r_pc - 4;
1160 				fault = FLTCPCOVF;
1161 			}
1162 		}
1163 
1164 		/*
1165 		 * The CPC_OVERFLOW check above may already have populated
1166 		 * siginfo and set fault, so the checks below must not
1167 		 * touch these and the functions they call must use
1168 		 * trapsig() directly.
1169 		 */
1170 
1171 		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1172 			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1173 			trap_async_hwerr();
1174 		}
1175 
1176 		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
1177 			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
1178 			trap_async_berr_bto(ASYNC_BERR, rp);
1179 		}
1180 
1181 		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
1182 			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
1183 			trap_async_berr_bto(ASYNC_BTO, rp);
1184 		}
1185 
1186 		break;
1187 	}
1188 
1189 	if (fault) {
1190 		/* We took a fault so abort single step. */
1191 		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
1192 	}
1193 	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);
1194 
1195 out:	/* We can't get here from a system trap */
1196 	ASSERT(type & T_USER);
1197 	trap_rtt();
1198 	(void) new_mstate(curthread, mstate);
1199 	/* Kernel probe */
1200 	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
1201 	    tnf_microstate, state, LMS_USER);
1202 
1203 	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1204 	return;
1205 
1206 cleanup:	/* system traps end up here */
1207 	ASSERT(!(type & T_USER));
1208 
1209 	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1210 }
1211 
1212 void
1213 trap_cleanup(
1214 	struct regs *rp,
1215 	uint_t fault,
1216 	k_siginfo_t *sip,
1217 	int restartable)
1218 {
1219 	extern void aio_cleanup();
1220 	proc_t *p = ttoproc(curthread);
1221 	klwp_id_t lwp = ttolwp(curthread);
1222 
1223 	if (fault) {
1224 		/*
1225 		 * Remember the fault and fault address
1226 		 * for real-time (SIGPROF) profiling.
1227 		 */
1228 		lwp->lwp_lastfault = fault;
1229 		lwp->lwp_lastfaddr = sip->si_addr;
1230 
1231 		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);
1232 
1233 		/*
1234 		 * If a debugger has declared this fault to be an
1235 		 * event of interest, stop the lwp.  Otherwise just
1236 		 * deliver the associated signal.
1237 		 */
1238 		if (sip->si_signo != SIGKILL &&
1239 		    prismember(&p->p_fltmask, fault) &&
1240 		    stop_on_fault(fault, sip) == 0)
1241 			sip->si_signo = 0;
1242 	}
1243 
1244 	if (sip->si_signo)
1245 		trapsig(sip, restartable);
1246 
1247 	if (lwp->lwp_oweupc)
1248 		profil_tick(rp->r_pc);
1249 
1250 	if (curthread->t_astflag | curthread->t_sig_check) {
1251 		/*
1252 		 * Turn off the AST flag before checking all the conditions that
1253 		 * may have caused an AST.  This flag is on whenever a signal or
1254 		 * unusual condition should be handled after the next trap or
1255 		 * syscall.
1256 		 */
1257 		astoff(curthread);
1258 		curthread->t_sig_check = 0;
1259 
1260 		/*
1261 		 * The following check is legal for the following reasons:
1262 		 *	1) The thread we are checking is ourselves, so there is
1263 		 *	   no way the proc can go away.
1264 		 *	2) The only time we need to be protected by the
1265 		 *	   lock is if the binding is changed.
1266 		 *
1267 		 *	Note we will still take the lock and check the binding
1268 		 *	if the condition was true without the lock held.  This
1269 		 *	prevents lock contention among threads owned by the
1270 		 *	same proc.
1271 		 */
1272 
1273 		if (curthread->t_proc_flag & TP_CHANGEBIND) {
1274 			mutex_enter(&p->p_lock);
1275 			if (curthread->t_proc_flag & TP_CHANGEBIND) {
1276 				timer_lwpbind();
1277 				curthread->t_proc_flag &= ~TP_CHANGEBIND;
1278 			}
1279 			mutex_exit(&p->p_lock);
1280 		}
1281 
1282 		/*
1283 		 * For kaio requests on the per-process poll queue,
1284 		 * aiop->aio_pollq (their AIO_POLL bit is set), the kernel
1285 		 * should copy out their result_t to user memory.  By
1286 		 * copying out the result_t, the user can poll on memory
1287 		 * waiting for the kaio request to complete.
1288 		 */
1289 		if (p->p_aio)
1290 			aio_cleanup(0);
1291 
1292 		/*
1293 		 * If this LWP was asked to hold, call holdlwp(), which will
1294 		 * stop.  holdlwps() sets this up and calls pokelwps() which
1295 		 * sets the AST flag.
1296 		 *
1297 		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
1298 		 * through lwp_rtt().  That flag is set if the lwp_create(2)
1299 		 * syscall failed after creating the LWP.
1300 		 */
1301 		if (ISHOLD(p))
1302 			holdlwp();
1303 
1304 		/*
1305 		 * All code that sets signals and makes ISSIG evaluate true must
1306 		 * set t_astflag afterwards.
1307 		 */
1308 		if (ISSIG_PENDING(curthread, lwp, p)) {
1309 			if (issig(FORREAL))
1310 				psig();
1311 			curthread->t_sig_check = 1;
1312 		}
1313 
1314 		if (curthread->t_rprof != NULL) {
1315 			realsigprof(0, 0, 0);
1316 			curthread->t_sig_check = 1;
1317 		}
1318 	}
1319 }
1320 
1321 /*
1322  * Called from fp_traps when a floating point trap occurs.
1323  * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
1324  * because mmu_fsr (now changed to code) is always 0.
1325  * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
1326  * because the simulator only simulates multiply and divide instructions,
1327  * which would not cause floating point traps in the first place.
1328  * XXX - Supervisor mode floating point traps?
1329  */
1330 void
1331 fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
1332 {
1333 	proc_t *p = ttoproc(curthread);
1334 	klwp_id_t lwp = ttolwp(curthread);
1335 	k_siginfo_t siginfo;
1336 	uint_t op3, fault = 0;
1337 	int mstate;
1338 	char *badaddr;
1339 	kfpu_t *fp;
1340 	struct fpq *pfpq;
1341 	uint32_t inst;
1342 	utrap_handler_t *utrapp;
1343 
1344 	CPU_STATS_ADDQ(CPU, sys, trap, 1);
1345 
1346 	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
1347 
1348 	if (USERMODE(rp->r_tstate)) {
1349 		/*
1350 		 * Set lwp_state before trying to acquire any
1351 		 * adaptive lock
1352 		 */
1353 		ASSERT(lwp != NULL);
1354 		lwp->lwp_state = LWP_SYS;
1355 		/*
1356 		 * Set up the current cred to use during this trap. u_cred
1357 		 * no longer exists.  t_cred is used instead.
1358 		 * The current process credential applies to the thread for
1359 		 * the entire trap.  If trapping from the kernel, this
1360 		 * should already be set up.
1361 		 */
1362 		if (curthread->t_cred != p->p_cred) {
1363 			cred_t *oldcred = curthread->t_cred;
1364 			/*
1365 			 * DTrace accesses t_cred in probe context.  t_cred
1366 			 * must always be either NULL, or point to a valid,
1367 			 * allocated cred structure.
1368 			 */
1369 			curthread->t_cred = crgetcred();
1370 			crfree(oldcred);
1371 		}
1372 		ASSERT(lwp->lwp_regs == rp);
1373 		mstate = new_mstate(curthread, LMS_TRAP);
1374 		siginfo.si_signo = 0;
1375 		type |= T_USER;
1376 	}
1377 
1378 	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
1379 	    "C_fpu_trap_handler_enter:type %x", type);
1380 
1381 	if (tudebug && tudebugfpe)
1382 		showregs(type, rp, addr, 0);
1383 
1384 	bzero(&siginfo, sizeof (siginfo));
1385 	siginfo.si_code = code;
1386 	siginfo.si_addr = addr;
1387 
1388 	switch (type) {
1389 
1390 	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
1391 		/*
1392 		 * FPU arithmetic exception - fake up an fpq if we
1393 		 *	came here directly from _fp_ieee_exception,
1394 		 *	which is indicated by a zero fpu_qcnt.
1395 		 */
1396 		fp = lwptofpu(curthread->t_lwp);
1397 		utrapp = curthread->t_procp->p_utraps;
1398 		if (fp->fpu_qcnt == 0) {
1399 			inst = fetch_user_instr((caddr_t)rp->r_pc);
1400 			lwp->lwp_state = LWP_SYS;
1401 			pfpq = &fp->fpu_q->FQu.fpq;
1402 			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
1403 			pfpq->fpq_instr = inst;
1404 			fp->fpu_qcnt = 1;
1405 			fp->fpu_q_entrysize = sizeof (struct fpq);
1406 #ifdef SF_V9_TABLE_28
1407 			/*
1408 			 * Spitfire and blackbird followed the SPARC V9 manual
1409 			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
1410 			 * (cexc) for setting fsr.cexc bits on underflow and
1411 			 * overflow traps when the fsr.tem.inexact bit is set,
1412 			 * instead of following Table 28. Bugid 1263234.
1413 			 */
1414 			{
1415 				extern int spitfire_bb_fsr_bug;
1416 
1417 				if (spitfire_bb_fsr_bug &&
1418 				    (fp->fpu_fsr & FSR_TEM_NX)) {
1419 					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
1420 					    (fp->fpu_fsr & FSR_CEXC_OF)) {
1421 						fp->fpu_fsr &= ~FSR_CEXC_OF;
1422 						fp->fpu_fsr |= FSR_CEXC_NX;
1423 						_fp_write_pfsr(&fp->fpu_fsr);
1424 						siginfo.si_code = FPE_FLTRES;
1425 					}
1426 					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
1427 					    (fp->fpu_fsr & FSR_CEXC_UF)) {
1428 						fp->fpu_fsr &= ~FSR_CEXC_UF;
1429 						fp->fpu_fsr |= FSR_CEXC_NX;
1430 						_fp_write_pfsr(&fp->fpu_fsr);
1431 						siginfo.si_code = FPE_FLTRES;
1432 					}
1433 				}
1434 			}
1435 #endif /* SF_V9_TABLE_28 */
1436 			rp->r_pc = rp->r_npc;
1437 			rp->r_npc += 4;
1438 		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
1439 			/*
1440 			 * The user had a trap handler installed.  Jump to
1441 			 * the trap handler instead of signalling the process.
1442 			 */
1443 			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
1444 			rp->r_npc = rp->r_pc + 4;
1445 			break;
1446 		}
1447 		siginfo.si_signo = SIGFPE;
1448 		fault = FLTFPE;
1449 		break;
1450 
1451 	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
1452 		siginfo.si_signo = SIGSEGV;
1453 		fault = FLTBOUNDS;
1454 		break;
1455 
1456 	case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
1457 	case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
1458 		alignfaults++;
1459 		lwp->lwp_state = LWP_SYS;
1460 		if (&vis1_partial_support != NULL) {
1461 			bzero(&siginfo, sizeof (siginfo));
1462 			if (vis1_partial_support(rp,
1463 			    &siginfo, &fault) == 0)
1464 				goto out;
1465 		}
1466 		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
1467 			rp->r_pc = rp->r_npc;
1468 			rp->r_npc += 4;
1469 			goto out;
1470 		}
1471 		fp = lwptofpu(curthread->t_lwp);
1472 		fp->fpu_qcnt = 0;
1473 		siginfo.si_signo = SIGSEGV;
1474 		siginfo.si_code = SEGV_MAPERR;
1475 		siginfo.si_addr = badaddr;
1476 		fault = FLTBOUNDS;
1477 		break;
1478 
1479 	case T_ALIGNMENT + T_USER:		/* user alignment error */
1480 		/*
1481 		 * If the user has to do unaligned references,
1482 		 * the ugly stuff gets done here.
1483 		 * Only handles vanilla loads and stores.
1484 		 */
1485 		alignfaults++;
1486 		if (p->p_fixalignment) {
1487 			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
1488 				rp->r_pc = rp->r_npc;
1489 				rp->r_npc += 4;
1490 				goto out;
1491 			}
1492 			siginfo.si_signo = SIGSEGV;
1493 			siginfo.si_code = SEGV_MAPERR;
1494 			siginfo.si_addr = badaddr;
1495 			fault = FLTBOUNDS;
1496 		} else {
1497 			siginfo.si_signo = SIGBUS;
1498 			siginfo.si_code = BUS_ADRALN;
1499 			if (rp->r_pc & 3) {	/* offending address, if pc */
1500 				siginfo.si_addr = (caddr_t)rp->r_pc;
1501 			} else {
1502 				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
1503 					siginfo.si_addr = badaddr;
1504 				else
1505 					siginfo.si_addr = (caddr_t)rp->r_pc;
1506 			}
1507 			fault = FLTACCESS;
1508 		}
1509 		break;
1510 
1511 	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
1512 		siginfo.si_signo = SIGILL;
1513 		inst = fetch_user_instr((caddr_t)rp->r_pc);
1514 		op3 = (inst >> 19) & 0x3F;
1515 		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
1516 			siginfo.si_code = ILL_ILLADR;
1517 		else
1518 			siginfo.si_code = ILL_ILLTRP;
1519 		fault = FLTILL;
1520 		break;
1521 
1522 	default:
1523 		(void) die(type, rp, addr, 0);
1524 		/*NOTREACHED*/
1525 	}
1526 
1527 	/*
1528 	 * We can't get here from a system trap.
1529 	 * Never restart any instruction which got here from an fp trap.
1530 	 */
1531 	ASSERT(type & T_USER);
1532 
1533 	trap_cleanup(rp, fault, &siginfo, 0);
1534 out:
1535 	trap_rtt();
1536 	(void) new_mstate(curthread, mstate);
1537 }
1538 
1539 void
1540 trap_rtt(void)
1541 {
1542 	klwp_id_t lwp = ttolwp(curthread);
1543 
1544 	/*
1545 	 * Restore register window if a debugger modified it.
1546 	 * Set up to perform a single-step if a debugger requested it.
1547 	 */
1548 	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
1549 		xregrestore(lwp, 0);
1550 
1551 	/*
1552 	 * Set state to LWP_USER here so preempt won't give us a kernel
1553 	 * priority if it occurs after this point.  Call CL_TRAPRET() to
1554 	 * restore the user-level priority.
1555 	 *
1556 	 * It is important that no locks (other than spinlocks) be entered
1557 	 * after this point before returning to user mode (unless lwp_state
1558 	 * is set back to LWP_SYS).
1559 	 */
1560 	lwp->lwp_state = LWP_USER;
1561 	if (curthread->t_trapret) {
1562 		curthread->t_trapret = 0;
1563 		thread_lock(curthread);
1564 		CL_TRAPRET(curthread);
1565 		thread_unlock(curthread);
1566 	}
1567 	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
1568 		preempt();
1569 	prunstop();
1570 	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
1571 		prdostep();
1572 
1573 	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1574 }
1575 
1576 #define	IS_LDASI(o)	\
1577 	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 ||	\
1578 	(o) == (uint32_t)0xC1800000)
1579 #define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
1580 #define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
1581 #define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)
1582 
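/*
 * Simulate a non-faulting load.  If the faulting instruction is a load
 * from an alternate space using one of the non-faulting ASIs (matched by
 * the IS_LDASI/IS_ASINF masks above), map a non-faulting segment (segnf)
 * over the offending address if it is unmapped, write zero to the
 * destination register(s), and step past the instruction.  Returns 1 if
 * the trap was handled this way, 0 otherwise.
 */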
1583 static int
1584 nfload(struct regs *rp, int *instrp)
1585 {
1586 	uint_t	instr, asi, op3, rd;
1587 	size_t	len;
1588 	struct as *as;
1589 	caddr_t addr;
1590 	FPU_DREGS_TYPE zero;
1591 	extern int segnf_create();
1592 
1593 	if (USERMODE(rp->r_tstate))
1594 		instr = fetch_user_instr((caddr_t)rp->r_pc);
1595 	else
1596 		instr = *(int *)rp->r_pc;
1597 
1598 	if (instrp)
1599 		*instrp = instr;
1600 
1601 	op3 = (uint_t)(instr & 0xC1E00000);
1602 	if (!IS_LDASI(op3))
1603 		return (0);
1604 	if (IS_IMM_ASI(instr))
1605 		asi = (instr & 0x1FE0) >> 5;
1606 	else
1607 		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
1608 		    TSTATE_ASI_MASK);
1609 	if (!IS_ASINF(asi))
1610 		return (0);
1611 	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
1612 		len = 1;
1613 		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
1614 		as_rangelock(as);
1615 		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
1616 			(void) as_map(as, addr, len, segnf_create, NULL);
1617 		as_rangeunlock(as);
1618 	}
1619 	zero = 0;
1620 	rd = (instr >> 25) & 0x1f;
1621 	if (IS_FLOAT(instr)) {
1622 		uint_t dbflg = ((instr >> 19) & 3) == 3;
1623 
1624 		if (dbflg) {		/* clever v9 reg encoding */
1625 			if (rd & 1)
1626 				rd = (rd & 0x1e) | 0x20;
1627 			rd >>= 1;
1628 		}
1629 		if (fpu_exists) {
1630 			if (!(_fp_read_fprs() & FPRS_FEF))
1631 				fp_enable();
1632 
1633 			if (dbflg)
1634 				_fp_write_pdreg(&zero, rd);
1635 			else
1636 				_fp_write_pfreg((uint_t *)&zero, rd);
1637 		} else {
1638 			kfpu_t *fp = lwptofpu(curthread->t_lwp);
1639 
1640 			if (!fp->fpu_en)
1641 				fp_enable();
1642 
1643 			if (dbflg)
1644 				fp->fpu_fr.fpu_dregs[rd] = zero;
1645 			else
1646 				fp->fpu_fr.fpu_regs[rd] = 0;
1647 		}
1648 	} else {
1649 		(void) putreg(&zero, rp, rd, &addr);
1650 		if (IS_LDDA(instr))
1651 			(void) putreg(&zero, rp, rd + 1, &addr);
1652 	}
1653 	rp->r_pc = rp->r_npc;
1654 	rp->r_npc += 4;
1655 	return (1);
1656 }
1657 
1658 kmutex_t atomic_nc_mutex;
1659 
1660 /*
1661  * The following couple of routines are for userland drivers which
1662  * do atomics to noncached addresses.  This sort of worked on previous
1663  * platforms -- the operation really wasn't atomic, but it didn't generate
1664  * a trap as sun4u systems do.
1665  */
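/*
 * Note that atomic_nc_mutex only serializes these emulations against one
 * another; it cannot make the emulated load/store pair atomic with
 * respect to other accesses to the underlying memory.
 */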
1666 static int
1667 swap_nc(struct regs *rp, int instr)
1668 {
1669 	uint64_t rdata, mdata;
1670 	caddr_t addr, badaddr;
1671 	uint_t tmp, rd;
1672 
1673 	(void) flush_user_windows_to_stack(NULL);
1674 	rd = (instr >> 25) & 0x1f;
1675 	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1676 		return (0);
1677 	if (getreg(rp, rd, &rdata, &badaddr))
1678 		return (0);
1679 	mutex_enter(&atomic_nc_mutex);
1680 	if (fuword32(addr, &tmp) == -1) {
1681 		mutex_exit(&atomic_nc_mutex);
1682 		return (0);
1683 	}
1684 	mdata = (u_longlong_t)tmp;
1685 	if (suword32(addr, (uint32_t)rdata) == -1) {
1686 		mutex_exit(&atomic_nc_mutex);
1687 		return (0);
1688 	}
1689 	(void) putreg(&mdata, rp, rd, &badaddr);
1690 	mutex_exit(&atomic_nc_mutex);
1691 	return (1);
1692 }
1693 
1694 static int
1695 ldstub_nc(struct regs *rp, int instr)
1696 {
1697 	uint64_t mdata;
1698 	caddr_t addr, badaddr;
1699 	uint_t rd;
1700 	uint8_t tmp;
1701 
1702 	(void) flush_user_windows_to_stack(NULL);
1703 	rd = (instr >> 25) & 0x1f;
1704 	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1705 		return (0);
1706 	mutex_enter(&atomic_nc_mutex);
1707 	if (fuword8(addr, &tmp) == -1) {
1708 		mutex_exit(&atomic_nc_mutex);
1709 		return (0);
1710 	}
1711 	mdata = (u_longlong_t)tmp;
1712 	if (suword8(addr, (uint8_t)0xff) == -1) {
1713 		mutex_exit(&atomic_nc_mutex);
1714 		return (0);
1715 	}
1716 	(void) putreg(&mdata, rp, rd, &badaddr);
1717 	mutex_exit(&atomic_nc_mutex);
1718 	return (1);
1719 }
1720 
1721 /*
1722  * This function helps instr_size() determine the operand size.
1723  * It is called for the extended ldda/stda asi's.
1724  */
1725 int
1726 extended_asi_size(int asi)
1727 {
1728 	switch (asi) {
1729 	case ASI_PST8_P:
1730 	case ASI_PST8_S:
1731 	case ASI_PST16_P:
1732 	case ASI_PST16_S:
1733 	case ASI_PST32_P:
1734 	case ASI_PST32_S:
1735 	case ASI_PST8_PL:
1736 	case ASI_PST8_SL:
1737 	case ASI_PST16_PL:
1738 	case ASI_PST16_SL:
1739 	case ASI_PST32_PL:
1740 	case ASI_PST32_SL:
1741 		return (8);
1742 	case ASI_FL8_P:
1743 	case ASI_FL8_S:
1744 	case ASI_FL8_PL:
1745 	case ASI_FL8_SL:
1746 		return (1);
1747 	case ASI_FL16_P:
1748 	case ASI_FL16_S:
1749 	case ASI_FL16_PL:
1750 	case ASI_FL16_SL:
1751 		return (2);
1752 	case ASI_BLK_P:
1753 	case ASI_BLK_S:
1754 	case ASI_BLK_PL:
1755 	case ASI_BLK_SL:
1756 	case ASI_BLK_COMMIT_P:
1757 	case ASI_BLK_COMMIT_S:
1758 		return (64);
1759 	}
1760 
1761 	return (0);
1762 }
1763 
1764 /*
1765  * Patch non-zero to disable preemption of threads in the kernel.
1766  */
1767 int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */
1768 
1769 struct kpreempt_cnts {	/* kernel preemption statistics */
1770 	int	kpc_idle;	/* executing idle thread */
1771 	int	kpc_intr;	/* executing interrupt thread */
1772 	int	kpc_clock;	/* executing clock thread */
1773 	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
1774 	int	kpc_notonproc;	/* thread is surrendering processor */
1775 	int	kpc_inswtch;	/* thread has ratified scheduling decision */
1776 	int	kpc_prilevel;	/* processor interrupt level is too high */
1777 	int	kpc_apreempt;	/* asynchronous preemption */
1778 	int	kpc_spreempt;	/* synchronous preemption */
1779 }	kpreempt_cnts;
1780 
1781 /*
1782  * kernel preemption: forced rescheduling
1783  *	preempt the running kernel thread.
1784  */
1785 void
1786 kpreempt(int asyncspl)
1787 {
1788 	if (IGNORE_KERNEL_PREEMPTION) {
1789 		aston(CPU->cpu_dispthread);
1790 		return;
1791 	}
1792 	/*
1793 	 * Check that conditions are right for kernel preemption
1794 	 */
1795 	do {
1796 		if (curthread->t_preempt) {
1797 			/*
1798 			 * Either this is a privileged thread (idle, panic,
1799 			 * interrupt) or the thread will check for preemption
1800 			 * when t_preempt is lowered.  We must specifically
1801 			 * handle the case where the thread is in the middle
1802 			 * of swtch (resume has been called) with t_preempt
1803 			 * set [the idle thread, or a thread already in
1804 			 * kpreempt] while a high priority thread is
1805 			 * available in the local dispatch queue.
1806 			 * In this case the resumed thread needs to take a
1807 			 * trap so that it can call kpreempt; we achieve
1808 			 * this by using siron().
1809 			 * How do we detect this condition?  The idle thread
1810 			 * is running and is in the midst of resume:
1811 			 * curthread->t_pri == -1 && CPU->cpu_dispthread !=
1812 			 * CPU->cpu_thread.
1813 			 * This check is only valid at high pil: resume is
1814 			 * called at high pil, and only resume_from_idle
1815 			 * changes the pil.
1816 			 */
1817 			if (curthread->t_pri < 0) {
1818 				kpreempt_cnts.kpc_idle++;
1819 				if (CPU->cpu_dispthread != CPU->cpu_thread)
1820 					siron();
1821 			} else if (curthread->t_flag & T_INTR_THREAD) {
1822 				kpreempt_cnts.kpc_intr++;
1823 				if (curthread->t_pil == CLOCK_LEVEL)
1824 					kpreempt_cnts.kpc_clock++;
1825 			} else {
1826 				kpreempt_cnts.kpc_blocked++;
1827 				if (CPU->cpu_dispthread != CPU->cpu_thread)
1828 					siron();
1829 			}
1830 			aston(CPU->cpu_dispthread);
1831 			return;
1832 		}
1833 		if (curthread->t_state != TS_ONPROC ||
1834 		    curthread->t_disp_queue != CPU->cpu_disp) {
1835 			/* this thread will be calling swtch() shortly */
1836 			kpreempt_cnts.kpc_notonproc++;
1837 			if (CPU->cpu_thread != CPU->cpu_dispthread) {
1838 				/* already in swtch(), force another */
1839 				kpreempt_cnts.kpc_inswtch++;
1840 				siron();
1841 			}
1842 			return;
1843 		}
1844 
1845 		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
1846 		    getpil()) >= DISP_LEVEL) {
1847 			/*
1848 			 * We can't preempt this thread if it is at
1849 			 * a PIL >= DISP_LEVEL since it may be holding
1850 			 * a spin lock (like sched_lock).
1851 			 */
1852 			siron();	/* check back later */
1853 			kpreempt_cnts.kpc_prilevel++;
1854 			return;
1855 		}
1856 
1857 		/*
1858 		 * block preemption so we don't have multiple preemptions
1859 		 * pending on the interrupt stack
1860 		 */
1861 		curthread->t_preempt++;
1862 		if (asyncspl != KPREEMPT_SYNC) {
1863 			splx(asyncspl);
1864 			kpreempt_cnts.kpc_apreempt++;
1865 		} else
1866 			kpreempt_cnts.kpc_spreempt++;
1867 
1868 		preempt();
1869 		curthread->t_preempt--;
1870 	} while (CPU->cpu_kprunrun);
1871 }
1872 
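/*
 * Decode the faulting instruction to classify the access for pagefault(),
 * since the hardware doesn't record read vs. write on an mmu miss.  FLUSH
 * is neither a load nor a store, so it is reported as S_OTHER.
 */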
1873 static enum seg_rw
1874 get_accesstype(struct regs *rp)
1875 {
1876 	uint32_t instr;
1877 
1878 	if (USERMODE(rp->r_tstate))
1879 		instr = fetch_user_instr((caddr_t)rp->r_pc);
1880 	else
1881 		instr = *(uint32_t *)rp->r_pc;
1882 
1883 	if (IS_FLUSH(instr))
1884 		return (S_OTHER);
1885 
1886 	if (IS_STORE(instr))
1887 		return (S_WRITE);
1888 	else
1889 		return (S_READ);
1890 }
1891 
1892 /*
1893  * Handle an asynchronous hardware error.
1894  * The policy is currently to send a hardware error contract event to
1895  * the process's process contract and to kill the process.  Eventually
1896  * we may want to instead send a special signal whose default
1897  * disposition is to generate the contract event.
1898  */
1899 void
1900 trap_async_hwerr(void)
1901 {
1902 	k_siginfo_t si;
1903 	proc_t *p = ttoproc(curthread);
1904 	extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);
1905 
1906 	errorq_drain(ue_queue); /* flush pending async error messages */
1907 
1908 	print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);
1909 
1910 	contract_process_hwerr(p->p_ct_process, p);
1911 
1912 	bzero(&si, sizeof (k_siginfo_t));
1913 	si.si_signo = SIGKILL;
1914 	si.si_code = SI_NOINFO;
1915 	trapsig(&si, 1);
1916 }
1917 
1918 /*
1919  * Handle bus error and bus timeout for a user process by sending SIGBUS
1920  * The type is either ASYNC_BERR or ASYNC_BTO.
1921  */
1922 void
1923 trap_async_berr_bto(int type, struct regs *rp)
1924 {
1925 	k_siginfo_t si;
1926 
1927 	errorq_drain(ue_queue); /* flush pending async error messages */
1928 	bzero(&si, sizeof (k_siginfo_t));
1929 
1930 	si.si_signo = SIGBUS;
1931 	si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
1932 	si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
1933 	si.si_errno = ENXIO;
1934 
1935 	trapsig(&si, 1);
1936 }
1937