/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>
#include <sys/contract/process_impl.h>

#ifdef	TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

static int
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct panic_trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}

	return (0);	/* avoid optimization of restore in call's delay slot */
}

#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)
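
/*
 * The IS_* predicates above are mask/match pairs over the fixed fields
 * of a SPARC instruction word (op in bits 31:30, op3 in bits 24:19).
 * For example, IS_FLUSH matches op 2, op3 0x3b (FLUSH) with any
 * register and immediate fields, and IS_PREFETCH leaves op3 bit 4 out
 * of its mask so that both PREFETCH (op3 0x2d) and PREFETCHA (op3
 * 0x3d) match.  IS_FLOAT tests op3 bit 5, which selects the
 * floating-point load/store group, and IS_STORE tests op3 bit 2,
 * which is set for the store flavors of the load/store encodings.
 */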

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);
#ifdef sun4v
	extern boolean_t tick_stick_emulation_active;
#endif	/* sun4v */

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef	F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & T_USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() may have been called recursively and may
			 * have stopped the process, so do single-step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
		case T_DATA_MMU_MISS:
		case T_INSTR_MMU_MISS + T_USER:
		case T_DATA_MMU_MISS + T_USER:
		case T_DATA_PROT + T_USER:
		case T_AST + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_FLUSH_PCB + T_USER:
		case T_FLUSHW + T_USER:
			break;

		default:
			FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
			    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
			break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code  = ILL_ILLTRP;
				siginfo.si_addr  = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault if
			 * we're doing a copyin/copyout from a kernel thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:		/* system data mmu miss */
	case T_DATA_PROT:		/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
		lofault = curthread->t_lofault;
		curthread->t_lofault = 0;
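		/*
		 * t_lofault is saved and cleared across the pagefault()
		 * call below so that a fault taken while resolving this
		 * one is not silently redirected to the copy routine's
		 * error handler; it is restored once pagefault() returns.
		 */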

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses, so it is not easy to tell whether the
			 * access was a read or a write; we need to decode
			 * the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine whether the access was to kernel or user
		 * address space.  The addr passed into trap() is really
		 * the tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses,
			 * so it is not easy to tell whether the access was a
			 * read or a write; we need to decode the actual
			 * instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single-stepping, do not call pagefault().
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));
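
			/*
			 * A sketch of the watchpoint logic below: if the
			 * access doesn't actually overlap a watchpoint we
			 * emulate or single-step over it; if it hits a
			 * watchpoint with trap-after semantics we step and
			 * then take the fault; otherwise we post
			 * SIGTRAP/FLTWATCH immediately.
			 */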

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeds, we're done.
			 * Otherwise grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault.  Unfortunately it is
				 * very slow, and this is an important
				 * code path.  Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * Check if the instruction was a flush.
				 * The ABI allows users to specify an
				 * illegal address on the flush
				 * instruction, so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to WATCH_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    (pcb->pcb_flags & NORMAL_STEP)) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);

		bzero(&siginfo, sizeof (siginfo));
#ifdef	sun4v
		/*
		 * If this instruction fault is a non-privileged %tick
		 * or %stick trap, and %tick/%stick user emulation is
		 * enabled as a result of an OS suspend, then simulate
		 * the register read. We rely on simulate_rdtick to fail
		 * if the instruction is not a %tick or %stick read,
		 * causing us to fall through to the normal privileged
		 * instruction handling.
		 */
		if (tick_stick_emulation_active &&
		    (X_FAULT_TYPE(mmu_fsr) == FT_NEW_PRVACT) &&
		    simulate_rdtick(rp) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
#endif
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) &&
			    ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code  = FPE_INTOVF;
		siginfo.si_addr  = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user, it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
	{
		int error;
		caddr_t sp;

		error = flush_user_windows_to_stack(&sp);
		/*
		 * Possible errors:
		 *	error copying out
		 *	unaligned stack pointer
		 * The first is given to us as the return value
		 * from flush_user_windows_to_stack().  The second
		 * results in residual windows in the pcb.
		 */
		if (error != 0) {
			/*
			 * EINTR comes from a signal during copyout;
			 * we should not post another signal.
			 */
			if (error != EINTR) {
				/*
				 * Zap the process with a SIGSEGV - process
				 * may be managing its own stack growth by
				 * taking SIGSEGVs on a different signal stack.
				 */
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGSEGV;
				siginfo.si_code  = SEGV_MAPERR;
				siginfo.si_addr  = sp;
				fault = FLTBOUNDS;
			}
			break;
		} else if (mpcb->mpcb_wbcnt) {
			bzero(&siginfo, sizeof (siginfo));
			siginfo.si_signo = SIGILL;
			siginfo.si_code  = ILL_BADSTK;
			siginfo.si_addr  = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
	}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction. The flushw can trap if any of the
		 * stack pages are not writable for whatever reason. In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again. Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:		/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
	}
	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

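/*
 * Common post-trap bookkeeping for trap() and fpu_trap(): record the
 * fault for real-time (SIGPROF) profiling, let a debugger stop the lwp
 * if it declared the fault an event of interest, deliver any resulting
 * signal, and process pending AST work (timer rebinding, kaio cleanup,
 * holds, and signals).
 */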
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there
		 *	   is no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests on the per-process poll queue,
		 * aiop->aio_pollq, whose AIO_POLL bit is set, the kernel
		 * should copy out their result_t to user memory.  By
		 * copying out the result_t, the user can poll on memory
		 * waiting for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 *	came here directly from _fp_ieee_exception,
		 *	which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28. Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:		/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}

void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 ||	\
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)
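
/*
 * nfload() below emulates a load through one of the SPARC V9 no-fault
 * ASIs.  IS_LDASI matches, by op and the high bits of op3, the
 * alternate-space integer and floating-point load groups handled here;
 * IS_IMM_ASI tests the i bit to see whether the ASI comes from the
 * instruction's immediate field or from the %asi register; and
 * IS_ASINF matches the four no-fault ASIs (primary/secondary, big-
 * and little-endian: 0x82, 0x83, 0x8A, 0x8B).  A no-fault load from
 * an unmapped address must not signal; instead the destination
 * register is zeroed and the instruction is stepped over.
 */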

static int
nfload(struct regs *rp, int *instrp)
{
	uint_t	instr, asi, op3, rd;
	size_t	len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
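
	/*
	 * It is a no-fault load.  If there is a gap in the address space
	 * at the target address, map a one-byte seg_nf segment over it so
	 * that repeated no-fault loads from the same unmapped page stop
	 * trapping.
	 */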
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
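			/*
			 * SPARC V9 encodes double FP registers %d32-%d62
			 * by setting bit 0 of the 5-bit rd field; undo
			 * that to recover the even register number, then
			 * halve it to index the 64-bit register file.
			 */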
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
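
/*
 * swap_nc() emulates SWAP (atomically exchange a register with a
 * 32-bit word in memory) for a noncached address.  The emulation is
 * atomic only with respect to the other emulated atomics here, since
 * both routines serialize on atomic_nc_mutex; a real hardware atomic
 * to the same address would not observe the lock.
 */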
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

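/*
 * ldstub_nc() likewise emulates LDSTUB (atomically load a byte and
 * set it to 0xff) for a noncached address, under the same
 * atomic_nc_mutex caveat as swap_nc() above.
 */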
static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
}	kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * Either a privileged thread (idle, panic, interrupt)
			 * or one that will check for preemption when
			 * t_preempt is lowered.  We need to specifically
			 * handle the case where the thread is in the middle
			 * of swtch (resume has been called) and has its
			 * t_preempt set [the idle thread, or a thread
			 * already in kpreempt] and then a high priority
			 * thread becomes available in the local dispatch
			 * queue.  In this case the resumed thread needs to
			 * take a trap so that it can call kpreempt; we
			 * achieve this by using siron().
			 * How do we detect this condition: the idle thread
			 * is running and is in the midst of resume when
			 * curthread->t_pri == -1 && CPU->cpu_dispthread !=
			 * CPU->cpu_thread.  This can happen only at high
			 * pil, since resume is called at high pil and only
			 * resume_from_idle changes the pil.
			 */
			if (curthread->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

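/*
 * Decode the faulting instruction to classify the access for the VM
 * code, since the hardware doesn't record whether a miss was a read
 * or a write.  FLUSH is classified as S_OTHER because it neither
 * reads nor writes the data at the target address.
 */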
static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}

/*
 * Handle an asynchronous hardware error.
 * The policy is currently to send a hardware error contract event to
 * the process's process contract and to kill the process.  Eventually
 * we may want to instead send a special signal whose default
 * disposition is to generate the contract event.
 */
void
trap_async_hwerr(void)
{
	k_siginfo_t si;
	proc_t *p = ttoproc(curthread);
	extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);

	errorq_drain(ue_queue); /* flush pending async error messages */

	print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);

	contract_process_hwerr(p->p_ct_process, p);

	bzero(&si, sizeof (k_siginfo_t));
	si.si_signo = SIGKILL;
	si.si_code = SI_NOINFO;
	trapsig(&si, 1);
}

/*
 * Handle bus error and bus timeout for a user process by sending SIGBUS
 * The type is either ASYNC_BERR or ASYNC_BTO.
 */
void
trap_async_berr_bto(int type, struct regs *rp)
{
	k_siginfo_t si;

	errorq_drain(ue_queue); /* flush pending async error messages */
	bzero(&si, sizeof (k_siginfo_t));

	si.si_signo = SIGBUS;
	si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
	si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
	si.si_errno = ENXIO;

	trapsig(&si, 1);
}