/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 Joyent, Inc.  All rights reserved.
 */

#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>
#include <sys/contract/process_impl.h>

#ifdef  TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */


int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

static int
die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
{
	struct panic_trap_info ti;

#ifdef TRAPTRACE
	TRAPTRACE_FREEZE;
#endif

	ti.trap_regs = rp;
	ti.trap_type = type;
	ti.trap_addr = addr;
	ti.trap_mmu_fsr = mmu_fsr;

	curthread->t_panic_trap = &ti;

	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
		    "occurred in module \"%s\" due to %s",
		    type, (void *)rp, (void *)addr, mmu_fsr,
		    mod_containing_pc((caddr_t)rp->r_pc),
		    addr < (caddr_t)PAGESIZE ?
		    "a NULL pointer dereference" :
		    "an illegal access to a user address");
	} else {
		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
		    type, (void *)rp, (void *)addr, mmu_fsr);
	}

	return (0);	/* avoid optimization of restore in call's delay slot */
}

#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
int	ill_calls;
#endif

/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)
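
/*
 * Editorial worked example (added; the numbers follow the SPARC V9
 * encoding, which this file otherwise assumes silently): bits 31:30 of
 * an instruction word are "op" and bits 24:19 are "op3".  "flush %g0"
 * assembles to 0x81d80000 (op = 2, op3 = 0x3b), and 0x81d80000 &
 * 0xc1f80000 == 0x81d80000, so IS_FLUSH() matches it.  IS_PREFETCH()'s
 * looser mask 0xc1780000 clears bit 23, the bit that distinguishes
 * PREFETCH (op3 0x2d) from PREFETCHA (op3 0x3d), so one test catches
 * both flavors.
 */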

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	label_t *onfault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);
#ifdef sun4v
	extern boolean_t tick_stick_emulation_active;
#endif	/* sun4v */

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef	F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() has been called recursively and may
			 * have stopped the process, so do single-step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
		case T_DATA_MMU_MISS:
		case T_INSTR_MMU_MISS + T_USER:
		case T_DATA_MMU_MISS + T_USER:
		case T_DATA_PROT + T_USER:
		case T_AST + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_FLUSH_PCB + T_USER:
		case T_FLUSHW + T_USER:
			break;

		default:
			FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
			    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
			break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code  = ILL_ILLTRP;
				siginfo.si_addr  = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault if
			 * we're doing a copyin/copyout from a kernel thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:		/* system data mmu miss */
	case T_DATA_PROT:		/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
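		/*
		 * Hedged sketch of the caller-side pattern (assumed from
		 * <sys/ontrap.h>, not restated in this file):
		 *
		 *	on_trap_data_t otd;
		 *	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		 *		risky load or store;
		 *	} else {
		 *		recovery; reached via otd.ot_trampoline;
		 *	}
		 *	no_trap();
		 *
		 * The code below is the trap-side half: it records the
		 * trap in ot_trap and points pc/npc at the trampoline.
		 */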
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
		lofault = curthread->t_lofault;
		onfault = curthread->t_onfault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses, so it is not easy to find out whether
			 * the access was a read or a write; we need
			 * to decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
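		/*
		 * Hedged illustration (field layout per the sun4 MMU
		 * headers, assumed here): the tag access register packs
		 * VA[63:13] with a 13-bit context, so TAGACC_CTX_MASK is
		 * 0x1fff and TAGACC_VADDR_MASK is ~0x1fff.  E.g. a kernel
		 * fault on VA 0x30001234 arrives as 0x30000000 | KCONTEXT
		 * (== 0): iskernel below is true and addr masks down to
		 * the page-aligned 0x30000000.
		 */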
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault and onfault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;
		curthread->t_onfault = onfault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses,
			 * so it is not easy to find out whether the access
			 * was a read or a write; we need to decode the
			 * actual instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo,	sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeded, we're done.
			 * Otherwise, try to grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault. Unfortunately it is
				 * very slow, and this is an important
				 * code path. Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * check if the instruction was a
				 * flush.  ABI allows users to specify
				 * an illegal address on the flush
				 * instruction so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to WATCH_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    (pcb->pcb_flags & NORMAL_STEP)) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references,
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);

		bzero(&siginfo, sizeof (siginfo));
#ifdef	sun4v
		/*
		 * If this instruction fault is a non-privileged %tick
		 * or %stick trap, and %tick/%stick user emulation is
		 * enabled as a result of an OS suspend, then simulate
		 * the register read. We rely on simulate_rdtick to fail
		 * if the instruction is not a %tick or %stick read,
		 * causing us to fall through to the normal privileged
		 * instruction handling.
		 */
		if (tick_stick_emulation_active &&
		    (X_FAULT_TYPE(mmu_fsr) == FT_NEW_PRVACT) &&
		    simulate_rdtick(rp) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
#endif
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) &&
			    (op3 == IOP_V8_STQFA || op3 == IOP_V8_STDFA))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code  = FPE_INTOVF;
		siginfo.si_addr  = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because it
		 * wasn't resident or because it was misaligned.
		 */
	{
		int error;
		caddr_t sp;

		error = flush_user_windows_to_stack(&sp);
		/*
		 * Possible errors:
		 *	error copying out
		 *	unaligned stack pointer
		 * The first is given to us as the return value
		 * from flush_user_windows_to_stack().  The second
		 * results in residual windows in the pcb.
		 */
		if (error != 0) {
			/*
			 * EINTR comes from a signal during copyout;
			 * we should not post another signal.
			 */
			if (error != EINTR) {
				/*
				 * Zap the process with a SIGSEGV - process
				 * may be managing its own stack growth by
				 * taking SIGSEGVs on a different signal stack.
				 */
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGSEGV;
				siginfo.si_code  = SEGV_MAPERR;
				siginfo.si_addr  = sp;
				fault = FLTBOUNDS;
			}
			break;
		} else if (mpcb->mpcb_wbcnt) {
			bzero(&siginfo, sizeof (siginfo));
			siginfo.si_signo = SIGILL;
			siginfo.si_code  = ILL_BADSTK;
			siginfo.si_addr  = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
	}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction. The flushw can trap if any of the
		 * stack pages are not writable for whatever reason. In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again. Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:		/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
	}
	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq (their AIO_POLL bit is set), the kernel
		 * should copy out their result_t to user memory.  By copying
		 * out the result_t, the user can poll on memory, waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct _fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 *	came here directly from _fp_ieee_exception,
		 *	which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct _fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28. Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:		/* user alignment error */
		/*
		 * If the user has to do unaligned references,
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}

void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 ||	\
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)
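
/*
 * Editorial example (ASI numbers from the V9/sun4u ASI map, assumed
 * rather than defined here): the non-faulting ASIs are ASI_PNF 0x82,
 * ASI_SNF 0x83, ASI_PNFL 0x8a and ASI_SNFL 0x8b.  Each of these ANDed
 * with 0xf6 gives 0x82, and no other ASI value does, which is exactly
 * the test IS_ASINF() performs.
 */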

static int
nfload(struct regs *rp, int *instrp)
{
	uint_t	instr, asi, op3, rd;
	size_t	len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
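
/*
 * Hedged sketch (added) of what swap_nc() below emulates for
 * "swap [addr], %rd":
 *
 *	tmp = *(uint32_t *)addr;	(fuword32)
 *	*(uint32_t *)addr = %rd;	(suword32)
 *	%rd = tmp;			(putreg)
 *
 * atomic_nc_mutex serializes these emulations against each other only;
 * as the comment above notes, the sequence is not truly atomic with
 * respect to other CPUs or devices touching the location directly.
 */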
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}
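
/*
 * Usage sketch (hypothetical caller, for illustration): when
 * instr_size() decodes something like "ldda [%o0]ASI_FL16_P, %d0" it
 * can call extended_asi_size(ASI_FL16_P) and get 2, i.e. that "short
 * floating point" access moves 2 bytes rather than the 8 an ordinary
 * ldda would.
 */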

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
}	kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * This is either a privileged thread (idle, panic,
			 * interrupt), or a thread that will check for
			 * preemption when t_preempt is lowered.  We need to
			 * specifically handle the case where the thread is
			 * in the middle of swtch (resume has been called)
			 * and has its t_preempt set [the idle thread and a
			 * thread which is already in kpreempt], and then a
			 * high priority thread becomes available in the
			 * local dispatch queue.  In this case the resumed
			 * thread needs to take a trap so that it can call
			 * kpreempt.  We achieve this by using siron().
			 * How do we detect this condition?  The idle thread
			 * is running and is in the midst of resume when
			 * curthread->t_pri == -1 && CPU->cpu_dispthread !=
			 * CPU->cpu_thread.  We need to ensure that this
			 * happens only at high pil; resume is called at
			 * high pil, and only in resume_from_idle is the
			 * pil changed.
			 */
			if (curthread->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}

/*
 * Handle an asynchronous hardware error.
 * The policy is currently to send a hardware error contract event to
 * the process's process contract and to kill the process.  Eventually
 * we may want to instead send a special signal whose default
 * disposition is to generate the contract event.
 */
void
trap_async_hwerr(void)
{
	k_siginfo_t si;
	proc_t *p = ttoproc(curthread);
	extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);

	errorq_drain(ue_queue); /* flush pending async error messages */

	print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);

	contract_process_hwerr(p->p_ct_process, p);

	bzero(&si, sizeof (k_siginfo_t));
	si.si_signo = SIGKILL;
	si.si_code = SI_NOINFO;
	trapsig(&si, 1);
}

/*
 * Handle bus error and bus timeout for a user process by sending SIGBUS.
 * The type is either ASYNC_BERR or ASYNC_BTO.
 */
void
trap_async_berr_bto(int type, struct regs *rp)
{
	k_siginfo_t si;

	errorq_drain(ue_queue); /* flush pending async error messages */
	bzero(&si, sizeof (k_siginfo_t));

	si.si_signo = SIGBUS;
	si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
	si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
	si.si_errno = ENXIO;

	trapsig(&si, 1);
}