xref: /titanic_41/usr/src/uts/sun4v/ml/trap_table.s (revision d89fccd8788afe1e920f842edd883fe192a1b8fe)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if !defined(lint)
29#include "assym.h"
30#endif /* !lint */
31#include <sys/asm_linkage.h>
32#include <sys/privregs.h>
33#include <sys/sun4asi.h>
34#include <sys/machasi.h>
35#include <sys/hypervisor_api.h>
36#include <sys/machtrap.h>
37#include <sys/machthread.h>
38#include <sys/pcb.h>
39#include <sys/pte.h>
40#include <sys/mmu.h>
41#include <sys/machpcb.h>
42#include <sys/async.h>
43#include <sys/intreg.h>
44#include <sys/scb.h>
45#include <sys/psr_compat.h>
46#include <sys/syscall.h>
47#include <sys/machparam.h>
48#include <sys/traptrace.h>
49#include <vm/hat_sfmmu.h>
50#include <sys/archsystm.h>
51#include <sys/utrap.h>
52#include <sys/clock.h>
53#include <sys/intr.h>
54#include <sys/fpu/fpu_simulator.h>
55#include <vm/seg_spt.h>
56
57/*
58 * WARNING: If you add a fast trap handler which can be invoked by a
59 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
60 * instead of "done" instruction to return back to the user mode. See
61 * comments for the "fast_trap_done" entry point for more information.
62 *
63 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
64 * cases where you always want to process any pending interrupts before
65 * returning back to the user mode.
66 */
67#define	FAST_TRAP_DONE		\
68	ba,a	fast_trap_done
69
70#define	FAST_TRAP_DONE_CHK_INTR	\
71	ba,a	fast_trap_done_chk_intr
72
73/*
74 * SPARC V9 Trap Table
75 *
76 * Most of the trap handlers are made from common building
77 * blocks, and some are instantiated multiple times within
78 * the trap table. So, I build a bunch of macros, then
79 * populate the table using only the macros.
80 *
81 * Many macros branch to sys_trap.  Its calling convention is:
82 *	%g1		kernel trap handler
83 *	%g2, %g3	args for above
84 *	%g4		desire %pil
85 */
86
87#ifdef	TRAPTRACE
88
89/*
90 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
91 */
92#define	TT_TRACE(label)		\
93	ba	label		;\
94	rd	%pc, %g7
95#define	TT_TRACE_INS	2
96
97#define	TT_TRACE_L(label)	\
98	ba	label		;\
99	rd	%pc, %l4	;\
100	clr	%l4
101#define	TT_TRACE_L_INS	3
102
103#else
104
105#define	TT_TRACE(label)
106#define	TT_TRACE_INS	0
107
108#define	TT_TRACE_L(label)
109#define	TT_TRACE_L_INS	0
110
111#endif
112
113/*
114 * This macro is used to update per cpu mmu stats in perf critical
115 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
116 * is defined.
117 */
118#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
119#define	HAT_PERCPU_DBSTAT(stat)			\
120	mov	stat, %g1			;\
121	ba	stat_mmu			;\
122	rd	%pc, %g7
123#else
124#define	HAT_PERCPU_DBSTAT(stat)
125#endif /* DEBUG || SFMMU_STAT_GATHER */
126
127/*
128 * This first set are funneled to trap() with %tt as the type.
129 * Trap will then either panic or send the user a signal.
130 */
131/*
132 * NOT is used for traps that just shouldn't happen.
133 * It comes in both single and quadruple flavors.
134 */
135#if !defined(lint)
136	.global	trap
137#endif /* !lint */
138#define	NOT			\
139	TT_TRACE(trace_gen)	;\
140	set	trap, %g1	;\
141	rdpr	%tt, %g3	;\
142	ba,pt	%xcc, sys_trap	;\
143	sub	%g0, 1, %g4	;\
144	.align	32
145#define	NOT4	NOT; NOT; NOT; NOT
146
147#define	NOTP				\
148	TT_TRACE(trace_gen)		;\
149	ba,pt	%xcc, ptl1_panic	;\
150	  mov	PTL1_BAD_TRAP, %g1	;\
151	.align	32
152#define	NOTP4	NOTP; NOTP; NOTP; NOTP
153
154/*
155 * RED is for traps that use the red mode handler.
156 * We should never see these either.
157 */
158#define	RED	NOT
159/*
160 * BAD is used for trap vectors we don't have a kernel
161 * handler for.
162 * It also comes in single and quadruple versions.
163 */
164#define	BAD	NOT
165#define	BAD4	NOT4
166
167#define	DONE			\
168	done;			\
169	.align	32
170
171/*
172 * TRAP vectors to the trap() function.
173 * Its main use is for user errors.
174 */
175#if !defined(lint)
176	.global	trap
177#endif /* !lint */
178#define	TRAP(arg)		\
179	TT_TRACE(trace_gen)	;\
180	set	trap, %g1	;\
181	mov	arg, %g3	;\
182	ba,pt	%xcc, sys_trap	;\
183	sub	%g0, 1, %g4	;\
184	.align	32
185
186/*
187 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
188 * depending on the "which" parameter (should be syscall_trap,
189 * syscall_trap32, or nosys for unused system call traps).
190 */
191#define	SYSCALL(which)			\
192	TT_TRACE(trace_gen)		;\
193	set	(which), %g1		;\
194	ba,pt	%xcc, sys_trap		;\
195	sub	%g0, 1, %g4		;\
196	.align	32
197
198/*
199 * GOTO just jumps to a label.
200 * It's used for things that can be fixed without going thru sys_trap.
201 */
202#define	GOTO(label)		\
203	.global	label		;\
204	ba,a	label		;\
205	.empty			;\
206	.align	32
207
208/*
209 * GOTO_TT just jumps to a label.
210 * correctable ECC error traps at level 0 and 1 will use this macro.
211 * It's used for things that can be fixed without going thru sys_trap.
212 */
213#define	GOTO_TT(label, ttlabel)		\
214	.global	label		;\
215	TT_TRACE(ttlabel)	;\
216	ba,a	label		;\
217	.empty			;\
218	.align	32
219
220/*
221 * Privileged traps
222 * Takes breakpoint if privileged, calls trap() if not.
223 */
224#define	PRIV(label)			\
225	rdpr	%tstate, %g1		;\
226	btst	TSTATE_PRIV, %g1	;\
227	bnz	label			;\
228	rdpr	%tt, %g3		;\
229	set	trap, %g1		;\
230	ba,pt	%xcc, sys_trap		;\
231	sub	%g0, 1, %g4		;\
232	.align	32
233
234
235/*
236 * DTrace traps.
237 */
238#define	DTRACE_FASTTRAP			\
239	.global dtrace_fasttrap_probe				;\
240	.global dtrace_fasttrap_probe_ptr			;\
241	sethi	%hi(dtrace_fasttrap_probe_ptr), %g4		;\
242	ldn	[%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4	;\
243	set	dtrace_fasttrap_probe, %g1			;\
244	brnz,pn	%g4, user_trap					;\
245	sub	%g0, 1, %g4					;\
246	FAST_TRAP_DONE						;\
247	.align	32
248
249#define	DTRACE_PID			\
250	.global dtrace_pid_probe				;\
251	set	dtrace_pid_probe, %g1				;\
252	ba,pt	%xcc, user_trap					;\
253	sub	%g0, 1, %g4					;\
254	.align	32
255
256#define	DTRACE_RETURN			\
257	.global dtrace_return_probe				;\
258	set	dtrace_return_probe, %g1			;\
259	ba,pt	%xcc, user_trap					;\
260	sub	%g0, 1, %g4					;\
261	.align	32
262
263/*
264 * REGISTER WINDOW MANAGEMENT MACROS
265 */
266
267/*
268 * various convenient units of padding
269 */
270#define	SKIP(n)	.skip 4*(n)
271
272/*
273 * CLEAN_WINDOW is the simple handler for cleaning a register window.
274 */
275#define	CLEAN_WINDOW						\
276	TT_TRACE_L(trace_win)					;\
277	rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
278	clr %l0; clr %l1; clr %l2; clr %l3			;\
279	clr %l4; clr %l5; clr %l6; clr %l7			;\
280	clr %o0; clr %o1; clr %o2; clr %o3			;\
281	clr %o4; clr %o5; clr %o6; clr %o7			;\
282	retry; .align 128
283
284#if !defined(lint)
285
286/*
287 * If we get an unresolved tlb miss while in a window handler, the fault
288 * handler will resume execution at the last instruction of the window
289 * handler, instead of delivering the fault to the kernel.  Spill handlers
290 * use this to spill windows into the wbuf.
291 *
292 * The mixed handler works by checking %sp, and branching to the correct
293 * handler.  This is done by branching back to label 1: for 32b frames,
294 * or label 2: for 64b frames; which implies the handler order is: 32b,
295 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
296 * allow the branches' delay slots to contain useful instructions.
297 */
298
299/*
300 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
301 * assumes that the kernel context and the nucleus context are the
302 * same.  The stack pointer is required to be eight-byte aligned even
303 * though this code only needs it to be four-byte aligned.
304 */
305#define	SPILL_32bit(tail)					\
306	srl	%sp, 0, %sp					;\
3071:	st	%l0, [%sp + 0]					;\
308	st	%l1, [%sp + 4]					;\
309	st	%l2, [%sp + 8]					;\
310	st	%l3, [%sp + 12]					;\
311	st	%l4, [%sp + 16]					;\
312	st	%l5, [%sp + 20]					;\
313	st	%l6, [%sp + 24]					;\
314	st	%l7, [%sp + 28]					;\
315	st	%i0, [%sp + 32]					;\
316	st	%i1, [%sp + 36]					;\
317	st	%i2, [%sp + 40]					;\
318	st	%i3, [%sp + 44]					;\
319	st	%i4, [%sp + 48]					;\
320	st	%i5, [%sp + 52]					;\
321	st	%i6, [%sp + 56]					;\
322	st	%i7, [%sp + 60]					;\
323	TT_TRACE_L(trace_win)					;\
324	saved							;\
325	retry							;\
326	SKIP(31-19-TT_TRACE_L_INS)				;\
327	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
328	.empty
329
330/*
331 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
332 * wide address space via the designated asi.  It is used to spill
333 * non-kernel windows.  The stack pointer is required to be eight-byte
334 * aligned even though this code only needs it to be four-byte
335 * aligned.
336 */
337#define	SPILL_32bit_asi(asi_num, tail)				\
338	srl	%sp, 0, %sp					;\
3391:	sta	%l0, [%sp + %g0]asi_num				;\
340	mov	4, %g1						;\
341	sta	%l1, [%sp + %g1]asi_num				;\
342	mov	8, %g2						;\
343	sta	%l2, [%sp + %g2]asi_num				;\
344	mov	12, %g3						;\
345	sta	%l3, [%sp + %g3]asi_num				;\
346	add	%sp, 16, %g4					;\
347	sta	%l4, [%g4 + %g0]asi_num				;\
348	sta	%l5, [%g4 + %g1]asi_num				;\
349	sta	%l6, [%g4 + %g2]asi_num				;\
350	sta	%l7, [%g4 + %g3]asi_num				;\
351	add	%g4, 16, %g4					;\
352	sta	%i0, [%g4 + %g0]asi_num				;\
353	sta	%i1, [%g4 + %g1]asi_num				;\
354	sta	%i2, [%g4 + %g2]asi_num				;\
355	sta	%i3, [%g4 + %g3]asi_num				;\
356	add	%g4, 16, %g4					;\
357	sta	%i4, [%g4 + %g0]asi_num				;\
358	sta	%i5, [%g4 + %g1]asi_num				;\
359	sta	%i6, [%g4 + %g2]asi_num				;\
360	sta	%i7, [%g4 + %g3]asi_num				;\
361	TT_TRACE_L(trace_win)					;\
362	saved							;\
363	retry							;\
364	SKIP(31-25-TT_TRACE_L_INS)				;\
365	ba,a,pt %xcc, fault_32bit_/**/tail			;\
366	.empty
367
368#define	SPILL_32bit_tt1(asi_num, tail)				\
369	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
370	.empty							;\
371	.align 128
372
373
374/*
375 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
376 * that the kernel context and the nucleus context are the same.  The
377 * stack pointer is required to be eight-byte aligned even though this
378 * code only needs it to be four-byte aligned.
379 */
380#define	FILL_32bit(tail)					\
381	srl	%sp, 0, %sp					;\
3821:	TT_TRACE_L(trace_win)					;\
383	ld	[%sp + 0], %l0					;\
384	ld	[%sp + 4], %l1					;\
385	ld	[%sp + 8], %l2					;\
386	ld	[%sp + 12], %l3					;\
387	ld	[%sp + 16], %l4					;\
388	ld	[%sp + 20], %l5					;\
389	ld	[%sp + 24], %l6					;\
390	ld	[%sp + 28], %l7					;\
391	ld	[%sp + 32], %i0					;\
392	ld	[%sp + 36], %i1					;\
393	ld	[%sp + 40], %i2					;\
394	ld	[%sp + 44], %i3					;\
395	ld	[%sp + 48], %i4					;\
396	ld	[%sp + 52], %i5					;\
397	ld	[%sp + 56], %i6					;\
398	ld	[%sp + 60], %i7					;\
399	restored						;\
400	retry							;\
401	SKIP(31-19-TT_TRACE_L_INS)				;\
402	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
403	.empty
404
405/*
406 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
407 * wide address space via the designated asi.  It is used to fill
408 * non-kernel windows.  The stack pointer is required to be eight-byte
409 * aligned even though this code only needs it to be four-byte
410 * aligned.
411 */
412#define	FILL_32bit_asi(asi_num, tail)				\
413	srl	%sp, 0, %sp					;\
4141:	TT_TRACE_L(trace_win)					;\
415	mov	4, %g1						;\
416	lda	[%sp + %g0]asi_num, %l0				;\
417	mov	8, %g2						;\
418	lda	[%sp + %g1]asi_num, %l1				;\
419	mov	12, %g3						;\
420	lda	[%sp + %g2]asi_num, %l2				;\
421	lda	[%sp + %g3]asi_num, %l3				;\
422	add	%sp, 16, %g4					;\
423	lda	[%g4 + %g0]asi_num, %l4				;\
424	lda	[%g4 + %g1]asi_num, %l5				;\
425	lda	[%g4 + %g2]asi_num, %l6				;\
426	lda	[%g4 + %g3]asi_num, %l7				;\
427	add	%g4, 16, %g4					;\
428	lda	[%g4 + %g0]asi_num, %i0				;\
429	lda	[%g4 + %g1]asi_num, %i1				;\
430	lda	[%g4 + %g2]asi_num, %i2				;\
431	lda	[%g4 + %g3]asi_num, %i3				;\
432	add	%g4, 16, %g4					;\
433	lda	[%g4 + %g0]asi_num, %i4				;\
434	lda	[%g4 + %g1]asi_num, %i5				;\
435	lda	[%g4 + %g2]asi_num, %i6				;\
436	lda	[%g4 + %g3]asi_num, %i7				;\
437	restored						;\
438	retry							;\
439	SKIP(31-25-TT_TRACE_L_INS)				;\
440	ba,a,pt %xcc, fault_32bit_/**/tail			;\
441	.empty
442
443
444/*
445 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
446 * assumes that the kernel context and the nucleus context are the
447 * same.  The stack pointer is required to be eight-byte aligned.
448 */
449#define	SPILL_64bit(tail)					\
4502:	stx	%l0, [%sp + V9BIAS64 + 0]			;\
451	stx	%l1, [%sp + V9BIAS64 + 8]			;\
452	stx	%l2, [%sp + V9BIAS64 + 16]			;\
453	stx	%l3, [%sp + V9BIAS64 + 24]			;\
454	stx	%l4, [%sp + V9BIAS64 + 32]			;\
455	stx	%l5, [%sp + V9BIAS64 + 40]			;\
456	stx	%l6, [%sp + V9BIAS64 + 48]			;\
457	stx	%l7, [%sp + V9BIAS64 + 56]			;\
458	stx	%i0, [%sp + V9BIAS64 + 64]			;\
459	stx	%i1, [%sp + V9BIAS64 + 72]			;\
460	stx	%i2, [%sp + V9BIAS64 + 80]			;\
461	stx	%i3, [%sp + V9BIAS64 + 88]			;\
462	stx	%i4, [%sp + V9BIAS64 + 96]			;\
463	stx	%i5, [%sp + V9BIAS64 + 104]			;\
464	stx	%i6, [%sp + V9BIAS64 + 112]			;\
465	stx	%i7, [%sp + V9BIAS64 + 120]			;\
466	TT_TRACE_L(trace_win)					;\
467	saved							;\
468	retry							;\
469	SKIP(31-18-TT_TRACE_L_INS)				;\
470	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
471	.empty
472
473#define	SPILL_64bit_ktt1(tail)				\
474	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
475	.empty							;\
476	.align 128
477
478#define	SPILL_mixed_ktt1(tail)				\
479	btst	1, %sp						;\
480	bz,a,pt	%xcc, fault_32bit_/**/tail			;\
481	srl	%sp, 0, %sp					;\
482	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
483	.empty							;\
484	.align 128
485
486/*
487 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
488 * wide address space via the designated asi.  It is used to spill
489 * non-kernel windows.  The stack pointer is required to be eight-byte
490 * aligned.
491 */
492#define	SPILL_64bit_asi(asi_num, tail)				\
493	mov	0 + V9BIAS64, %g1				;\
4942:	stxa	%l0, [%sp + %g1]asi_num				;\
495	mov	8 + V9BIAS64, %g2				;\
496	stxa	%l1, [%sp + %g2]asi_num				;\
497	mov	16 + V9BIAS64, %g3				;\
498	stxa	%l2, [%sp + %g3]asi_num				;\
499	mov	24 + V9BIAS64, %g4				;\
500	stxa	%l3, [%sp + %g4]asi_num				;\
501	add	%sp, 32, %g5					;\
502	stxa	%l4, [%g5 + %g1]asi_num				;\
503	stxa	%l5, [%g5 + %g2]asi_num				;\
504	stxa	%l6, [%g5 + %g3]asi_num				;\
505	stxa	%l7, [%g5 + %g4]asi_num				;\
506	add	%g5, 32, %g5					;\
507	stxa	%i0, [%g5 + %g1]asi_num				;\
508	stxa	%i1, [%g5 + %g2]asi_num				;\
509	stxa	%i2, [%g5 + %g3]asi_num				;\
510	stxa	%i3, [%g5 + %g4]asi_num				;\
511	add	%g5, 32, %g5					;\
512	stxa	%i4, [%g5 + %g1]asi_num				;\
513	stxa	%i5, [%g5 + %g2]asi_num				;\
514	stxa	%i6, [%g5 + %g3]asi_num				;\
515	stxa	%i7, [%g5 + %g4]asi_num				;\
516	TT_TRACE_L(trace_win)					;\
517	saved							;\
518	retry							;\
519	SKIP(31-25-TT_TRACE_L_INS)				;\
520	ba,a,pt %xcc, fault_64bit_/**/tail			;\
521	.empty
522
523#define	SPILL_64bit_tt1(asi_num, tail)				\
524	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
525	.empty							;\
526	.align 128
527
528/*
529 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
530 * that the kernel context and the nucleus context are the same.  The
531 * stack pointer is required to be eight-byte aligned.
532 */
533#define	FILL_64bit(tail)					\
5342:	TT_TRACE_L(trace_win)					;\
535	ldx	[%sp + V9BIAS64 + 0], %l0			;\
536	ldx	[%sp + V9BIAS64 + 8], %l1			;\
537	ldx	[%sp + V9BIAS64 + 16], %l2			;\
538	ldx	[%sp + V9BIAS64 + 24], %l3			;\
539	ldx	[%sp + V9BIAS64 + 32], %l4			;\
540	ldx	[%sp + V9BIAS64 + 40], %l5			;\
541	ldx	[%sp + V9BIAS64 + 48], %l6			;\
542	ldx	[%sp + V9BIAS64 + 56], %l7			;\
543	ldx	[%sp + V9BIAS64 + 64], %i0			;\
544	ldx	[%sp + V9BIAS64 + 72], %i1			;\
545	ldx	[%sp + V9BIAS64 + 80], %i2			;\
546	ldx	[%sp + V9BIAS64 + 88], %i3			;\
547	ldx	[%sp + V9BIAS64 + 96], %i4			;\
548	ldx	[%sp + V9BIAS64 + 104], %i5			;\
549	ldx	[%sp + V9BIAS64 + 112], %i6			;\
550	ldx	[%sp + V9BIAS64 + 120], %i7			;\
551	restored						;\
552	retry							;\
553	SKIP(31-18-TT_TRACE_L_INS)				;\
554	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
555	.empty
556
557/*
558 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
559 * wide address space via the designated asi.  It is used to fill
560 * non-kernel windows.  The stack pointer is required to be eight-byte
561 * aligned.
562 */
563#define	FILL_64bit_asi(asi_num, tail)				\
564	mov	V9BIAS64 + 0, %g1				;\
5652:	TT_TRACE_L(trace_win)					;\
566	ldxa	[%sp + %g1]asi_num, %l0				;\
567	mov	V9BIAS64 + 8, %g2				;\
568	ldxa	[%sp + %g2]asi_num, %l1				;\
569	mov	V9BIAS64 + 16, %g3				;\
570	ldxa	[%sp + %g3]asi_num, %l2				;\
571	mov	V9BIAS64 + 24, %g4				;\
572	ldxa	[%sp + %g4]asi_num, %l3				;\
573	add	%sp, 32, %g5					;\
574	ldxa	[%g5 + %g1]asi_num, %l4				;\
575	ldxa	[%g5 + %g2]asi_num, %l5				;\
576	ldxa	[%g5 + %g3]asi_num, %l6				;\
577	ldxa	[%g5 + %g4]asi_num, %l7				;\
578	add	%g5, 32, %g5					;\
579	ldxa	[%g5 + %g1]asi_num, %i0				;\
580	ldxa	[%g5 + %g2]asi_num, %i1				;\
581	ldxa	[%g5 + %g3]asi_num, %i2				;\
582	ldxa	[%g5 + %g4]asi_num, %i3				;\
583	add	%g5, 32, %g5					;\
584	ldxa	[%g5 + %g1]asi_num, %i4				;\
585	ldxa	[%g5 + %g2]asi_num, %i5				;\
586	ldxa	[%g5 + %g3]asi_num, %i6				;\
587	ldxa	[%g5 + %g4]asi_num, %i7				;\
588	restored						;\
589	retry							;\
590	SKIP(31-25-TT_TRACE_L_INS)				;\
591	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
592	.empty
593
594
595#endif /* !lint */
596
597/*
598 * SPILL_mixed spills either size window, depending on
599 * whether %sp is even or odd, to a 32-bit address space.
600 * This may only be used in conjunction with SPILL_32bit/
601 * SPILL_64bit.
602 * Clear upper 32 bits of %sp if it is odd.
603 * We won't need to clear them in 64 bit kernel.
604 */
605#define	SPILL_mixed						\
606	btst	1, %sp		/* odd (biased) %sp => 64-bit frame */	;\
607	bz,a,pt	%xcc, 1b	/* even => 32-bit spill handler */	;\
608	srl	%sp, 0, %sp	/* (annulled: runs only if taken) zero upper 32 bits */ ;\
609	ba,pt	%xcc, 2b	/* odd => 64-bit spill handler */	;\
610	nop							;\
611	.align	128
612
613/*
614 * FILL_mixed(ASI) fills either size window, depending on
615 * whether %sp is even or odd, from a 32-bit address space.
616 * This may only be used in conjunction with FILL_32bit/
617 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
618 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
619 * attention should be paid to the instructions that belong
620 * in the delay slots of the branches depending on the type
621 * of fill handler being branched to.
622 * Clear upper 32 bits of %sp if it is odd.
623 * We won't need to clear them in 64 bit kernel.
624 */
625#define	FILL_mixed						\
626	btst	1, %sp						;\
627	bz,a,pt	%xcc, 1b					;\
628	srl	%sp, 0, %sp					;\
629	ba,pt	%xcc, 2b					;\
630	nop							;\
631	.align	128
632
633
634/*
635 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
636 * respectively, into the address space via the designated asi.  The
637 * unbiased stack pointer is required to be eight-byte aligned (even for
638 * the 32-bit case even though this code does not require such strict
639 * alignment).
640 *
641 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
642 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
643 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
644 * window may contain kernel data so in user_rtt we set wstate to call
645 * these spill handlers on the first user spill trap.  These handler then
646 * spill the appropriate window but also back up a window and clean the
647 * window that didn't get a cleanwin trap.
648 */
649#define	SPILL_32clean(asi_num, tail)				\
650	srl	%sp, 0, %sp					;\
651	sta	%l0, [%sp + %g0]asi_num				;\
652	mov	4, %g1						;\
653	sta	%l1, [%sp + %g1]asi_num				;\
654	mov	8, %g2						;\
655	sta	%l2, [%sp + %g2]asi_num				;\
656	mov	12, %g3						;\
657	sta	%l3, [%sp + %g3]asi_num				;\
658	add	%sp, 16, %g4					;\
659	sta	%l4, [%g4 + %g0]asi_num				;\
660	sta	%l5, [%g4 + %g1]asi_num				;\
661	sta	%l6, [%g4 + %g2]asi_num				;\
662	sta	%l7, [%g4 + %g3]asi_num				;\
663	add	%g4, 16, %g4					;\
664	sta	%i0, [%g4 + %g0]asi_num				;\
665	sta	%i1, [%g4 + %g1]asi_num				;\
666	sta	%i2, [%g4 + %g2]asi_num				;\
667	sta	%i3, [%g4 + %g3]asi_num				;\
668	add	%g4, 16, %g4					;\
669	sta	%i4, [%g4 + %g0]asi_num				;\
670	sta	%i5, [%g4 + %g1]asi_num				;\
671	sta	%i6, [%g4 + %g2]asi_num				;\
672	sta	%i7, [%g4 + %g3]asi_num				;\
673	TT_TRACE_L(trace_win)					;\
674	b	.spill_clean					;\
675	  mov	WSTATE_USER32, %g7				;\
676	SKIP(31-25-TT_TRACE_L_INS)				;\
677	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
678	.empty
679
680#define	SPILL_64clean(asi_num, tail)				\
681	mov	0 + V9BIAS64, %g1				;\
682	stxa	%l0, [%sp + %g1]asi_num				;\
683	mov	8 + V9BIAS64, %g2				;\
684	stxa	%l1, [%sp + %g2]asi_num				;\
685	mov	16 + V9BIAS64, %g3				;\
686	stxa	%l2, [%sp + %g3]asi_num				;\
687	mov	24 + V9BIAS64, %g4				;\
688	stxa	%l3, [%sp + %g4]asi_num				;\
689	add	%sp, 32, %g5					;\
690	stxa	%l4, [%g5 + %g1]asi_num				;\
691	stxa	%l5, [%g5 + %g2]asi_num				;\
692	stxa	%l6, [%g5 + %g3]asi_num				;\
693	stxa	%l7, [%g5 + %g4]asi_num				;\
694	add	%g5, 32, %g5					;\
695	stxa	%i0, [%g5 + %g1]asi_num				;\
696	stxa	%i1, [%g5 + %g2]asi_num				;\
697	stxa	%i2, [%g5 + %g3]asi_num				;\
698	stxa	%i3, [%g5 + %g4]asi_num				;\
699	add	%g5, 32, %g5					;\
700	stxa	%i4, [%g5 + %g1]asi_num				;\
701	stxa	%i5, [%g5 + %g2]asi_num				;\
702	stxa	%i6, [%g5 + %g3]asi_num				;\
703	stxa	%i7, [%g5 + %g4]asi_num				;\
704	TT_TRACE_L(trace_win)					;\
705	b	.spill_clean					;\
706	  mov	WSTATE_USER64, %g7				;\
707	SKIP(31-25-TT_TRACE_L_INS)				;\
708	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
709	.empty
710
711
712/*
713 * Floating point disabled.
714 */
715#define	FP_DISABLED_TRAP		\
716	TT_TRACE(trace_gen)		;\
717	ba,pt	%xcc,.fp_disabled	;\
718	nop				;\
719	.align	32
720
721/*
722 * Floating point exceptions.
723 */
724#define	FP_IEEE_TRAP			\
725	TT_TRACE(trace_gen)		;\
726	ba,pt	%xcc,.fp_ieee_exception	;\
727	nop				;\
728	.align	32
729
730#define	FP_TRAP				\
731	TT_TRACE(trace_gen)		;\
732	ba,pt	%xcc,.fp_exception	;\
733	nop				;\
734	.align	32
735
736#if !defined(lint)
737
738/*
739 * ECACHE_ECC error traps at level 0 and level 1
740 */
741#define	ECACHE_ECC(table_name)		\
742	.global	table_name		;\
743table_name:				;\
744	membar	#Sync			;\
745	set	trap, %g1		;\
746	rdpr	%tt, %g3		;\
747	ba,pt	%xcc, sys_trap		;\
748	sub	%g0, 1, %g4		;\
749	.align	32
750
751#endif /* !lint */
752
753/*
754 * illegal instruction trap
755 */
756#define	ILLTRAP_INSTR			  \
757	membar	#Sync			  ;\
758	TT_TRACE(trace_gen)		  ;\
759	or	%g0, P_UTRAP4, %g2	  ;\
760	or	%g0, T_UNIMP_INSTR, %g3   ;\
761	sethi	%hi(.check_v9utrap), %g4  ;\
762	jmp	%g4 + %lo(.check_v9utrap) ;\
763	nop				  ;\
764	.align	32
765
766/*
767 * tag overflow trap
768 */
769#define	TAG_OVERFLOW			  \
770	TT_TRACE(trace_gen)		  ;\
771	or	%g0, P_UTRAP10, %g2	  ;\
772	or	%g0, T_TAG_OVERFLOW, %g3  ;\
773	sethi	%hi(.check_v9utrap), %g4  ;\
774	jmp	%g4 + %lo(.check_v9utrap) ;\
775	nop				  ;\
776	.align	32
777
778/*
779 * divide by zero trap
780 */
781#define	DIV_BY_ZERO			  \
782	TT_TRACE(trace_gen)		  ;\
783	or	%g0, P_UTRAP11, %g2	  ;\
784	or	%g0, T_IDIV0, %g3	  ;\
785	sethi	%hi(.check_v9utrap), %g4  ;\
786	jmp	%g4 + %lo(.check_v9utrap) ;\
787	nop				  ;\
788	.align	32
789
790/*
791 * trap instruction for V9 user trap handlers
792 */
793#define	TRAP_INSTR			  \
794	TT_TRACE(trace_gen)		  ;\
795	or	%g0, T_SOFTWARE_TRAP, %g3 ;\
796	sethi	%hi(.check_v9utrap), %g4  ;\
797	jmp	%g4 + %lo(.check_v9utrap) ;\
798	nop				  ;\
799	.align	32
800#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR
801
802/*
803 * LEVEL_INTERRUPT is for level N interrupts.
804 * VECTOR_INTERRUPT is for the vector trap.
805 */
806#define	LEVEL_INTERRUPT(level)		\
807	.global	tt_pil/**/level		;\
808tt_pil/**/level:			;\
809	ba,pt	%xcc, pil_interrupt	;\
810	mov	level, %g4		;\
811	.align	32
812
813#define	LEVEL14_INTERRUPT			\
814	ba	pil14_interrupt			;\
815	mov	PIL_14, %g4			;\
816	.align	32
817
818#define CPU_MONDO			\
819	ba,a,pt	%xcc, cpu_mondo		;\
820	.align	32
821
822#define DEV_MONDO			\
823	ba,a,pt	%xcc, dev_mondo		;\
824	.align	32
825
826/*
827 * MMU Trap Handlers.
828 */
829
830/*
831 * synthesize for trap(): SFSR in %g3
832 */
833#define	IMMU_EXCEPTION							\
834	MMU_FAULT_STATUS_AREA(%g3)					;\
835	rdpr	%tpc, %g2						;\
836	ldx	[%g3 + MMFSA_I_TYPE], %g1				;\
837	ldx	[%g3 + MMFSA_I_CTX], %g3				;\
838	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
839	or	%g3, %g1, %g3						;\
840	ba,pt	%xcc, .mmu_exception_end				;\
841	mov	T_INSTR_EXCEPTION, %g1					;\
842	.align	32
843
844/*
845 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
846 */
847#define	DMMU_EXCEPTION							\
848	ba,a,pt	%xcc, .dmmu_exception					;\
849	.align	32
850
851/*
852 * synthesize for trap(): SFAR in %g2, SFSR in %g3
853 */
854#define	DMMU_EXC_AG_PRIV						\
855	MMU_FAULT_STATUS_AREA(%g3)					;\
856	ldx	[%g3 + MMFSA_D_ADDR], %g2				;\
857	/* Fault type not available in MMU fault status area */		;\
858	mov	MMFSA_F_PRVACT, %g1					;\
859	ldx	[%g3 + MMFSA_D_CTX], %g3				;\
860	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
861	ba,pt	%xcc, .mmu_priv_exception				;\
862	or	%g3, %g1, %g3						;\
863	.align	32
864
865/*
866 * synthesize for trap(): SFAR in %g2, SFSR in %g3
867 */
868#define	DMMU_EXC_AG_NOT_ALIGNED						\
869	MMU_FAULT_STATUS_AREA(%g3)					;\
870	ldx	[%g3 + MMFSA_D_ADDR], %g2				;\
871	/* Fault type not available in MMU fault status area */		;\
872	mov	MMFSA_F_UNALIGN, %g1					;\
873	ldx	[%g3 + MMFSA_D_CTX], %g3				;\
874	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
875	ba,pt	%xcc, .mmu_exception_not_aligned			;\
876	or	%g3, %g1, %g3			/* SFSR */		;\
877	.align	32
878/*
879 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
880 */
881
882/*
883 * synthesize for trap(): SFAR in %g2, SFSR in %g3
884 */
885#define	DMMU_EXC_LDDF_NOT_ALIGNED					\
886	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned			;\
887	.align	32
888/*
889 * synthesize for trap(): SFAR in %g2, SFSR in %g3
890 */
891#define	DMMU_EXC_STDF_NOT_ALIGNED					\
892	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned			;\
893	.align	32
894
895#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
896#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
897#endif
898
899#if defined(cscope)
900/*
901 * Define labels to direct cscope quickly to labels that
902 * are generated by macro expansion of DTLB_MISS().
903 */
904	.global	tt0_dtlbmiss
905tt0_dtlbmiss:
906	.global	tt1_dtlbmiss
907tt1_dtlbmiss:
908	nop
909#endif
910
911/*
912 * Data miss handler (must be exactly 32 instructions)
913 *
914 * This handler is invoked only if the hypervisor has been instructed
915 * not to do any TSB walk.
916 *
917 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
918 * handler.
919 *
920 * User TLB miss handling depends upon whether a user process has one or
921 * two TSBs. User TSB information (physical base and size code) is kept
922 * in two dedicated scratchpad registers. Absence of a user TSB (primarily
923 * second TSB) is indicated by a negative value (-1) in that register.
924 */
925
926/*
927 * synthesize for miss handler: TAG_ACCESS in %g2
928 */
929#define	DTLB_MISS(table_name)						;\
930	.global	table_name/**/_dtlbmiss					;\
931table_name/**/_dtlbmiss:						;\
932	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
933	MMU_FAULT_STATUS_AREA(%g7)					;\
934	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */		;\
935	ldx	[%g7 + MMFSA_D_CTX], %g3	/* g3 = ctx */		;\
936	or	%g2, %g3, %g2			/* TAG_ACCESS */	;\
937	cmp	%g3, INVALID_CONTEXT					;\
938	ble,pn	%xcc, sfmmu_kdtlb_miss	/* kernel or invalid ctx */	;\
939	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
940	mov	SCRATCHPAD_UTSBREG2, %g1				;\
941	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
942	brgez,pn %g1, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
943	  nop								;\
944	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
945	ba,pt	%xcc, sfmmu_udtlb_fastpath	/* no 4M TSB, miss */	;\
946	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
947	.align 128
948
949
950#if defined(cscope)
951/*
952 * Define labels to direct cscope quickly to labels that
953 * are generated by macro expansion of ITLB_MISS().
954 */
955	.global	tt0_itlbmiss
956tt0_itlbmiss:
957	.global	tt1_itlbmiss
958tt1_itlbmiss:
959	nop
960#endif
961
962/*
963 * Instruction miss handler.
964 *
965 * This handler is invoked only if the hypervisor has been instructed
966 * not to do any TSB walk.
967 *
968 * ldda instructions will have their ASI patched
969 * by sfmmu_patch_ktsb at runtime.
970 * MUST be EXACTLY 32 instructions or we'll break.
971 */
972
973/*
974 * synthesize for miss handler: TAG_ACCESS in %g2
975 */
976#define	ITLB_MISS(table_name)						 \
977	.global	table_name/**/_itlbmiss					;\
978table_name/**/_itlbmiss:						;\
979	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
980	MMU_FAULT_STATUS_AREA(%g7)					;\
981	ldx	[%g7 + MMFSA_I_ADDR], %g2	/* g2 = address */	;\
982	ldx	[%g7 + MMFSA_I_CTX], %g3	/* g3 = ctx */		;\
983	or	%g2, %g3, %g2			/* TAG_ACCESS */	;\
984	cmp	%g3, INVALID_CONTEXT					;\
985	ble,pn	%xcc, sfmmu_kitlb_miss	/* kernel or invalid ctx */	;\
986	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
987	mov	SCRATCHPAD_UTSBREG2, %g1				;\
988	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
989	brgez,pn %g1, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
990	  nop								;\
991	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
992	ba,pt	%xcc, sfmmu_uitlb_fastpath	/* no 4M TSB, miss */	;\
993	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
994	.align 128
995
996#define	DTSB_MISS \
997	GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)
998
999#define	ITSB_MISS \
1000	GOTO_TT(sfmmu_slow_immu_miss,trace_immu)
1001
1002/*
1003 * This macro is the first level handler for fast protection faults.
1004 * It first demaps the tlb entry which generated the fault and then
1005 * attempts to set the modify bit on the hash.  It needs to be
1006 * exactly 32 instructions.
1007 */
1008/*
1009 * synthesize for miss handler: TAG_ACCESS in %g2
1010 */
1011#define	DTLB_PROT							 \
1012	MMU_FAULT_STATUS_AREA(%g7)					;\
1013	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */		;\
1014	ldx	[%g7 + MMFSA_D_CTX], %g3	/* %g3 = ctx */		;\
1015	or	%g2, %g3, %g2			/* TAG_ACCESS */	;\
1016	/*								;\
1017	 *   g2 = tag access register					;\
1018	 *   g3 = ctx number						;\
1019	 */								;\
1020	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
1021					/* clobbers g1 and g6 XXXQ? */	;\
1022	brnz,pt %g3, sfmmu_uprot_trap		/* user trap */		;\
1023	  nop								;\
1024	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
1025	.align 128
1026
1027#define	DMMU_EXCEPTION_TL1						;\
1028	ba,a,pt	%xcc, mmu_trap_tl1					;\
1029	.align 32
1030
1031#define	MISALIGN_ADDR_TL1						;\
1032	ba,a,pt	%xcc, mmu_trap_tl1					;\
1033	.align 32
1034
1035/*
1036 * Trace a tsb hit
1037 * g1 = tsbe pointer (in/clobbered)
1038 * g2 = tag access register (in)
1039 * g3 - g4 = scratch (clobbered)
1040 * g5 = tsbe data (in)
1041 * g6 = scratch (clobbered)
1042 * g7 = pc we jumped here from (in)
1043 * ttextra = value to OR in to trap type (%tt) (in)
1044 */
1045#ifdef TRAPTRACE
1046#define TRACE_TSBHIT(ttextra)						 \
1047	membar	#Sync							;\
1048	sethi	%hi(FLUSH_ADDR), %g6					;\
1049	flush	%g6							;\
1050	TRACE_PTR(%g3, %g6)						;\
1051	GET_TRACE_TICK(%g6)						;\
1052	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi				;\
1053	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */	;\
1054	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */		;\
1055	rdpr	%tnpc, %g6						;\
1056	stna	%g6, [%g3 + TRAP_ENT_F2]%asi				;\
1057	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */	;\
1058	stna	%g0, [%g3 + TRAP_ENT_F4]%asi				;\
1059	rdpr	%tpc, %g6						;\
1060	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi				;\
1061	TRACE_SAVE_TL_GL_REGS(%g3, %g6)					;\
1062	rdpr	%tt, %g6						;\
1063	or	%g6, (ttextra), %g1					;\
1064	stha	%g1, [%g3 + TRAP_ENT_TT]%asi				;\
1065	MMU_FAULT_STATUS_AREA(%g4)					;\
1066	mov	MMFSA_D_ADDR, %g1					;\
1067	cmp	%g6, FAST_IMMU_MISS_TT					;\
1068	move	%xcc, MMFSA_I_ADDR, %g1					;\
1069	cmp	%g6, T_INSTR_MMU_MISS					;\
1070	move	%xcc, MMFSA_I_ADDR, %g1					;\
1071	ldx	[%g4 + %g1], %g1					;\
1072	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */	;\
1073	mov	MMFSA_D_CTX, %g1					;\
1074	cmp	%g6, FAST_IMMU_MISS_TT					;\
1075	move	%xcc, MMFSA_I_CTX, %g1					;\
1076	cmp	%g6, T_INSTR_MMU_MISS					;\
1077	move	%xcc, MMFSA_I_CTX, %g1					;\
1078	ldx	[%g4 + %g1], %g1					;\
1079	stna	%g1, [%g3 + TRAP_ENT_TR]%asi				;\
1080	TRACE_NEXT(%g3, %g4, %g6)
1081#else
1082#define TRACE_TSBHIT(ttextra)
1083#endif
1084
1085
/*
 * For lint, the trap table is presented as C data; trap_table and scb
 * label the same object.
 */
1086#if defined(lint)
1087
1088struct scb	trap_table;
1089struct scb	scb;		/* trap_table/scb are the same object */
1090
1091#else /* lint */
1092
1093/*
1094 * =======================================================================
1095 *		SPARC V9 TRAP TABLE
1096 *
1097 * The trap table is divided into two halves: the first half is used when
1098 * taking traps when TL=0; the second half is used when taking traps from
1099 * TL>0. Note that handlers in the second half of the table might not be able
1100 * to make the same assumptions as handlers in the first half of the table.
1101 *
1102 * Worst case trap nesting so far:
1103 *
1104 *	at TL=0 client issues software trap requesting service
1105 *	at TL=1 nucleus wants a register window
1106 *	at TL=2 register window clean/spill/fill takes a TLB miss
1107 *	at TL=3 processing TLB miss
1108 *	at TL=4 handle asynchronous error
1109 *
1110 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
1111 *
1112 * =======================================================================
1113 */
1114	.section ".text"
1115	.align	4
1116	.global trap_table, scb, trap_table0, trap_table1, etrap_table
1117	.type	trap_table, #object
1118	.type	trap_table0, #object
1119	.type	trap_table1, #object
1120	.type	scb, #object
1121trap_table:
1122scb:
1123trap_table0:
1124	/* hardware traps */
1125	NOT;				/* 000	reserved */
1126	RED;				/* 001	power on reset */
1127	RED;				/* 002	watchdog reset */
1128	RED;				/* 003	externally initiated reset */
1129	RED;				/* 004	software initiated reset */
1130	RED;				/* 005	red mode exception */
1131	NOT; NOT;			/* 006 - 007 reserved */
1132	IMMU_EXCEPTION;			/* 008	instruction access exception */
1133	ITSB_MISS;			/* 009	instruction access MMU miss */
1134 	NOT;				/* 00A  reserved */
1135	NOT; NOT4;			/* 00B - 00F reserved */
1136	ILLTRAP_INSTR;			/* 010	illegal instruction */
1137	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
1138	TRAP(T_UNIMP_LDD);		/* 012	unimplemented LDD */
1139	TRAP(T_UNIMP_STD);		/* 013	unimplemented STD */
1140	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
1141	FP_DISABLED_TRAP;		/* 020	fp disabled */
1142	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
1143	FP_TRAP;			/* 022	fp exception other */
1144	TAG_OVERFLOW;			/* 023	tag overflow */
1145	CLEAN_WINDOW;			/* 024 - 027 clean window */
1146	DIV_BY_ZERO;			/* 028	division by zero */
1147	NOT;				/* 029	internal processor error */
1148	NOT; NOT; NOT4;			/* 02A - 02F reserved */
1149	DMMU_EXCEPTION;			/* 030	data access exception */
1150	DTSB_MISS;			/* 031	data access MMU miss */
1151	NOT;				/* 032  reserved */
1152	NOT;				/* 033	data access protection */
1153	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
1154	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
1155	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
1156	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
1157	NOT;				/* 038	LDQF mem address not aligned */
1158	NOT;				/* 039	STQF mem address not aligned */
1159	NOT; NOT; NOT4;			/* 03A - 03F reserved */
1160	NOT;				/* 040	async data error */
1161	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
1162	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
1163	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
1164	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
1165	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
1166	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
1167	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
1168	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
1169	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
1170	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
1171	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
1172	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
1173	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
1174	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
1175	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
1176	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
1177	NOT;				/* 060	interrupt vector */
1178	GOTO(kmdb_trap);		/* 061	PA watchpoint */
1179	GOTO(kmdb_trap);		/* 062	VA watchpoint */
1180	NOT;				/* 063	reserved */
1181	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
1182	DTLB_MISS(tt0);			/* 068	data access MMU miss */
1183	DTLB_PROT;			/* 06C	data access protection */
1184	NOT;				/* 070  reserved */
1185	NOT;				/* 071  reserved */
1186	NOT;				/* 072  reserved */
1187	NOT;				/* 073  reserved */
1188	NOT4; NOT4			/* 074 - 07B reserved */
1189	CPU_MONDO;			/* 07C	cpu_mondo */
1190	DEV_MONDO;			/* 07D	dev_mondo */
1191	GOTO_TT(resumable_error, trace_gen);	/* 07E  resumable error */
1192	GOTO_TT(nonresumable_error, trace_gen);	/* 07F  non-reasumable error */
1193	NOT4;				/* 080	spill 0 normal */
1194	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
1195	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
1196	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
1197	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
1198	SPILL_32bit(not);		/* 094	spill 5 normal */
1199	SPILL_64bit(not);		/* 098	spill 6 normal */
1200	SPILL_mixed;			/* 09C	spill 7 normal */
1201	NOT4;				/* 0A0	spill 0 other */
1202	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
1203	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
1204	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
1205	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
1206	NOT4;				/* 0B4	spill 5 other */
1207	NOT4;				/* 0B8	spill 6 other */
1208	NOT4;				/* 0BC	spill 7 other */
1209	NOT4;				/* 0C0	fill 0 normal */
1210	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
1211	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
1212	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
1213	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
1214	FILL_32bit(not);		/* 0D4	fill 5 normal */
1215	FILL_64bit(not);		/* 0D8	fill 6 normal */
1216	FILL_mixed;			/* 0DC	fill 7 normal */
1217	NOT4;				/* 0E0	fill 0 other */
1218	NOT4;				/* 0E4	fill 1 other */
1219	NOT4;				/* 0E8	fill 2 other */
1220	NOT4;				/* 0EC	fill 3 other */
1221	NOT4;				/* 0F0	fill 4 other */
1222	NOT4;				/* 0F4	fill 5 other */
1223	NOT4;				/* 0F8	fill 6 other */
1224	NOT4;				/* 0FC	fill 7 other */
1225	/* user traps */
1226	GOTO(syscall_trap_4x);		/* 100	old system call */
1227	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
1228	TRAP(T_DIV0);			/* 102	user divide by zero */
1229	GOTO(.flushw);			/* 103	flush windows */
1230	GOTO(.clean_windows);		/* 104	clean windows */
1231	BAD;				/* 105	range check ?? */
1232	GOTO(.fix_alignment);		/* 106	do unaligned references */
1233	BAD;				/* 107	unused */
1234	SYSCALL(syscall_trap32);	/* 108	ILP32 system call on LP64 */
1235	GOTO(set_trap0_addr);		/* 109	set trap0 address */
1236	BAD; BAD; BAD4;			/* 10A - 10F unused */
1237	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
1238	GOTO(.getcc);			/* 120	get condition codes */
1239	GOTO(.setcc);			/* 121	set condition codes */
1240	GOTO(.getpsr);			/* 122	get psr */
1241	GOTO(.setpsr);			/* 123	set psr (some fields) */
1242	GOTO(get_timestamp);		/* 124	get timestamp */
1243	GOTO(get_virtime);		/* 125	get lwp virtual time */
1244	PRIV(self_xcall);		/* 126	self xcall */
1245	GOTO(get_hrestime);		/* 127	get hrestime */
1246	BAD;				/* 128	ST_SETV9STACK */
1247	GOTO(.getlgrp);			/* 129  get lgrpid */
1248	BAD; BAD; BAD4;			/* 12A - 12F unused */
1249	BAD4; BAD4; 			/* 130 - 137 unused */
1250	DTRACE_PID;			/* 138  dtrace pid tracing provider */
1251	DTRACE_FASTTRAP;		/* 139  dtrace fasttrap provider */
1252	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
1253	BAD; BAD4;			/* 13B - 13F unused */
1254	SYSCALL(syscall_trap)		/* 140  LP64 system call */
1255	SYSCALL(nosys);			/* 141  unused system call trap */
1256#ifdef DEBUG_USER_TRAPTRACECTL
1257	GOTO(.traptrace_freeze);	/* 142  freeze traptrace */
1258	GOTO(.traptrace_unfreeze);	/* 143  unfreeze traptrace */
1259#else
1260	SYSCALL(nosys);			/* 142  unused system call trap */
1261	SYSCALL(nosys);			/* 143  unused system call trap */
1262#endif
1263	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
1264	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
1265	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
1266	BAD;				/* 170 - unused */
1267	BAD;				/* 171 - unused */
1268	BAD; BAD;			/* 172 - 173 unused */
1269	BAD4; BAD4;			/* 174 - 17B unused */
1270#ifdef	PTL1_PANIC_DEBUG
1271	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
1272					/* 17C	test ptl1_panic */
1273#else
1274	BAD;				/* 17C  unused */
1275#endif	/* PTL1_PANIC_DEBUG */
1276	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
1277	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
1278	PRIV(obp_bpt);			/* 17F	obp breakpoint */
1279	/* reserved */
1280	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
1281	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
1282	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
1283	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
1284	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
1285	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
1286	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
1287	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
1288	.size	trap_table0, (.-trap_table0)
/*
 * Second half of the trap table: entries for traps taken at TL>0.
 * Handlers here cannot make the same assumptions as the TL=0 half;
 * most entries either vector to TL1-aware handlers or are unexpected
 * (NOT/NOTP4).  etrap_table marks the end of the table.
 */
trap_table1:
1290	NOT4; NOT4;			/* 000 - 007 unused */
1291	NOT;				/* 008	instruction access exception */
1292	ITSB_MISS;			/* 009	instruction access MMU miss */
1293 	NOT;				/* 00A  reserved */
1294	NOT; NOT4;			/* 00B - 00F unused */
1295	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
1296	NOT4;				/* 020 - 023 unused */
1297	CLEAN_WINDOW;			/* 024 - 027 clean window */
1298	NOT4; NOT4;			/* 028 - 02F unused */
1299	DMMU_EXCEPTION_TL1;		/* 030 	data access exception */
1300	DTSB_MISS;			/* 031  data access MMU miss */
1301	NOT;				/* 032  reserved */
1302	NOT;				/* 033	unused */
1303	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
1304	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
1305	NOT4; NOT4; NOT4; NOT4;		/* 040 - 04F unused */
1306	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
1307	NOT;				/* 060	unused */
1308	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
1309	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
1310	NOT;				/* 063	reserved */
1311	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
1312	DTLB_MISS(tt1);			/* 068	data access MMU miss */
1313	DTLB_PROT;			/* 06C	data access protection */
1314	NOT;				/* 070  reserved */
1315	NOT;				/* 071  reserved */
1316	NOT;				/* 072  reserved */
1317	NOT;				/* 073  reserved */
1318	NOT4; NOT4;			/* 074 - 07B reserved */
1319	NOT;				/* 07C  reserved */
1320	NOT;				/* 07D  reserved */
1321	NOT;				/* 07E  resumable error */
1322	GOTO_TT(nonresumable_error, trace_gen);	/* 07F  nonresumable error */
1323	NOTP4;				/* 080	spill 0 normal */
1324	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
1325	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
1326	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
1327	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
1328	NOTP4;				/* 094	spill 5 normal */
1329	SPILL_64bit_ktt1(sk);		/* 098	spill 6 normal */
1330	SPILL_mixed_ktt1(sk);		/* 09C	spill 7 normal */
1331	NOTP4;				/* 0A0	spill 0 other */
1332	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4  spill 1 other */
1333	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
1334	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
1335	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0  spill 4 other */
1336	NOTP4;				/* 0B4  spill 5 other */
1337	NOTP4;				/* 0B8  spill 6 other */
1338	NOTP4;				/* 0BC  spill 7 other */
1339	NOT4;				/* 0C0	fill 0 normal */
1340	NOT4;				/* 0C4	fill 1 normal */
1341	NOT4;				/* 0C8	fill 2 normal */
1342	NOT4;				/* 0CC	fill 3 normal */
1343	NOT4;				/* 0D0	fill 4 normal */
1344	NOT4;				/* 0D4	fill 5 normal */
1345	NOT4;				/* 0D8	fill 6 normal */
1346	NOT4;				/* 0DC	fill 7 normal */
1347	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
1348	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
1349/*
1350 * Code running at TL>0 does not use soft traps, so
1351 * we can truncate the table here.
1352 * However:
1353 * sun4v uses (hypervisor) ta instructions at TL > 0, so
1354 * provide a safety net for now.
1355 */
1356	/* soft traps */
1357	BAD4; BAD4; BAD4; BAD4;		/* 100 - 10F unused */
1358	BAD4; BAD4; BAD4; BAD4;		/* 110 - 11F unused */
1359	BAD4; BAD4; BAD4; BAD4;		/* 120 - 12F unused */
1360	BAD4; BAD4; BAD4; BAD4;		/* 130 - 13F unused */
1361	BAD4; BAD4; BAD4; BAD4;		/* 140 - 14F unused */
1362	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
1363	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
1364	BAD4; BAD4; BAD4; BAD4;		/* 170 - 17F unused */
1365	/* reserved */
1366	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
1367	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
1368	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
1369	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
1370	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
1371	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
1372	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
1373	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
etrap_table:
1375	.size	trap_table1, (.-trap_table1)
1376	.size	trap_table, (.-trap_table)
1377	.size	scb, (.-scb)
1378
1379/*
1380 * We get to exec_fault in the case of an instruction miss and tte
1381 * has no execute bit set.  We go to tl0 to handle it.
1382 *
1383 * g1 = tsbe pointer (in/clobbered)
1384 * g2 = tag access register (in)
1385 * g3 - g4 = scratch (clobbered)
1386 * g5 = tsbe data (in)
1387 * g6 = scratch (clobbered)
1388 * g7 = pc we jumped here from (in)
1389 */
1390/*
1391 * synthesize for trap(): TAG_ACCESS in %g2
1392 */
1393	ALTENTRY(exec_fault)
1394	TRACE_TSBHIT(TT_MMU_EXEC)
1395	MMU_FAULT_STATUS_AREA(%g4)
1396	ldx	[%g4 + MMFSA_I_ADDR], %g2	/* g2 = address */
1397	ldx	[%g4 + MMFSA_I_CTX], %g3	/* g3 = ctx */
1398	srlx	%g2, MMU_PAGESHIFT, %g2		! align address to page boundry
1399	sllx	%g2, MMU_PAGESHIFT, %g2
1400	or	%g2, %g3, %g2			/* TAG_ACCESS */
1401	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
1402	set	trap, %g1
1403	ba,pt	%xcc, sys_trap
1404	  mov	-1, %g4
1405
/*
 * Alignment fault at TL=0.
 * In:  %g2 = sfar (fault address), %g3 = sfsr.
 * If the trap came from user mode and the process has installed a
 * utrap handler for misaligned accesses (p_utraps[UT 15]), dispatch
 * to it: 32-bit processes (odd %sp untagged) go via .setup_utrap with
 * the misaligned address in %g7; 64-bit processes go via
 * .setup_v9utrap.  Otherwise fall into .mmu_exception_end with
 * trap type T_ALIGNMENT.
 */
.mmu_exception_not_aligned:
1406	/* %g2 = sfar, %g3 = sfsr */
1407	rdpr	%tstate, %g1
1408	btst	TSTATE_PRIV, %g1
1409	bnz,pn	%icc, 2f			! privileged: no utrap dispatch
1410	nop
1411	CPU_ADDR(%g1, %g4)				! load CPU struct addr
1412	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
1413	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
1414	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
1415	brz,pt	%g5, 2f
1416	nop
1417	ldn	[%g5 + P_UTRAP15], %g5			! unaligned utrap?
1418	brz,pn	%g5, 2f
1419	nop
1420	btst	1, %sp
1421	bz,pt	%xcc, 1f				! 32 bit user program
1422	nop
1423	ba,pt	%xcc, .setup_v9utrap			! 64 bit user program
1424	nop
14251:
1426	ba,pt	%xcc, .setup_utrap
1427	or	%g2, %g0, %g7			! %g7 = misaligned address
14282:
1429	ba,pt	%xcc, .mmu_exception_end
1430	mov	T_ALIGNMENT, %g1
1432
/*
 * Privileged-action/privileged-opcode fault at TL=0.
 * If from user mode and the process has a utrap handler installed in
 * p_utraps[UT 16], dispatch to it via .setup_v9utrap; otherwise fall
 * through to .mmu_exception_end with trap type T_PRIV_INSTR.
 */
.mmu_priv_exception:
1433	rdpr	%tstate, %g1
1434	btst	TSTATE_PRIV, %g1
1435	bnz,pn	%icc, 1f			! privileged: no utrap dispatch
1436	nop
1437	CPU_ADDR(%g1, %g4)				! load CPU struct addr
1438	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
1439	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
1440	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
1441	brz,pt	%g5, 1f
1442	nop
1443	ldn	[%g5 + P_UTRAP16], %g5
1444	brnz,pt	%g5, .setup_v9utrap
1445	nop
14461:
1447	mov	T_PRIV_INSTR, %g1
1448	/* falls through to .mmu_exception_end */
1449
/*
 * Common tail for MMU exceptions.
 * In:  %g1 = trap type, %g3 = sfsr.
 * If this CPU's dtrace flags have CPU_DTRACE_NOFAULT set, record
 * CPU_DTRACE_BADADDR and retire the trap with "done" (the probe
 * context handles the fault itself).  Otherwise pack sfsr into the
 * upper 32 bits of %g3 above the trap type and enter trap() via
 * sys_trap with -1 in %g4.
 */
.mmu_exception_end:
1450	CPU_INDEX(%g4, %g5)
1451	set	cpu_core, %g5
1452	sllx	%g4, CPU_CORE_SHIFT, %g4
1453	add	%g4, %g5, %g4			! %g4 = &cpu_core[cpuid]
1454	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
1455	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
1456	bz	1f
1457	or	%g5, CPU_DTRACE_BADADDR, %g5
1458	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
1459	done
1460
14611:
1462	sllx	%g3, 32, %g3			! %g3 = (sfsr << 32) | traptype
1463	or	%g3, %g1, %g3
1464	set	trap, %g1
1465	ba,pt	%xcc, sys_trap
1466	sub	%g0, 1, %g4
1468
/*
 * fp disabled trap (tt 0x20).  A privileged fp-disabled trap is fatal
 * (ptl1_panic with PTL1_BAD_FPTRAP).  For user traps, dispatch to a
 * user fp_disabled utrap handler (p_utraps[UT 7]) if one is installed
 * — 32-bit via .setup_utrap, 64-bit via .setup_v9utrap — otherwise
 * enter the kernel fp_disabled() handler via sys_trap.
 */
.fp_disabled:
1469	CPU_ADDR(%g1, %g4)				! load CPU struct addr
1470	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
1471	rdpr	%tstate, %g4
1472	btst	TSTATE_PRIV, %g4
1473	bnz,a,pn %icc, ptl1_panic
1474	  mov	PTL1_BAD_FPTRAP, %g1
1475
1476	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
1477	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
1478	brz,a,pt %g5, 2f
1479	  nop
1480	ldn	[%g5 + P_UTRAP7], %g5			! fp_disabled utrap?
1481	brz,a,pn %g5, 2f
1482	  nop
1483	btst	1, %sp
1484	bz,a,pt	%xcc, 1f				! 32 bit user program
1485	  nop
1486	ba,a,pt	%xcc, .setup_v9utrap			! 64 bit user program
1487	  nop
14881:
1489	ba,pt	%xcc, .setup_utrap
1490	  or	%g0, %g0, %g7			! no misaligned address
14912:
1492	set	fp_disabled, %g1
1493	ba,pt	%xcc, sys_trap
1494	  sub	%g0, 1, %g4
1496
/*
 * fp exception ieee 754 trap (tt 0x21).  Fatal if taken in privileged
 * mode.  Reads %fsr into %g2 (staged through CPU_TMP1 since %fsr can
 * only be stored to memory), then dispatches to a user IEEE-fp utrap
 * handler (p_utraps[UT 8]) if installed, else to the kernel
 * _fp_ieee_exception handler via sys_trap.
 */
.fp_ieee_exception:
1497	rdpr	%tstate, %g1
1498	btst	TSTATE_PRIV, %g1
1499	bnz,a,pn %icc, ptl1_panic
1500	  mov	PTL1_BAD_FPTRAP, %g1
1501	CPU_ADDR(%g1, %g4)				! load CPU struct addr
1502	stx	%fsr, [%g1 + CPU_TMP1]
1503	ldx	[%g1 + CPU_TMP1], %g2			! %g2 = %fsr
1504	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
1505	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
1506	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
1507	brz,a,pt %g5, 1f
1508	  nop
1509	ldn	[%g5 + P_UTRAP8], %g5
1510	brnz,a,pt %g5, .setup_v9utrap
1511	  nop
15121:
1513	set	_fp_ieee_exception, %g1
1514	ba,pt	%xcc, sys_trap
1515	  sub	%g0, 1, %g4
1517
1518/*
1519 * Register Inputs:
1520 *	%g5		user trap handler
1521 *	%g7		misaligned addr - for alignment traps only
1522 */
1523.setup_utrap:
1524	set	trap, %g1			! setup in case we go
1525	mov	T_FLUSH_PCB, %g3		! through sys_trap on
1526	sub	%g0, 1, %g4			! the save instruction below
1527
1528	/*
1529	 * If the DTrace pid provider is single stepping a copied-out
1530	 * instruction, t->t_dtrace_step will be set. In that case we need
1531	 * to abort the single-stepping (since execution of the instruction
1532	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1533	 */
1534	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
1535	CPU_ADDR(%g1, %g4)			! load CPU struct addr
1536	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
1537	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
1538	rdpr	%tnpc, %l2			! arg1 == tnpc
1539	brz,pt	%g2, 1f
1540	rdpr	%tpc, %l1			! arg0 == tpc
1541
1542	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
1543	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
1544	brz,pt	%g2, 1f
1545	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
1546	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
15471:
1548	mov	%g7, %l3			! arg2 == misaligned address
1549
1550	rdpr	%tstate, %g1			! cwp for trap handler
1551	rdpr	%cwp, %g4
1552	bclr	TSTATE_CWP_MASK, %g1
1553	wrpr	%g1, %g4, %tstate
1554	wrpr	%g0, %g5, %tnpc			! trap handler address
1555	FAST_TRAP_DONE
1556	/* NOTREACHED */
1557
/*
 * Decide whether a trap should be delivered to a registered V9 user
 * trap handler.  In: %g3 = trap type.  Privileged traps, processes
 * with no p_utraps table, and traps with no registered handler all
 * fall through to trap() via sys_trap.  For T_SOFTWARE_TRAP the
 * actual hw trap type (%tt) indexes p_utraps (biased by 254 so that
 * UT_TRAP_INSTRUCTION_16 lands at p_utraps[18]).  For T_UNIMP_INSTR
 * the faulting instruction is fetched with an "as if user" ASI (under
 * CPU_TL1_HDLR protection, since the fetch itself may fault) and
 * genuine illtrap instructions are excluded from utrap dispatch.
 */
.check_v9utrap:
1558	rdpr	%tstate, %g1
1559	btst	TSTATE_PRIV, %g1
1560	bnz,a,pn %icc, 3f
1561	  nop
1562	CPU_ADDR(%g4, %g1)				! load CPU struct addr
1563	ldn	[%g4 + CPU_THREAD], %g5			! load thread pointer
1564	ldn	[%g5 + T_PROCP], %g5			! load proc pointer
1565	ldn	[%g5 + P_UTRAPS], %g5			! are there utraps?
1566
1567	cmp	%g3, T_SOFTWARE_TRAP
1568	bne,a,pt %icc, 1f
1569	  nop
1570
1571	brz,pt %g5, 3f			! if p_utraps == NULL goto trap()
1572	  rdpr	%tt, %g3		! delay - get actual hw trap type
1573
1574	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
1575	ba,pt	%icc, 2f
1576	  smul	%g1, CPTRSIZE, %g2	! %g2 = byte offset into p_utraps[]
15771:
1578	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
1579	  nop
1580
1581	cmp	%g3, T_UNIMP_INSTR
1582	bne,a,pt %icc, 2f
1583	  nop
1584
1585	mov	1, %g1
1586	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
1587	rdpr	%tpc, %g1		! ld trapping instruction using
1588	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
1589	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR
1590
1591	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
1592	andcc	%g1, %g4, %g4		! and instruction with mask
1593	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
1594	  nop				! fall thru to setup
15952:
1596	ldn	[%g5 + %g2], %g5	! %g5 = registered utrap handler
1597	brnz,a,pt %g5, .setup_v9utrap
1598	  nop
15993:
1600	set	trap, %g1
1601	ba,pt	%xcc, sys_trap
1602	  sub	%g0, 1, %g4
1603	/* NOTREACHED */
1605
1606/*
1607 * Register Inputs:
1608 *	%g5		user trap handler
1609 */
1610.setup_v9utrap:
1611	set	trap, %g1			! setup in case we go
1612	mov	T_FLUSH_PCB, %g3		! through sys_trap on
1613	sub	%g0, 1, %g4			! the save instruction below
1614
1615	/*
1616	 * If the DTrace pid provider is single stepping a copied-out
1617	 * instruction, t->t_dtrace_step will be set. In that case we need
1618	 * to abort the single-stepping (since execution of the instruction
1619	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1620	 */
1621	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
1622	CPU_ADDR(%g1, %g4)			! load CPU struct addr
1623	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
1624	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
1625	rdpr	%tnpc, %l7			! arg1 == tnpc
1626	brz,pt	%g2, 1f
1627	rdpr	%tpc, %l6			! arg0 == tpc
1628
1629	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
1630	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
1631	brz,pt	%g2, 1f
1632	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
1633	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
16341:
1635	rdpr	%tstate, %g2			! cwp for trap handler
1636	rdpr	%cwp, %g4
1637	bclr	TSTATE_CWP_MASK, %g2
1638	wrpr	%g2, %g4, %tstate
1639
1640	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
1641	ldn	[%g4 + P_AS], %g4		! load as pointer
1642	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
1643	cmp	%l7, %g4			! check for single-step set
1644	bne,pt	%xcc, 4f
1645	  nop
1646	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
1647	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
1648	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
1649	bne,pt	%icc, 4f
1650	  nop
1651	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
1652	mov	%l7, %g4			! on entry to precise user trap
1653	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
1654						! at time of trap
1655	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
1656						! %g4 == userlimit
1657	FAST_TRAP_DONE
1658	/* NOTREACHED */
16594:
1660	wrpr	%g0, %g5, %tnpc			! trap handler address
1661	FAST_TRAP_DONE_CHK_INTR
1662	/* NOTREACHED */
1663
/*
 * fp exception other trap (tt 0x22).  Reads %fsr into %g2, then tries
 * to handle an unfinished_FPop on a user "fitos" instruction entirely
 * in-kernel by simulating it as fitod + fdtos through the temporary
 * register %d62 (saved/restored around the simulation).  All other
 * cases branch to .fp_exception_cont for software emulation.
 */
.fp_exception:
1664	CPU_ADDR(%g1, %g4)
1665	stx	%fsr, [%g1 + CPU_TMP1]
1666	ldx	[%g1 + CPU_TMP1], %g2
1667
1668	/*
1669	 * Cheetah takes unfinished_FPop trap for certain range of operands
1670	 * to the "fitos" instruction. Instead of going through the slow
1671	 * software emulation path, we try to simulate the "fitos" instruction
1672	 * via "fitod" and "fdtos" provided the following conditions are met:
1673	 *
1674	 *	fpu_exists is set (if DEBUG)
1675	 *	not in privileged mode
1676	 *	ftt is unfinished_FPop
1677	 *	NXM IEEE trap is not enabled
1678	 *	instruction at %tpc is "fitos"
1679	 *
1680	 *  Usage:
1681	 *	%g1	per cpu address
1682	 *	%g2	%fsr
1683	 *	%g6	user instruction
1684	 *
1685	 * Note that we can take a memory access related trap while trying
1686	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
1687	 * flag to catch those traps and let the SFMMU code deal with page
1688	 * fault and data access exception.
1689	 */
1690#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
1691	sethi	%hi(fpu_exists), %g7
1692	ld	[%g7 + %lo(fpu_exists)], %g7
1693	brz,pn %g7, .fp_exception_cont
1694	  nop
1695#endif
1696	rdpr	%tstate, %g7			! branch if in privileged mode
1697	btst	TSTATE_PRIV, %g7
1698	bnz,pn	%xcc, .fp_exception_cont
1699	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
1700	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
1701	cmp	%g7, FTT_UNFIN
1702	set	FSR_TEM_NX, %g5
1703	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
1704	  andcc	%g2, %g5, %g0
1705	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
1706	  rdpr	%tpc, %g5			! get faulting PC
1707
1708	or	%g0, 1, %g7
1709	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
1710	lda	[%g5]ASI_USER, %g6		! get user's instruction
1711	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
1712
1713	set	FITOS_INSTR_MASK, %g7
1714	and	%g6, %g7, %g7
1715	set	FITOS_INSTR, %g5
1716	cmp	%g7, %g5
1717	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
1718	 nop
1719
1720	/*
1721	 * This is unfinished FPops trap for "fitos" instruction. We
1721	 * need to simulate "fitos" via "fitod" and "fdtos" instruction
1723	 * sequence.
1724	 *
1725	 * We need a temporary FP register to do the conversion. Since
1726	 * both source and destination operands for the "fitos" instruction
1727	 * have to be within %f0-%f31, we use an FP register from the upper
1728	 * half to guarantee that it won't collide with the source or the
1729	 * dest operand. However, we do have to save and restore its value.
1730	 *
1731	 * We use %d62 as a temporary FP register for the conversion and
1732	 * branch to appropriate instruction within the conversion tables
1733	 * based upon the rs2 and rd values.
1734	 */
1735
1736	std	%d62, [%g1 + CPU_TMP1]		! save original value
1737
1738	srl	%g6, FITOS_RS2_SHIFT, %g7
1739	and	%g7, FITOS_REG_MASK, %g7
1740	set	_fitos_fitod_table, %g4
1741	sllx	%g7, 2, %g7			! 4 bytes (1 instr) per entry
1742	jmp	%g4 + %g7
1743	  ba,pt	%xcc, _fitos_fitod_done		! executed from delay slot of
1744	.empty					! the selected table entry
1745
1746_fitos_fitod_table:
1747	  fitod	%f0, %d62
1748	  fitod	%f1, %d62
1749	  fitod	%f2, %d62
1750	  fitod	%f3, %d62
1751	  fitod	%f4, %d62
1752	  fitod	%f5, %d62
1753	  fitod	%f6, %d62
1754	  fitod	%f7, %d62
1755	  fitod	%f8, %d62
1756	  fitod	%f9, %d62
1757	  fitod	%f10, %d62
1758	  fitod	%f11, %d62
1759	  fitod	%f12, %d62
1760	  fitod	%f13, %d62
1761	  fitod	%f14, %d62
1762	  fitod	%f15, %d62
1763	  fitod	%f16, %d62
1764	  fitod	%f17, %d62
1765	  fitod	%f18, %d62
1766	  fitod	%f19, %d62
1767	  fitod	%f20, %d62
1768	  fitod	%f21, %d62
1769	  fitod	%f22, %d62
1770	  fitod	%f23, %d62
1771	  fitod	%f24, %d62
1772	  fitod	%f25, %d62
1773	  fitod	%f26, %d62
1774	  fitod	%f27, %d62
1775	  fitod	%f28, %d62
1776	  fitod	%f29, %d62
1777	  fitod	%f30, %d62
1778	  fitod	%f31, %d62
1779_fitos_fitod_done:
1780
1781	/*
1782	 * Now convert data back into single precision
1783	 */
1784	srl	%g6, FITOS_RD_SHIFT, %g7
1785	and	%g7, FITOS_REG_MASK, %g7
1786	set	_fitos_fdtos_table, %g4
1787	sllx	%g7, 2, %g7			! 4 bytes (1 instr) per entry
1788	jmp	%g4 + %g7
1789	  ba,pt	%xcc, _fitos_fdtos_done
1790	.empty
1791
1792_fitos_fdtos_table:
1793	  fdtos	%d62, %f0
1794	  fdtos	%d62, %f1
1795	  fdtos	%d62, %f2
1796	  fdtos	%d62, %f3
1797	  fdtos	%d62, %f4
1798	  fdtos	%d62, %f5
1799	  fdtos	%d62, %f6
1800	  fdtos	%d62, %f7
1801	  fdtos	%d62, %f8
1802	  fdtos	%d62, %f9
1803	  fdtos	%d62, %f10
1804	  fdtos	%d62, %f11
1805	  fdtos	%d62, %f12
1806	  fdtos	%d62, %f13
1807	  fdtos	%d62, %f14
1808	  fdtos	%d62, %f15
1809	  fdtos	%d62, %f16
1810	  fdtos	%d62, %f17
1811	  fdtos	%d62, %f18
1812	  fdtos	%d62, %f19
1813	  fdtos	%d62, %f20
1814	  fdtos	%d62, %f21
1815	  fdtos	%d62, %f22
1816	  fdtos	%d62, %f23
1817	  fdtos	%d62, %f24
1818	  fdtos	%d62, %f25
1819	  fdtos	%d62, %f26
1820	  fdtos	%d62, %f27
1821	  fdtos	%d62, %f28
1822	  fdtos	%d62, %f29
1823	  fdtos	%d62, %f30
1824	  fdtos	%d62, %f31
1825_fitos_fdtos_done:
1826
1827	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62
1828
1829#if DEBUG
1830	/*
1831	 * Update FPop_unfinished trap kstat
1832	 */
1833	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
1834	ldx	[%g7], %g5
18351:
1836	add	%g5, 1, %g6
1837
1838	casxa	[%g7] ASI_N, %g5, %g6		! atomic increment (CAS loop)
1839	cmp	%g5, %g6
1840	bne,a,pn %xcc, 1b
1841	  or	%g0, %g6, %g5
1842
1843	/*
1844	 * Update fpu_sim_fitos kstat
1845	 */
1846	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
1847	ldx	[%g7], %g5
18481:
1849	add	%g5, 1, %g6
1850
1851	casxa	[%g7] ASI_N, %g5, %g6		! atomic increment (CAS loop)
1852	cmp	%g5, %g6
1853	bne,a,pn %xcc, 1b
1854	  or	%g0, %g6, %g5
1855#endif /* DEBUG */
1856
1857	FAST_TRAP_DONE
1858
1859
.fp_exception_cont:
1860	/*
1861	 * Let _fp_exception deal with simulating FPop instruction.
1862	 * Note that we need to pass %fsr in %g2 (already read above).
1863	 */
1864
1865	set	_fp_exception, %g1
1866	ba,pt	%xcc, sys_trap
1867	sub	%g0, 1, %g4
1868
1869
1870
1871/*
1872 * Register windows
1873 */
1874.flushw:
1875.clean_windows:
1876	rdpr	%tnpc, %g1
1877	wrpr	%g1, %tpc
1878	add	%g1, 4, %g1
1879	wrpr	%g1, %tnpc
1880	set	trap, %g1
1881	mov	T_FLUSH_PCB, %g3
1882	ba,pt	%xcc, sys_trap
1883	sub	%g0, 1, %g4
1884
1885/*
1886 * .spill_clean: clean the previous window, restore the wstate, and
1887 * "done".
1888 *
1889 * Entry: %g7 contains new wstate
1890 */
1891.spill_clean:
1892	sethi	%hi(nwin_minus_one), %g5
1893	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
1894	rdpr	%cwp, %g6			! %g6 = %cwp
1895	deccc	%g6				! %g6--
1896	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
1897	wrpr	%g6, %cwp
1898	TT_TRACE_L(trace_win)
1899	clr	%l0
1900	clr	%l1
1901	clr	%l2
1902	clr	%l3
1903	clr	%l4
1904	clr	%l5
1905	clr	%l6
1906	clr	%l7
1907	wrpr	%g0, %g7, %wstate
1908	saved
1909	retry			! restores correct %cwp
1910
/*
 * ST_FIX_ALIGN software trap: set the process's p_fixalignment flag
 * so subsequent misaligned accesses are fixed up, then retire the
 * trap.
 */
.fix_alignment:
1911	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
1912	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
1913	ldn	[%g1 + T_PROCP], %g1
1914	mov	1, %g2
1915	stb	%g2, [%g1 + P_FIXALIGNMENT]
1916	FAST_TRAP_DONE
1918
/*
 * Store double fp register number REG to [ADDR + CPU_TMP1] via a
 * computed jump: REG << 3 indexes a table of 2-instruction (8-byte)
 * entries, each a "ba done1" with the std in its delay slot.  Entries
 * alternate %f0/%f32, %f2/%f34, ... matching the V9 double-register
 * encoding in which odd register numbers name the upper bank
 * (%f32-%f62).  Clobbers REG and TMP.
 */
1919#define	STDF_REG(REG, ADDR, TMP)		\
1920	sll	REG, 3, REG			;\
1921mark1:	set	start1, TMP			;\
1922	jmp	REG + TMP			;\
1923	  nop					;\
1924start1:	ba,pt	%xcc, done1			;\
1925	  std	%f0, [ADDR + CPU_TMP1]		;\
1926	ba,pt	%xcc, done1			;\
1927	  std	%f32, [ADDR + CPU_TMP1]		;\
1928	ba,pt	%xcc, done1			;\
1929	  std	%f2, [ADDR + CPU_TMP1]		;\
1930	ba,pt	%xcc, done1			;\
1931	  std	%f34, [ADDR + CPU_TMP1]		;\
1932	ba,pt	%xcc, done1			;\
1933	  std	%f4, [ADDR + CPU_TMP1]		;\
1934	ba,pt	%xcc, done1			;\
1935	  std	%f36, [ADDR + CPU_TMP1]		;\
1936	ba,pt	%xcc, done1			;\
1937	  std	%f6, [ADDR + CPU_TMP1]		;\
1938	ba,pt	%xcc, done1			;\
1939	  std	%f38, [ADDR + CPU_TMP1]		;\
1940	ba,pt	%xcc, done1			;\
1941	  std	%f8, [ADDR + CPU_TMP1]		;\
1942	ba,pt	%xcc, done1			;\
1943	  std	%f40, [ADDR + CPU_TMP1]		;\
1944	ba,pt	%xcc, done1			;\
1945	  std	%f10, [ADDR + CPU_TMP1]		;\
1946	ba,pt	%xcc, done1			;\
1947	  std	%f42, [ADDR + CPU_TMP1]		;\
1948	ba,pt	%xcc, done1			;\
1949	  std	%f12, [ADDR + CPU_TMP1]		;\
1950	ba,pt	%xcc, done1			;\
1951	  std	%f44, [ADDR + CPU_TMP1]		;\
1952	ba,pt	%xcc, done1			;\
1953	  std	%f14, [ADDR + CPU_TMP1]		;\
1954	ba,pt	%xcc, done1			;\
1955	  std	%f46, [ADDR + CPU_TMP1]		;\
1956	ba,pt	%xcc, done1			;\
1957	  std	%f16, [ADDR + CPU_TMP1]		;\
1958	ba,pt	%xcc, done1			;\
1959	  std	%f48, [ADDR + CPU_TMP1]		;\
1960	ba,pt	%xcc, done1			;\
1961	  std	%f18, [ADDR + CPU_TMP1]		;\
1962	ba,pt	%xcc, done1			;\
1963	  std	%f50, [ADDR + CPU_TMP1]		;\
1964	ba,pt	%xcc, done1			;\
1965	  std	%f20, [ADDR + CPU_TMP1]		;\
1966	ba,pt	%xcc, done1			;\
1967	  std	%f52, [ADDR + CPU_TMP1]		;\
1968	ba,pt	%xcc, done1			;\
1969	  std	%f22, [ADDR + CPU_TMP1]		;\
1970	ba,pt	%xcc, done1			;\
1971	  std	%f54, [ADDR + CPU_TMP1]		;\
1972	ba,pt	%xcc, done1			;\
1973	  std	%f24, [ADDR + CPU_TMP1]		;\
1974	ba,pt	%xcc, done1			;\
1975	  std	%f56, [ADDR + CPU_TMP1]		;\
1976	ba,pt	%xcc, done1			;\
1977	  std	%f26, [ADDR + CPU_TMP1]		;\
1978	ba,pt	%xcc, done1			;\
1979	  std	%f58, [ADDR + CPU_TMP1]		;\
1980	ba,pt	%xcc, done1			;\
1981	  std	%f28, [ADDR + CPU_TMP1]		;\
1982	ba,pt	%xcc, done1			;\
1983	  std	%f60, [ADDR + CPU_TMP1]		;\
1984	ba,pt	%xcc, done1			;\
1985	  std	%f30, [ADDR + CPU_TMP1]		;\
1986	ba,pt	%xcc, done1			;\
1987	  std	%f62, [ADDR + CPU_TMP1]		;\
1988done1:
1989
/*
 * LDDF_REG(REG, ADDR, TMP): the load-side twin of STDF_REG — load the
 * 8 bytes at ADDR + CPU_TMP1 into double fp register %f<rd>.  REG is
 * the instruction's rd field, scaled by 8 to index the jump table of
 * "ba done2" + "ldd" pairs; odd rd values select the upper bank
 * (%f32-%f62).  REG is clobbered; TMP holds the table base.
 */
1990#define	LDDF_REG(REG, ADDR, TMP)		\
1991	sll	REG, 3, REG			;\
1992mark2:	set	start2, TMP			;\
1993	jmp	REG + TMP			;\
1994	  nop					;\
1995start2:	ba,pt	%xcc, done2			;\
1996	  ldd	[ADDR + CPU_TMP1], %f0		;\
1997	ba,pt	%xcc, done2			;\
1998	  ldd	[ADDR + CPU_TMP1], %f32		;\
1999	ba,pt	%xcc, done2			;\
2000	  ldd	[ADDR + CPU_TMP1], %f2		;\
2001	ba,pt	%xcc, done2			;\
2002	  ldd	[ADDR + CPU_TMP1], %f34		;\
2003	ba,pt	%xcc, done2			;\
2004	  ldd	[ADDR + CPU_TMP1], %f4		;\
2005	ba,pt	%xcc, done2			;\
2006	  ldd	[ADDR + CPU_TMP1], %f36		;\
2007	ba,pt	%xcc, done2			;\
2008	  ldd	[ADDR + CPU_TMP1], %f6		;\
2009	ba,pt	%xcc, done2			;\
2010	  ldd	[ADDR + CPU_TMP1], %f38		;\
2011	ba,pt	%xcc, done2			;\
2012	  ldd	[ADDR + CPU_TMP1], %f8		;\
2013	ba,pt	%xcc, done2			;\
2014	  ldd	[ADDR + CPU_TMP1], %f40		;\
2015	ba,pt	%xcc, done2			;\
2016	  ldd	[ADDR + CPU_TMP1], %f10		;\
2017	ba,pt	%xcc, done2			;\
2018	  ldd	[ADDR + CPU_TMP1], %f42		;\
2019	ba,pt	%xcc, done2			;\
2020	  ldd	[ADDR + CPU_TMP1], %f12		;\
2021	ba,pt	%xcc, done2			;\
2022	  ldd	[ADDR + CPU_TMP1], %f44		;\
2023	ba,pt	%xcc, done2			;\
2024	  ldd	[ADDR + CPU_TMP1], %f14		;\
2025	ba,pt	%xcc, done2			;\
2026	  ldd	[ADDR + CPU_TMP1], %f46		;\
2027	ba,pt	%xcc, done2			;\
2028	  ldd	[ADDR + CPU_TMP1], %f16		;\
2029	ba,pt	%xcc, done2			;\
2030	  ldd	[ADDR + CPU_TMP1], %f48		;\
2031	ba,pt	%xcc, done2			;\
2032	  ldd	[ADDR + CPU_TMP1], %f18		;\
2033	ba,pt	%xcc, done2			;\
2034	  ldd	[ADDR + CPU_TMP1], %f50		;\
2035	ba,pt	%xcc, done2			;\
2036	  ldd	[ADDR + CPU_TMP1], %f20		;\
2037	ba,pt	%xcc, done2			;\
2038	  ldd	[ADDR + CPU_TMP1], %f52		;\
2039	ba,pt	%xcc, done2			;\
2040	  ldd	[ADDR + CPU_TMP1], %f22		;\
2041	ba,pt	%xcc, done2			;\
2042	  ldd	[ADDR + CPU_TMP1], %f54		;\
2043	ba,pt	%xcc, done2			;\
2044	  ldd	[ADDR + CPU_TMP1], %f24		;\
2045	ba,pt	%xcc, done2			;\
2046	  ldd	[ADDR + CPU_TMP1], %f56		;\
2047	ba,pt	%xcc, done2			;\
2048	  ldd	[ADDR + CPU_TMP1], %f26		;\
2049	ba,pt	%xcc, done2			;\
2050	  ldd	[ADDR + CPU_TMP1], %f58		;\
2051	ba,pt	%xcc, done2			;\
2052	  ldd	[ADDR + CPU_TMP1], %f28		;\
2053	ba,pt	%xcc, done2			;\
2054	  ldd	[ADDR + CPU_TMP1], %f60		;\
2055	ba,pt	%xcc, done2			;\
2056	  ldd	[ADDR + CPU_TMP1], %f30		;\
2057	ba,pt	%xcc, done2			;\
2058	  ldd	[ADDR + CPU_TMP1], %f62		;\
2059done2:
2060
/*
 * User lddf/ldda alignment trap: emulate the misaligned 8-byte fp load
 * with two 4-byte user-ASI loads, stage the combined value in the
 * per-cpu cpu_tmp1 slot, and deliver it to the destination fp register
 * via LDDF_REG.  Only the big-endian primary/secondary (and their
 * no-fault) ASIs are emulated here; anything else goes to C (fpu_trap)
 * via sys_trap.  CPU_TL1_HDLR is set around the user-ASI accesses so a
 * nested MMU trap is recognized by mmu_trap_tl1.
 */
2061.lddf_exception_not_aligned:
2062	/* %g2 = sfar, %g3 = sfsr */
2063	mov	%g2, %g5		! stash sfar
2064#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2065	sethi	%hi(fpu_exists), %g2	! check fpu_exists
2066	ld	[%g2 + %lo(fpu_exists)], %g2
2067	brz,a,pn %g2, 4f
2068	  nop
2069#endif
2070	CPU_ADDR(%g1, %g4)
2071	or	%g0, 1, %g4
2072	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2073
2074	rdpr	%tpc, %g2
2075	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
2076	srl	%g6, 23, %g1		! using ldda or not?
2077	and	%g1, 1, %g1
2078	brz,a,pt %g1, 2f		! check for ldda instruction
2079	  nop
2080	srl	%g6, 13, %g1		! check immflag
2081	and	%g1, 1, %g1
2082	rdpr	%tstate, %g2		! %tstate in %g2
2083	brnz,a,pn %g1, 1f
2084	  srl	%g2, 31, %g1		! get asi from %tstate
2085	srl	%g6, 5, %g1		! get asi from instruction
2086	and	%g1, 0xFF, %g1		! imm_asi field
20871:
2088	cmp	%g1, ASI_P		! primary address space
2089	be,a,pt %icc, 2f
2090	  nop
2091	cmp	%g1, ASI_PNF		! primary no fault address space
2092	be,a,pt %icc, 2f
2093	  nop
2094	cmp	%g1, ASI_S		! secondary address space
2095	be,a,pt %icc, 2f
2096	  nop
2097	cmp	%g1, ASI_SNF		! secondary no fault address space
2098	bne,a,pn %icc, 3f
2099	  nop
21002:
2101	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
2102	add	%g5, 4, %g5		! increment misaligned data address
2103	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data
2104
2105	sllx	%g7, 32, %g7
2106	or	%g5, %g7, %g5		! combine data
2107	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
2108	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1
2109
2110	srl	%g6, 25, %g3		! %g6 has the instruction
2111	and	%g3, 0x1F, %g3		! %g3 has rd
2112	LDDF_REG(%g3, %g7, %g4)
2113
2114	CPU_ADDR(%g1, %g4)
2115	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2116	FAST_TRAP_DONE
21173:
2118	CPU_ADDR(%g1, %g4)
2119	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
21204:
2121	set	T_USER, %g3		! trap type in %g3
2122	or	%g3, T_LDDF_ALIGN, %g3
2123	mov	%g5, %g2		! misaligned vaddr in %g2
2124	set	fpu_trap, %g1		! goto C for the little and
2125	ba,pt	%xcc, sys_trap		! no fault little asi's
2126	  sub	%g0, 1, %g4
2127
/*
 * User stdf/stda alignment trap: the store-side twin of
 * .lddf_exception_not_aligned.  Dump the source fp register to the
 * per-cpu cpu_tmp1 slot via STDF_REG, then emulate the misaligned
 * 8-byte store as two 4-byte user-ASI stores.  Only big-endian
 * primary/secondary ASIs are emulated; others go to C (fpu_trap).
 */
2128.stdf_exception_not_aligned:
2129	/* %g2 = sfar, %g3 = sfsr */
2130	mov	%g2, %g5		! stash sfar
2131
2132#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2133	sethi	%hi(fpu_exists), %g7		! check fpu_exists
2134	ld	[%g7 + %lo(fpu_exists)], %g3
2135	brz,a,pn %g3, 4f
2136	  nop
2137#endif
2138	CPU_ADDR(%g1, %g4)
2139	or	%g0, 1, %g4
2140	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2141
2142	rdpr	%tpc, %g2
2143	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction
2144
2145	srl	%g6, 23, %g1		! using stda or not?
2146	and	%g1, 1, %g1
2147	brz,a,pt %g1, 2f		! check for stda instruction
2148	  nop
2149	srl	%g6, 13, %g1		! check immflag
2150	and	%g1, 1, %g1
2151	rdpr	%tstate, %g2		! %tstate in %g2
2152	brnz,a,pn %g1, 1f
2153	  srl	%g2, 31, %g1		! get asi from %tstate
2154	srl	%g6, 5, %g1		! get asi from instruction
2155	and	%g1, 0xff, %g1		! imm_asi field
21561:
2157	cmp	%g1, ASI_P		! primary address space
2158	be,a,pt %icc, 2f
2159	  nop
2160	cmp	%g1, ASI_S		! secondary address space
2161	bne,a,pn %icc, 3f
2162	  nop
21632:
2164	srl	%g6, 25, %g6
2165	and	%g6, 0x1F, %g6		! %g6 has rd
2166	CPU_ADDR(%g7, %g1)
2167	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)
2168
2169	ldx	[%g7 + CPU_TMP1], %g6
2170	srlx	%g6, 32, %g7
2171	stuwa	%g7, [%g5]ASI_USER	! first half
2172	add	%g5, 4, %g5		! increment misaligned data address
2173	stuwa	%g6, [%g5]ASI_USER	! second half
2174
2175	CPU_ADDR(%g1, %g4)
2176	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2177	FAST_TRAP_DONE
21783:
2179	CPU_ADDR(%g1, %g4)
2180	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
21814:
2182	set	T_USER, %g3		! trap type in %g3
2183	or	%g3, T_STDF_ALIGN, %g3
2184	mov	%g5, %g2		! misaligned vaddr in %g2
2185	set	fpu_trap, %g1		! goto C for the little and
2186	ba,pt	%xcc, sys_trap		! nofault little asi's
2187	  sub	%g0, 1, %g4
2188
2189#ifdef DEBUG_USER_TRAPTRACECTL
2190
/*
 * Freeze trap tracing: log one final window-trace entry (preserving
 * the caller's locals around TT_TRACE_L), then set trap_freeze = 1.
 */
2191.traptrace_freeze:
2192	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2193	TT_TRACE_L(trace_win)
2194	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2195	set	trap_freeze, %g1
2196	mov	1, %g2			! non-zero = frozen
2197	st	%g2, [%g1]
2198	FAST_TRAP_DONE
2199
/*
 * Unfreeze trap tracing: clear trap_freeze first so the window-trace
 * entry logged below is itself recorded.
 */
2200.traptrace_unfreeze:
2201	set	trap_freeze, %g1
2202	st	%g0, [%g1]		! trap_freeze = 0
2203	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2204	TT_TRACE_L(trace_win)
2205	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2206	FAST_TRAP_DONE
2207
2208#endif /* DEBUG_USER_TRAPTRACECTL */
2209
/*
 * .getcc: extract the V8 psr icc bits from %tstate.ccr and return them
 * to the user in the user-visible (gl=0) %g1.  %o0 is preserved via
 * the per-cpu cpu_tmp1 slot across the %gl round trip.
 */
2210.getcc:
2211	CPU_ADDR(%g1, %g2)
2212	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2213	rdpr	%tstate, %g3			! get tstate
2214	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2215	set	PSR_ICC, %g2
2216	and	%o0, %g2, %o0			! mask out the rest
2217	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
2218	wrpr	%g0, 0, %gl
2219	mov	%o0, %g1			! move ccr to normal %g1
2220	wrpr	%g0, 1, %gl
2221	! cannot assume globals retained their values after increasing %gl
2222	CPU_ADDR(%g1, %g2)
2223	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2224	FAST_TRAP_DONE
2225
/*
 * .setcc: take V8 psr icc bits from the user-visible (gl=0) %g1 and
 * merge them into %tstate.ccr so they take effect on trap return.
 * %o0 is preserved via cpu_tmp1 across the %gl round trip.
 */
2226.setcc:
2227	CPU_ADDR(%g1, %g2)
2228	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2229	wrpr	%g0, 0, %gl
2230	mov	%g1, %o0			! fetch user's %g1 (new icc)
2231	wrpr	%g0, 1, %gl
2232	! cannot assume globals retained their values after increasing %gl
2233	CPU_ADDR(%g1, %g2)
2234	sll	%o0, PSR_ICC_SHIFT, %g2
2235	set	PSR_ICC, %g3
2236	and	%g2, %g3, %g2			! mask out rest
2237	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
2238	rdpr	%tstate, %g3			! get tstate
2239	srl	%g3, 0, %g3			! clear upper word
2240	or	%g3, %g2, %g3			! or in new bits
2241	wrpr	%g3, %tstate
2242	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2243	FAST_TRAP_DONE
2244
2245/*
2246 * getpsr(void)
2247 * Note that the xcc part of the ccr is not provided.
2248 * The V8 code shows why the V9 trap is not faster:
2249 * #define GETPSR_TRAP() \
2250 *      mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2251 */
2252
	.type	.getpsr, #function
/*
 * Return a synthesized V8 %psr in %o0: icc from %tstate.ccr, ef from
 * %fprs.fef, plus the fixed impl/ver field.  Clobbers %g1, %g2.
 */
.getpsr:
2255	rdpr	%tstate, %g1			! get tstate
2256	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2257	set	PSR_ICC, %g2
2258	and	%o0, %g2, %o0			! mask out the rest
2259
2260	rd	%fprs, %g1			! get fprs
2261	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
2262	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
2263	or	%o0, %g2, %o0			! or result into psr.ef
2264
2265	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
2266	or	%o0, %g2, %o0			! or psr.impl/ver
2267	FAST_TRAP_DONE
2268	SET_SIZE(.getpsr)
2269
2270/*
2271 * setpsr(newpsr)
2272 * Note that there is no support for ccr.xcc in the V9 code.
2273 */
2274
	.type	.setpsr, #function
/*
 * Apply a V8 %psr from %o0: icc bits go into %tstate.ccr, the ef bit
 * into %fprs.fef, and the new fef state is mirrored into the lwp's
 * fpu_fprs/fpu_en so context switch code sees it.  Clobbers %g1-%g4.
 */
.setpsr:
2277	rdpr	%tstate, %g1			! get tstate
2278!	setx	TSTATE_V8_UBITS, %g2
2279	or 	%g0, CCR_ICC, %g3
2280	sllx	%g3, TSTATE_CCR_SHIFT, %g2	! %g2 = icc mask in tstate
2281
2282	andn	%g1, %g2, %g1			! zero current user bits
2283	set	PSR_ICC, %g2
2284	and	%g2, %o0, %g2			! clear all but psr.icc bits
2285	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
2286	wrpr	%g1, %g3, %tstate		! write tstate
2287
2288	set	PSR_EF, %g2
2289	and	%g2, %o0, %g2			! clear all but fp enable bit
2290	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
2291	wr	%g0, %g4, %fprs			! write fprs
2292
2293	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
2294	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
2295	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
2296	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
2297	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
2298	srlx	%g4, 2, %g4			! shift fef value to bit 0
2299	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
2300	FAST_TRAP_DONE
2301	SET_SIZE(.setpsr)
2302
2303/*
2304 * getlgrp
2305 * get home lgrpid on which the calling thread is currently executing.
2306 */
	.type	.getlgrp, #function
.getlgrp:
2309	! return cpu_id in %o0 and the thread's home lgroup id in %o1
2310	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2311	ld	[%g1 + CPU_ID], %o0	! load cpu_id
2312	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2313	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
2314	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
2315	sra	%g1, 0, %o1		! sign-extend lgrpid to 64 bits
2316	FAST_TRAP_DONE
2317	SET_SIZE(.getlgrp)
2318
2319/*
2320 * Entry for old 4.x trap (trap 0).
2321 */
	ENTRY_NP(syscall_trap_4x)
	/*
	 * If the process registered a 4.x trap-0 emulation address
	 * (pcb_trap0addr), redirect there, passing the original return
	 * pc to user code in (gl=0) %g6.  Otherwise fall through to
	 * label 1 and run the call as a native 32-bit syscall, with
	 * only OSYS_mmap remapped in the kernel.
	 */
2323	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2324	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2325	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2326	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
2327	brz,pn	%g2, 1f			! has it been set?
2328	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
2329	st	%l1, [%g1 + CPU_TMP2]
2330	rdpr	%tnpc, %l1		! save old tnpc
2331	wrpr	%g0, %g2, %tnpc		! setup tnpc
2332
2333	mov	%g1, %l0		! save CPU struct addr
2334	wrpr	%g0, 0, %gl
2335	mov	%l1, %g6		! pass tnpc to user code in %g6
2336	wrpr	%g0, 1, %gl
2337	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2338	ld	[%l0 + CPU_TMP1], %l0
2339	FAST_TRAP_DONE_CHK_INTR
23401:
2341	!
2342	! check for old syscall mmap which is the only different one which
2343	! must be the same.  Others are handled in the compatibility library.
2344	!
2345	mov	%g1, %l0		! save CPU struct addr
2346	wrpr	%g0, 0, %gl
2347	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
2348	movz	%icc, SYS_mmap, %g1
2349	wrpr	%g0, 1, %gl
2350	ld	[%l0 + CPU_TMP1], %l0
2351	SYSCALL(syscall_trap32)
2352	SET_SIZE(syscall_trap_4x)
2353
2354/*
2355 * Handler for software trap 9.
2356 * Set trap0 emulation address for old 4.x system call trap.
2357 * XXX - this should be a system call.
2358 */
	ENTRY_NP(set_trap0_addr)
2360	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2361	st	%l0, [%g1 + CPU_TMP1]	! save some locals
2362	st	%l1, [%g1 + CPU_TMP2]
2363	mov	%g1, %l0	! preserve CPU addr
2364	wrpr	%g0, 0, %gl
2365	mov	%g1, %l1		! user's %g1 = new trap-0 address
2366	wrpr	%g0, 1, %gl
2367	! cannot assume globals retained their values after increasing %gl
2368	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
2369	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2370	andn	%l1, 3, %l1		! force alignment
2371	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
2372	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2373	ld	[%l0 + CPU_TMP1], %l0
2374	FAST_TRAP_DONE
2375	SET_SIZE(set_trap0_addr)
2376
2377/*
2378 * mmu_trap_tl1
2379 * trap handler for unexpected mmu traps.
2380 * simply checks if the trap was a user lddf/stdf alignment trap, in which
2381 * case we go to fpu_trap or a user trap from the window handler, in which
2382 * case we go save the state on the pcb.  Otherwise, we go to ptl1_panic.
2383 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
2386#ifdef	TRAPTRACE
2387	TRACE_PTR(%g5, %g6)
2388	GET_TRACE_TICK(%g6)
2389	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2390	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
2391	rdpr	%tt, %g6
2392	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
2393	rdpr	%tstate, %g6
2394	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
2395	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
2396	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
2397	rdpr	%tpc, %g6
2398	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2399	MMU_FAULT_STATUS_AREA(%g6)
2400	ldx	[%g6 + MMFSA_D_ADDR], %g6
2401	stna	%g6, [%g5 + TRAP_ENT_F1]%asi !  MMU fault address
2402	CPU_PADDR(%g7, %g6);
2403	add	%g7, CPU_TL1_HDLR, %g7
2404	lda	[%g7]ASI_MEM, %g6
2405	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
2406	MMU_FAULT_STATUS_AREA(%g6)
2407	ldx	[%g6 + MMFSA_D_TYPE], %g7 ! XXXQ should be a MMFSA_F_ constant?
2408	ldx	[%g6 + MMFSA_D_CTX], %g6
2409	sllx	%g6, SFSR_CTX_SHIFT, %g6
2410	or	%g6, %g7, %g6
2411	stna	%g6, [%g5 + TRAP_ENT_F3]%asi ! MMU context/type
2412	set	0xdeadbeef, %g6
2413	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
2414	TRACE_NEXT(%g5, %g6, %g7)
2415#endif /* TRAPTRACE */
2416	CPU_PADDR(%g7, %g6);
2417	add     %g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
2418	lda	[%g7]ASI_MEM, %g6
2419	brz,a,pt %g6, 1f		! tl1_hdlr flag not set?
2420	  nop
2421	sta     %g0, [%g7]ASI_MEM	! clear the flag (one-shot)
2422	! XXXQ need to setup registers for sfmmu_mmu_trap?
2423	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
24241:
2425	rdpr	%tpc, %g7
2426	/* in user_rtt? */
2427	set	rtt_fill_start, %g6
2428	cmp	%g7, %g6
2429	blu,pn	%xcc, 6f
2430	 .empty
2431	set	rtt_fill_end, %g6
2432	cmp	%g7, %g6
2433	bgeu,pn %xcc, 6f
2434	 nop
2435	set	fault_rtt_fn1, %g7	! faulted in user_rtt fill
2436	ba,a	7f
24376:
2438	! check to see if the trap pc is in a window spill/fill handling
2439	rdpr	%tpc, %g7
2440	/* tpc should be in the trap table */
2441	set	trap_table, %g6
2442	cmp	%g7, %g6
2443	blu,a,pn %xcc, ptl1_panic
2444	  mov	PTL1_BAD_MMUTRAP, %g1
2445	set	etrap_table, %g6
2446	cmp	%g7, %g6
2447	bgeu,a,pn %xcc, ptl1_panic
2448	  mov	PTL1_BAD_MMUTRAP, %g1
2449	! pc is inside the trap table, convert to trap type
2450	srl	%g7, 5, %g6		! XXXQ need #define
2451	and	%g6, 0x1ff, %g6		! XXXQ need #define
2452	! and check for a window trap type
2453	and	%g6, WTRAP_TTMASK, %g6
2454	cmp	%g6, WTRAP_TYPE
2455	bne,a,pn %xcc, ptl1_panic
2456	  mov	PTL1_BAD_MMUTRAP, %g1
2457	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
2458	add	%g7, WTRAP_FAULTOFF, %g7	! resume at handler's fault entry
2459
24607:
2461	! Arguments are passed in the global set active after the
2462	! 'done' instruction. Before switching sets, must save
2463	! the calculated next pc
2464	wrpr	%g0, %g7, %tnpc
2465	wrpr	%g0, 1, %gl
2466	rdpr	%tt, %g5
2467	MMU_FAULT_STATUS_AREA(%g7)
2468	cmp	%g5, T_ALIGNMENT
2469	be,pn	%xcc, 1f
2470	ldx	[%g7 + MMFSA_D_ADDR], %g6
2471	ldx	[%g7 + MMFSA_D_CTX], %g7
2472	srlx	%g6, MMU_PAGESHIFT, %g6		/* align address */
2473	sllx	%g6, MMU_PAGESHIFT, %g6
2474	or	%g6, %g7, %g6			/* TAG_ACCESS */
24751:
2476	done
2477	SET_SIZE(mmu_trap_tl1)
2478
2479/*
2480 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2481 * traps are valid only when kmdb is loaded.  When the debugger is active,
2482 * the code below is rewritten to transfer control to the appropriate
2483 * debugger entry points.
2484 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
2488	ba,a	trap_table0		! default: no debugger loaded
2489	jmp	%g1 + 0			! used when rewritten (see above)
2490	nop
2491
2492	.global	kmdb_trap_tl1
2493	.align	8
kmdb_trap_tl1:
2495	ba,a	trap_table0		! default: no debugger loaded
2496	jmp	%g1 + 0			! used when rewritten (see above)
2497	nop
2498
2499/*
2500 * This entry is copied from OBP's trap table during boot.
2501 */
	.global	obp_bpt
	.align	8
obp_bpt:
2505	NOT				! placeholder until OBP's entry is copied here
2506
2507
2508
2509#ifdef	TRAPTRACE
2510/*
2511 * TRAPTRACE support.
2512 * labels here are branched to with "rd %pc, %g7" in the delay slot.
2513 * Return is done by "jmp %g7 + 4".
2514 */
2515
/*
 * Record a trap-trace entry for a data MMU trap: %tt/%tstate/%tpc plus
 * the D-side fault address, context and type from the MMU fault status
 * area.  Clobbers %g3-%g6; returns via %g7 + 4.
 */
trace_dmmu:
2517	TRACE_PTR(%g3, %g6)
2518	GET_TRACE_TICK(%g6)
2519	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2520	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2521	rdpr	%tt, %g6
2522	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2523	rdpr	%tstate, %g6
2524	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2525	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2526	rdpr	%tpc, %g6
2527	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2528	MMU_FAULT_STATUS_AREA(%g6)
2529	ldx	[%g6 + MMFSA_D_ADDR], %g4
2530	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! D-side fault address
2531	ldx	[%g6 + MMFSA_D_CTX], %g4
2532	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! D-side fault context
2533	ldx	[%g6 + MMFSA_D_TYPE], %g4
2534	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! D-side fault type
2535	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA pointer
2536	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2537	TRACE_NEXT(%g3, %g4, %g5)
2538	jmp	%g7 + 4
2539	nop
2540
/*
 * Record a trap-trace entry for an instruction MMU trap; identical to
 * trace_dmmu but using the I-side MMFSA fields.  Clobbers %g3-%g6;
 * returns via %g7 + 4.
 */
trace_immu:
2542	TRACE_PTR(%g3, %g6)
2543	GET_TRACE_TICK(%g6)
2544	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2545	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2546	rdpr	%tt, %g6
2547	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2548	rdpr	%tstate, %g6
2549	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2550	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2551	rdpr	%tpc, %g6
2552	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2553	MMU_FAULT_STATUS_AREA(%g6)
2554	ldx	[%g6 + MMFSA_I_ADDR], %g4
2555	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! I-side fault address
2556	ldx	[%g6 + MMFSA_I_CTX], %g4
2557	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! I-side fault context
2558	ldx	[%g6 + MMFSA_I_TYPE], %g4
2559	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! I-side fault type
2560	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA pointer
2561	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2562	TRACE_NEXT(%g3, %g4, %g5)
2563	jmp	%g7 + 4
2564	nop
2565
/*
 * Record a generic trap-trace entry (%tt, %tstate, %sp, %tpc); all
 * fault-specific fields are zeroed.  Clobbers %g3-%g6; returns via
 * %g7 + 4.
 */
trace_gen:
2567	TRACE_PTR(%g3, %g6)
2568	GET_TRACE_TICK(%g6)
2569	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2570	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2571	rdpr	%tt, %g6
2572	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2573	rdpr	%tstate, %g6
2574	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2575	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2576	rdpr	%tpc, %g6
2577	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2578	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
2579	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
2580	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
2581	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
2582	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2583	TRACE_NEXT(%g3, %g4, %g5)
2584	jmp	%g7 + 4
2585	nop
2586
/*
 * Record a window-trap trace entry, then clear the locals used as
 * scratch by TRACE_WIN_INFO before returning via %l4 + 4.
 */
trace_win:
2588	TRACE_WIN_INFO(0, %l0, %l1, %l2)
2589	! Keep the locals as clean as possible, caller cleans %l4
2590	clr	%l2
2591	clr	%l1
2592	jmp	%l4 + 4
2593	  clr	%l0
2594
2595/*
2596 * Trace a tsb hit
2597 * g1 = tsbe pointer (in/clobbered)
2598 * g2 = tag access register (in)
2599 * g3 - g4 = scratch (clobbered)
2600 * g5 = tsbe data (in)
2601 * g6 = scratch (clobbered)
2602 * g7 = pc we jumped here from (in)
2603 */
2604
	! Do not disturb %g5, it will be used after the trace
	/* Record a TSB-hit trace entry; returns via %g7 + 4. */
	ALTENTRY(trace_tsbhit)
2607	TRACE_TSBHIT(0)
2608	jmp	%g7 + 4
2609	nop
2610
2611/*
2612 * Trace a TSB miss
2613 *
2614 * g1 = tsb8k pointer (in)
2615 * g2 = tag access register (in)
2616 * g3 = tsb4m pointer (in)
2617 * g4 = tsbe tag (in/clobbered)
2618 * g5 - g6 = scratch (clobbered)
2619 * g7 = pc we jumped here from (in)
2620 */
	.global	trace_tsbmiss
/*
 * Record a TSB-miss trace entry; picks the I- or D-side MMFSA fault
 * address depending on whether %tt indicates an instruction miss.
 * See the register contract in the comment block above.
 */
trace_tsbmiss:
2623	membar	#Sync
2624	sethi	%hi(FLUSH_ADDR), %g6
2625	flush	%g6
2626	TRACE_PTR(%g5, %g6)
2627	GET_TRACE_TICK(%g6)
2628	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2629	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
2630	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
2631	rdpr	%tnpc, %g6
2632	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
2633	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
2634	srlx	%g1, 32, %g6
2635	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! upper 32 bits of tsb8k ptr
2636	rdpr	%tpc, %g6
2637	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2638	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
2639	rdpr	%tt, %g6
2640	or	%g6, TT_MMU_MISS, %g4
2641	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
2642	mov	MMFSA_D_ADDR, %g4	! default: D-side fault address
2643	cmp	%g6, FAST_IMMU_MISS_TT
2644	move	%xcc, MMFSA_I_ADDR, %g4
2645	cmp	%g6, T_INSTR_MMU_MISS
2646	move	%xcc, MMFSA_I_ADDR, %g4
2647	MMU_FAULT_STATUS_AREA(%g6)
2648	ldx	[%g6 + %g4], %g6
2649	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
2650	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
2651	TRACE_NEXT(%g5, %g4, %g6)
2652	jmp	%g7 + 4
2653	nop
2654
2655/*
2656 * g2 = tag access register (in)
2657 * g3 = ctx number (in)
2658 */
/*
 * Record a data-protection trace entry: %tt/%tstate/%tpc plus the tag
 * access register in %g2; fault-specific fields are zeroed.  Returns
 * via %g7 + 4.
 */
trace_dataprot:
2660	membar	#Sync
2661	sethi	%hi(FLUSH_ADDR), %g6
2662	flush	%g6
2663	TRACE_PTR(%g1, %g6)
2664	GET_TRACE_TICK(%g6)
2665	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
2666	rdpr	%tpc, %g6
2667	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
2668	rdpr	%tstate, %g6
2669	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
2670	stna	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
2671	stna	%g0, [%g1 + TRAP_ENT_TR]%asi
2672	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
2673	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
2674	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
2675	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
2676	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
2677	rdpr	%tt, %g6
2678	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
2679	TRACE_NEXT(%g1, %g4, %g5)
2680	jmp	%g7 + 4
2681	nop
2682
2683#endif /* TRAPTRACE */
2684
2685/*
2686 * synthesize for trap(): SFAR in %g2, SFSR in %g3
2687 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
2690	MMU_FAULT_STATUS_AREA(%g3)
2691	ldx	[%g3 + MMFSA_D_ADDR], %g2	! SFAR
2692	/* Fault type not available in MMU fault status area */
2693	mov	MMFSA_F_UNALIGN, %g1
2694	ldx	[%g3 + MMFSA_D_CTX], %g3
2695	sllx	%g3, SFSR_CTX_SHIFT, %g3
2696	btst	1, %sp			! biased (64-bit) stack? — presumed; confirm
2697	bnz,pt	%xcc, .lddf_exception_not_aligned
2698	or	%g3, %g1, %g3			/* SFSR */
2699	ba,a,pt	%xcc, .mmu_exception_not_aligned
2700	SET_SIZE(.dmmu_exc_lddf_not_aligned)
2701
2702/*
2703 * synthesize for trap(): SFAR in %g2, SFSR in %g3
2704 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
2707	MMU_FAULT_STATUS_AREA(%g3)
2708	ldx	[%g3 + MMFSA_D_ADDR], %g2	! SFAR
2709	/* Fault type not available in MMU fault status area */
2710	mov	MMFSA_F_UNALIGN, %g1
2711	ldx	[%g3 + MMFSA_D_CTX], %g3
2712	sllx	%g3, SFSR_CTX_SHIFT, %g3
2713	btst	1, %sp			! biased (64-bit) stack? — presumed; confirm
2714	bnz,pt	%xcc, .stdf_exception_not_aligned
2715	or	%g3, %g1, %g3			/* SFSR */
2716	ba,a,pt	%xcc, .mmu_exception_not_aligned
2717	SET_SIZE(.dmmu_exc_stdf_not_aligned)
2718
	.type	.dmmu_exception, #function
/*
 * Generic data MMU exception: synthesize TAG_ACCESS in %g2 and SFSR in
 * %g3 from the MMU fault status area, then continue at
 * .mmu_exception_end with trap type T_DATA_EXCEPTION in %g1.
 */
.dmmu_exception:
2721	MMU_FAULT_STATUS_AREA(%g3)
2722	ldx	[%g3 + MMFSA_D_ADDR], %g2
2723	ldx	[%g3 + MMFSA_D_TYPE], %g1
2724	ldx	[%g3 + MMFSA_D_CTX], %g3
2725	srlx	%g2, MMU_PAGESHIFT, %g2		/* align address */
2726	sllx	%g2, MMU_PAGESHIFT, %g2
2727	or	%g2, %g3, %g2			/* TAG_ACCESS */
2728	sllx	%g3, SFSR_CTX_SHIFT, %g3
2729	or	%g3, %g1, %g3			/* SFSR */
2730	ba,pt	%xcc, .mmu_exception_end
2731	mov	T_DATA_EXCEPTION, %g1
2732	SET_SIZE(.dmmu_exception)
2733/*
2734 * expects offset into tsbmiss area in %g1 and return pc in %g7
2735 */
/*
 * Increment the 32-bit counter at byte offset %g1 within this CPU's
 * tsbmiss area.  Clobbers %g5, %g6; returns via %g7 + 4 (the store is
 * in the jmp delay slot).
 */
stat_mmu:
2737	CPU_INDEX(%g5, %g6)
2738	sethi	%hi(tsbmiss_area), %g6
2739	sllx	%g5, TSBMISS_SHIFT, %g5
2740	or	%g6, %lo(tsbmiss_area), %g6
2741	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
2742	ld	[%g6 + %g1], %g5
2743	add	%g5, 1, %g5
2744	jmp	%g7 + 4
2745	st	%g5, [%g6 + %g1]	! delay - store incremented count
2746
2747
2748/*
2749 * fast_trap_done, fast_trap_done_chk_intr:
2750 *
2751 * Due to the design of UltraSPARC pipeline, pending interrupts are not
2752 * taken immediately after a RETRY or DONE instruction which causes IE to
2753 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2754 * to execute first before taking any interrupts. If that instruction
2755 * results in other traps, and if the corresponding trap handler runs
2756 * entirely at TL=1 with interrupts disabled, then pending interrupts
2757 * won't be taken until after yet another instruction following the %tpc
2758 * or %tnpc.
2759 *
2760 * A malicious user program can use this feature to block out interrupts
2761 * for extended durations, which can result in send_mondo_timeout kernel
2762 * panic.
2763 *
2764 * This problem is addressed by servicing any pending interrupts via
2765 * sys_trap before returning back to the user mode from a fast trap
2766 * handler. The "done" instruction within a fast trap handler, which
2767 * runs entirely at TL=1 with interrupts disabled, is replaced with the
2768 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2769 * entry point.
2770 *
2771 * We check for any pending interrupts here and force a sys_trap to
2772 * service those interrupts, if any. To minimize overhead, pending
2773 * interrupts are checked if the %tpc happens to be at 16K boundary,
2774 * which allows a malicious program to execute at most 4K consecutive
2775 * instructions before we service any pending interrupts. If a worst
2776 * case fast trap handler takes about 2 usec, then interrupts will be
2777 * blocked for at most 8 msec, less than a clock tick.
2778 *
2779 * For the cases where we don't know if the %tpc will cross a 16K
2780 * boundary, we can't use the above optimization and always process
2781 * any pending interrupts via fast_trap_done_chk_intr entry point.
2782 *
2783 * Entry Conditions:
2784 * 	%pstate		am:0 priv:1 ie:0
2785 * 			globals are AG (not normal globals)
2786 */
2787
	.global	fast_trap_done, fast_trap_done_chk_intr
/*
 * See the rationale in the comment block above.  fast_trap_done only
 * checks for pending interrupts when %tpc sits on a 16K boundary;
 * fast_trap_done_chk_intr always checks.
 */
fast_trap_done:
2790	rdpr	%tpc, %g5
2791	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
2792	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
2793	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
2794	nop
2795	done
2796
fast_trap_done_chk_intr:
27981:	rd	SOFTINT, %g6
2799	brnz,pn	%g6, 2f		! branch if any pending intr
2800	nop
2801	done
2802
28032:
2804	/*
2805	 * We get here if there are any pending interrupts.
2806	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
2807	 * instruction.
2808	 */
2809	rdpr	%tnpc, %g5
2810	wrpr	%g0, %g5, %tpc
2811	add	%g5, 4, %g5
2812	wrpr	%g0, %g5, %tnpc
2813
2814	/*
2815	 * Force a dummy sys_trap call so that interrupts can be serviced.
2816	 */
2817	set	fast_trap_dummy_call, %g1
2818	ba,pt	%xcc, sys_trap
2819	  mov	-1, %g4
2820
fast_trap_dummy_call:
2822	retl
2823	nop
2824
2825#endif	/* lint */
2826