xref: /titanic_41/usr/src/uts/sun4u/ml/trap_table.s (revision 4abb96737d15cd2d6530b0aa7b8404ec911ad940)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if !defined(lint)
29#include "assym.h"
30#endif /* !lint */
31#include <sys/asm_linkage.h>
32#include <sys/privregs.h>
33#include <sys/sun4asi.h>
34#include <sys/spitregs.h>
35#include <sys/cheetahregs.h>
36#include <sys/machtrap.h>
37#include <sys/machthread.h>
38#include <sys/pcb.h>
39#include <sys/pte.h>
40#include <sys/mmu.h>
41#include <sys/machpcb.h>
42#include <sys/async.h>
43#include <sys/intreg.h>
44#include <sys/scb.h>
45#include <sys/psr_compat.h>
46#include <sys/syscall.h>
47#include <sys/machparam.h>
48#include <sys/traptrace.h>
49#include <vm/hat_sfmmu.h>
50#include <sys/archsystm.h>
51#include <sys/utrap.h>
52#include <sys/clock.h>
53#include <sys/intr.h>
54#include <sys/fpu/fpu_simulator.h>
55#include <vm/seg_spt.h>
56
57/*
58 * WARNING: If you add a fast trap handler which can be invoked by a
59 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
60 * instead of "done" instruction to return back to the user mode. See
61 * comments for the "fast_trap_done" entry point for more information.
62 *
63 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
64 * cases where you always want to process any pending interrupts before
65 * returning back to the user mode.
66 */
/* Return-to-user path for fast traps; see fast_trap_done for why "done" is unsafe here. */
67#define	FAST_TRAP_DONE		\
68	ba,a	fast_trap_done
69
/* As above, but processes any pending interrupts before returning to user mode. */
70#define	FAST_TRAP_DONE_CHK_INTR	\
71	ba,a	fast_trap_done_chk_intr
72
73/*
74 * SPARC V9 Trap Table
75 *
76 * Most of the trap handlers are made from common building
77 * blocks, and some are instantiated multiple times within
78 * the trap table. So, I build a bunch of macros, then
79 * populate the table using only the macros.
80 *
81 * Many macros branch to sys_trap.  Its calling convention is:
82 *	%g1		kernel trap handler
83 *	%g2, %g3	args for above
84 *	%g4		desired %pil
85 */
86
87#ifdef	TRAPTRACE
88
89/*
90 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
91 */
92#define	TT_TRACE(label)		\
93	ba	label		;\
94	rd	%pc, %g7	/* delay slot: %g7 = pc of this macro, used by trace code to return */
/* TT_TRACE_INS: instruction count of TT_TRACE; used in SKIP() size arithmetic below. */
95#define	TT_TRACE_INS	2
96
/* Variant for handlers that may only use local registers (window traps): link in %l4. */
97#define	TT_TRACE_L(label)	\
98	ba	label		;\
99	rd	%pc, %l4	;\
100	clr	%l4
101#define	TT_TRACE_L_INS	3
102
103#else
104
/* Non-TRAPTRACE builds: tracing compiles away; counts collapse to 0 so SKIP() pads fully. */
105#define	TT_TRACE(label)
106#define	TT_TRACE_INS	0
107
108#define	TT_TRACE_L(label)
109#define	TT_TRACE_L_INS	0
110
111#endif
112
113/*
114 * This macro is used to update per cpu mmu stats in perf critical
115 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
116 * is defined.
117 */
118#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
119#define	HAT_PERCPU_DBSTAT(stat)			\
120	mov	stat, %g1			/* %g1 = stat to bump */ ;\
121	ba	stat_mmu			;\
122	rd	%pc, %g7	/* delay slot: %g7 = link pc, same rd-%pc idiom as TT_TRACE */
123#else
124#define	HAT_PERCPU_DBSTAT(stat)
125#endif /* DEBUG || SFMMU_STAT_GATHER */
126
127/*
128 * This first set are funneled to trap() with %tt as the type.
129 * Trap will then either panic or send the user a signal.
130 */
131/*
132 * NOT is used for traps that just shouldn't happen.
133 * It comes in both single and quadruple flavors.
134 */
135#if !defined(lint)
136	.global	trap
137#endif /* !lint */
138#define	NOT			\
139	TT_TRACE(trace_gen)	;\
140	set	trap, %g1	/* kernel handler = trap() */ ;\
141	rdpr	%tt, %g3	/* pass the raw trap type as the arg */ ;\
142	ba,pt	%xcc, sys_trap	;\
143	sub	%g0, 1, %g4	/* %g4 = -1 (desired %pil) */ ;\
144	.align	32
145#define	NOT4	NOT; NOT; NOT; NOT
146/*
147 * RED is for traps that use the red mode handler.
148 * We should never see these either.
149 */
150#define	RED	NOT
151/*
152 * BAD is used for trap vectors we don't have a kernel
153 * handler for.
154 * It also comes in single and quadruple versions.
155 */
156#define	BAD	NOT
157#define	BAD4	NOT4
158
/* Plain "done" return; user-invocable fast traps must use FAST_TRAP_DONE instead (see warning above). */
159#define	DONE			\
160	done;			\
161	.align	32
162
163/*
164 * TRAP vectors to the trap() function.
165 * Its main use is for user errors.
166 */
167#if !defined(lint)
168	.global	trap
169#endif /* !lint */
170#define	TRAP(arg)		\
171	TT_TRACE(trace_gen)	;\
172	set	trap, %g1	/* kernel handler = trap() */ ;\
173	mov	arg, %g3	/* caller-specified trap type */ ;\
174	ba,pt	%xcc, sys_trap	;\
175	sub	%g0, 1, %g4	/* %g4 = -1 (desired %pil) */ ;\
176	.align	32
177
178/*
179 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
180 * depending on the "which" parameter (should be syscall_trap,
181 * syscall_trap32, or nosys for unused system call traps).
182 */
183#define	SYSCALL(which)			\
184	TT_TRACE(trace_gen)		;\
185	set	(which), %g1		;\
186	ba,pt	%xcc, sys_trap		;\
187	sub	%g0, 1, %g4		/* %g4 = -1 (desired %pil) */ ;\
188	.align	32
189
/*
 * FLUSHW: flush all register windows via save/flushw/restore, then fast-return.
 * sys_trap-style args (%g1 = trap, %g3 = T_FLUSHW, %g4 = -1) are pre-loaded
 * first; NOTE(review): presumably so a window fault taken during the
 * save/flushw sequence can be steered into trap() — confirm against the
 * window fault handlers before relying on this.
 */
190#define	FLUSHW()			\
191	set	trap, %g1		;\
192	mov	T_FLUSHW, %g3		;\
193	sub	%g0, 1, %g4		;\
194	save				;\
195	flushw				;\
196	restore				;\
197	FAST_TRAP_DONE			;\
198	.align	32
199
200/*
201 * GOTO just jumps to a label.
202 * It's used for things that can be fixed without going thru sys_trap.
203 */
204#define	GOTO(label)		\
205	.global	label		;\
206	ba,a	label		;\
207	.empty			;\
208	.align	32
209
210/*
211 * GOTO_TT just jumps to a label.
212 * correctable ECC error traps at level 0 and 1 will use this macro.
213 * It's used for things that can be fixed without going thru sys_trap.
214 */
215#define	GOTO_TT(label, ttlabel)		\
216	.global	label		;\
217	TT_TRACE(ttlabel)	;\
218	ba,a	label		;\
219	.empty			;\
220	.align	32
221
222/*
223 * Privileged traps
224 * Takes breakpoint if privileged, calls trap() if not.
225 */
226#define	PRIV(label)			\
227	rdpr	%tstate, %g1		;\
228	btst	TSTATE_PRIV, %g1	/* trapped from supervisor mode? */ ;\
229	bnz	label			;\
230	rdpr	%tt, %g3		/* delay slot: %g3 = trap type for trap() */ ;\
231	set	trap, %g1		;\
232	ba,pt	%xcc, sys_trap		;\
233	sub	%g0, 1, %g4		/* %g4 = -1 (desired %pil) */ ;\
234	.align	32
235
236
237/*
238 * DTrace traps.
239 */
/*
 * If dtrace_fasttrap_probe_ptr is non-NULL, vector to user_trap with
 * %g1 = dtrace_fasttrap_probe; otherwise the trap is a no-op fast return.
 */
240#define	DTRACE_FASTTRAP			\
241	.global dtrace_fasttrap_probe				;\
242	.global dtrace_fasttrap_probe_ptr			;\
243	sethi	%hi(dtrace_fasttrap_probe_ptr), %g4		;\
244	ldn	[%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4	;\
245	set	dtrace_fasttrap_probe, %g1			;\
246	brnz,pn	%g4, user_trap					;\
247	sub	%g0, 1, %g4	/* delay slot: %g4 = -1 (desired %pil) */ ;\
248	FAST_TRAP_DONE						;\
249	.align	32
250
/* Unconditionally vector to user_trap with %g1 = dtrace_pid_probe. */
251#define	DTRACE_PID			\
252	.global dtrace_pid_probe				;\
253	set	dtrace_pid_probe, %g1				;\
254	ba,pt	%xcc, user_trap					;\
255	sub	%g0, 1, %g4	/* delay slot: %g4 = -1 (desired %pil) */ ;\
256	.align	32
257
/* Unconditionally vector to user_trap with %g1 = dtrace_return_probe. */
258#define	DTRACE_RETURN			\
259	.global dtrace_return_probe				;\
260	set	dtrace_return_probe, %g1			;\
261	ba,pt	%xcc, user_trap					;\
262	sub	%g0, 1, %g4	/* delay slot: %g4 = -1 (desired %pil) */ ;\
263	.align	32
264
265/*
266 * REGISTER WINDOW MANAGEMENT MACROS
267 */
268
269/*
270 * various convenient units of padding
271 */
272#define	SKIP(n)	.skip 4*(n)	/* pad n instruction slots (4 bytes each) */
273
274/*
275 * CLEAN_WINDOW is the simple handler for cleaning a register window.
276 */
277#define	CLEAN_WINDOW						\
278	TT_TRACE_L(trace_win)					;\
279	rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	/* mark one more window clean */ ;\
280	clr %l0; clr %l1; clr %l2; clr %l3			/* scrub locals and outs */ ;\
281	clr %l4; clr %l5; clr %l6; clr %l7			;\
282	clr %o0; clr %o1; clr %o2; clr %o3			;\
283	clr %o4; clr %o5; clr %o6; clr %o7			;\
284	retry; .align 128
285
286#if !defined(lint)
287
288/*
289 * If we get an unresolved tlb miss while in a window handler, the fault
290 * handler will resume execution at the last instruction of the window
291 * handler, instead of delivering the fault to the kernel.  Spill handlers
292 * use this to spill windows into the wbuf.
293 *
294 * The mixed handler works by checking %sp, and branching to the correct
295 * handler.  This is done by branching back to label 1: for 32b frames,
296 * or label 2: for 64b frames; which implies the handler order is: 32b,
297 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
298 * allow the branches' delay slots to contain useful instructions.
299 */
300
301/*
302 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
303 * assumes that the kernel context and the nucleus context are the
304 * same.  The stack pointer is required to be eight-byte aligned even
305 * though this code only needs it to be four-byte aligned.
306 */
307#define	SPILL_32bit(tail)					\
308	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
3091:	st	%l0, [%sp + 0]	/* 1: = 32-bit entry used by SPILL_mixed */ ;\
310	st	%l1, [%sp + 4]					;\
311	st	%l2, [%sp + 8]					;\
312	st	%l3, [%sp + 12]					;\
313	st	%l4, [%sp + 16]					;\
314	st	%l5, [%sp + 20]					;\
315	st	%l6, [%sp + 24]					;\
316	st	%l7, [%sp + 28]					;\
317	st	%i0, [%sp + 32]					;\
318	st	%i1, [%sp + 36]					;\
319	st	%i2, [%sp + 40]					;\
320	st	%i3, [%sp + 44]					;\
321	st	%i4, [%sp + 48]					;\
322	st	%i5, [%sp + 52]					;\
323	st	%i6, [%sp + 56]					;\
324	st	%i7, [%sp + 60]					;\
325	TT_TRACE_L(trace_win)					;\
326	saved							;\
327	retry							;\
328	SKIP(31-19-TT_TRACE_L_INS)	/* pad so the branch is the last instruction (tlb-miss resume point) */ ;\
329	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
330	.empty
331
332/*
333 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
334 * wide address space via the designated asi.  It is used to spill
335 * non-kernel windows.  The stack pointer is required to be eight-byte
336 * aligned even though this code only needs it to be four-byte
337 * aligned.
338 */
339#define	SPILL_32bit_asi(asi_num, tail)				\
340	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
3411:	sta	%l0, [%sp + %g0]asi_num		/* offsets held in %g1-%g3; base walked in %g4 */ ;\
342	mov	4, %g1						;\
343	sta	%l1, [%sp + %g1]asi_num				;\
344	mov	8, %g2						;\
345	sta	%l2, [%sp + %g2]asi_num				;\
346	mov	12, %g3						;\
347	sta	%l3, [%sp + %g3]asi_num				;\
348	add	%sp, 16, %g4					;\
349	sta	%l4, [%g4 + %g0]asi_num				;\
350	sta	%l5, [%g4 + %g1]asi_num				;\
351	sta	%l6, [%g4 + %g2]asi_num				;\
352	sta	%l7, [%g4 + %g3]asi_num				;\
353	add	%g4, 16, %g4					;\
354	sta	%i0, [%g4 + %g0]asi_num				;\
355	sta	%i1, [%g4 + %g1]asi_num				;\
356	sta	%i2, [%g4 + %g2]asi_num				;\
357	sta	%i3, [%g4 + %g3]asi_num				;\
358	add	%g4, 16, %g4					;\
359	sta	%i4, [%g4 + %g0]asi_num				;\
360	sta	%i5, [%g4 + %g1]asi_num				;\
361	sta	%i6, [%g4 + %g2]asi_num				;\
362	sta	%i7, [%g4 + %g3]asi_num				;\
363	TT_TRACE_L(trace_win)					;\
364	saved							;\
365	retry							;\
366	SKIP(31-25-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
367	ba,a,pt %xcc, fault_32bit_/**/tail			;\
368	.empty
369
370/*
371 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
372 * wide address space via the designated asi.  It is used to spill
373 * windows at tl>1 where performance isn't the primary concern and
374 * where we don't want to use unnecessary registers.  The stack
375 * pointer is required to be eight-byte aligned even though this code
376 * only needs it to be four-byte aligned.
377 */
378#define	SPILL_32bit_tt1(asi_num, tail)				\
379	mov	asi_num, %asi	/* tl>1 path: use %asi instead of scratch globals */ ;\
3801:	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
381	sta	%l0, [%sp + 0]%asi				;\
382	sta	%l1, [%sp + 4]%asi				;\
383	sta	%l2, [%sp + 8]%asi				;\
384	sta	%l3, [%sp + 12]%asi				;\
385	sta	%l4, [%sp + 16]%asi				;\
386	sta	%l5, [%sp + 20]%asi				;\
387	sta	%l6, [%sp + 24]%asi				;\
388	sta	%l7, [%sp + 28]%asi				;\
389	sta	%i0, [%sp + 32]%asi				;\
390	sta	%i1, [%sp + 36]%asi				;\
391	sta	%i2, [%sp + 40]%asi				;\
392	sta	%i3, [%sp + 44]%asi				;\
393	sta	%i4, [%sp + 48]%asi				;\
394	sta	%i5, [%sp + 52]%asi				;\
395	sta	%i6, [%sp + 56]%asi				;\
396	sta	%i7, [%sp + 60]%asi				;\
397	TT_TRACE_L(trace_win)					;\
398	saved							;\
399	retry							;\
400	SKIP(31-20-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
401	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
402	.empty
403
404
405/*
406 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
407 * that the kernel context and the nucleus context are the same.  The
408 * stack pointer is required to be eight-byte aligned even though this
409 * code only needs it to be four-byte aligned.
410 */
411#define	FILL_32bit(tail)					\
412	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
4131:	TT_TRACE_L(trace_win)	/* 1: = 32-bit entry used by FILL_mixed */ ;\
414	ld	[%sp + 0], %l0					;\
415	ld	[%sp + 4], %l1					;\
416	ld	[%sp + 8], %l2					;\
417	ld	[%sp + 12], %l3					;\
418	ld	[%sp + 16], %l4					;\
419	ld	[%sp + 20], %l5					;\
420	ld	[%sp + 24], %l6					;\
421	ld	[%sp + 28], %l7					;\
422	ld	[%sp + 32], %i0					;\
423	ld	[%sp + 36], %i1					;\
424	ld	[%sp + 40], %i2					;\
425	ld	[%sp + 44], %i3					;\
426	ld	[%sp + 48], %i4					;\
427	ld	[%sp + 52], %i5					;\
428	ld	[%sp + 56], %i6					;\
429	ld	[%sp + 60], %i7					;\
430	restored						;\
431	retry							;\
432	SKIP(31-19-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
433	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
434	.empty
435
436/*
437 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
438 * wide address space via the designated asi.  It is used to fill
439 * non-kernel windows.  The stack pointer is required to be eight-byte
440 * aligned even though this code only needs it to be four-byte
441 * aligned.
442 */
443#define	FILL_32bit_asi(asi_num, tail)				\
444	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
4451:	TT_TRACE_L(trace_win)					;\
446	mov	4, %g1		/* offsets in %g1-%g3; base walked in %g4 */ ;\
447	lda	[%sp + %g0]asi_num, %l0				;\
448	mov	8, %g2						;\
449	lda	[%sp + %g1]asi_num, %l1				;\
450	mov	12, %g3						;\
451	lda	[%sp + %g2]asi_num, %l2				;\
452	lda	[%sp + %g3]asi_num, %l3				;\
453	add	%sp, 16, %g4					;\
454	lda	[%g4 + %g0]asi_num, %l4				;\
455	lda	[%g4 + %g1]asi_num, %l5				;\
456	lda	[%g4 + %g2]asi_num, %l6				;\
457	lda	[%g4 + %g3]asi_num, %l7				;\
458	add	%g4, 16, %g4					;\
459	lda	[%g4 + %g0]asi_num, %i0				;\
460	lda	[%g4 + %g1]asi_num, %i1				;\
461	lda	[%g4 + %g2]asi_num, %i2				;\
462	lda	[%g4 + %g3]asi_num, %i3				;\
463	add	%g4, 16, %g4					;\
464	lda	[%g4 + %g0]asi_num, %i4				;\
465	lda	[%g4 + %g1]asi_num, %i5				;\
466	lda	[%g4 + %g2]asi_num, %i6				;\
467	lda	[%g4 + %g3]asi_num, %i7				;\
468	restored						;\
469	retry							;\
470	SKIP(31-25-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
471	ba,a,pt %xcc, fault_32bit_/**/tail			;\
472	.empty
473
474/*
475 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
476 * wide address space via the designated asi.  It is used to fill
477 * windows at tl>1 where performance isn't the primary concern and
478 * where we don't want to use unnecessary registers.  The stack
479 * pointer is required to be eight-byte aligned even though this code
480 * only needs it to be four-byte aligned.
481 */
482#define	FILL_32bit_tt1(asi_num, tail)				\
483	mov	asi_num, %asi	/* tl>1 path: use %asi instead of scratch globals */ ;\
4841:	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
485	TT_TRACE_L(trace_win)					;\
486	lda	[%sp + 0]%asi, %l0				;\
487	lda	[%sp + 4]%asi, %l1				;\
488	lda	[%sp + 8]%asi, %l2				;\
489	lda	[%sp + 12]%asi, %l3				;\
490	lda	[%sp + 16]%asi, %l4				;\
491	lda	[%sp + 20]%asi, %l5				;\
492	lda	[%sp + 24]%asi, %l6				;\
493	lda	[%sp + 28]%asi, %l7				;\
494	lda	[%sp + 32]%asi, %i0				;\
495	lda	[%sp + 36]%asi, %i1				;\
496	lda	[%sp + 40]%asi, %i2				;\
497	lda	[%sp + 44]%asi, %i3				;\
498	lda	[%sp + 48]%asi, %i4				;\
499	lda	[%sp + 52]%asi, %i5				;\
500	lda	[%sp + 56]%asi, %i6				;\
501	lda	[%sp + 60]%asi, %i7				;\
502	restored						;\
503	retry							;\
504	SKIP(31-20-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
505	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
506	.empty
507
508
509/*
510 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
511 * assumes that the kernel context and the nucleus context are the
512 * same.  The stack pointer is required to be eight-byte aligned.
513 */
514#define	SPILL_64bit(tail)					\
5152:	stx	%l0, [%sp + V9BIAS64 + 0]	/* 2: = 64-bit entry used by SPILL_mixed */ ;\
516	stx	%l1, [%sp + V9BIAS64 + 8]			;\
517	stx	%l2, [%sp + V9BIAS64 + 16]			;\
518	stx	%l3, [%sp + V9BIAS64 + 24]			;\
519	stx	%l4, [%sp + V9BIAS64 + 32]			;\
520	stx	%l5, [%sp + V9BIAS64 + 40]			;\
521	stx	%l6, [%sp + V9BIAS64 + 48]			;\
522	stx	%l7, [%sp + V9BIAS64 + 56]			;\
523	stx	%i0, [%sp + V9BIAS64 + 64]			;\
524	stx	%i1, [%sp + V9BIAS64 + 72]			;\
525	stx	%i2, [%sp + V9BIAS64 + 80]			;\
526	stx	%i3, [%sp + V9BIAS64 + 88]			;\
527	stx	%i4, [%sp + V9BIAS64 + 96]			;\
528	stx	%i5, [%sp + V9BIAS64 + 104]			;\
529	stx	%i6, [%sp + V9BIAS64 + 112]			;\
530	stx	%i7, [%sp + V9BIAS64 + 120]			;\
531	TT_TRACE_L(trace_win)					;\
532	saved							;\
533	retry							;\
534	SKIP(31-18-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
535	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
536	.empty
537
538/*
539 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
540 * wide address space via the designated asi.  It is used to spill
541 * non-kernel windows.  The stack pointer is required to be eight-byte
542 * aligned.
543 */
544#define	SPILL_64bit_asi(asi_num, tail)				\
545	mov	0 + V9BIAS64, %g1	/* biased offsets in %g1-%g4; base walked in %g5 */ ;\
5462:	stxa	%l0, [%sp + %g1]asi_num				;\
547	mov	8 + V9BIAS64, %g2				;\
548	stxa	%l1, [%sp + %g2]asi_num				;\
549	mov	16 + V9BIAS64, %g3				;\
550	stxa	%l2, [%sp + %g3]asi_num				;\
551	mov	24 + V9BIAS64, %g4				;\
552	stxa	%l3, [%sp + %g4]asi_num				;\
553	add	%sp, 32, %g5					;\
554	stxa	%l4, [%g5 + %g1]asi_num				;\
555	stxa	%l5, [%g5 + %g2]asi_num				;\
556	stxa	%l6, [%g5 + %g3]asi_num				;\
557	stxa	%l7, [%g5 + %g4]asi_num				;\
558	add	%g5, 32, %g5					;\
559	stxa	%i0, [%g5 + %g1]asi_num				;\
560	stxa	%i1, [%g5 + %g2]asi_num				;\
561	stxa	%i2, [%g5 + %g3]asi_num				;\
562	stxa	%i3, [%g5 + %g4]asi_num				;\
563	add	%g5, 32, %g5					;\
564	stxa	%i4, [%g5 + %g1]asi_num				;\
565	stxa	%i5, [%g5 + %g2]asi_num				;\
566	stxa	%i6, [%g5 + %g3]asi_num				;\
567	stxa	%i7, [%g5 + %g4]asi_num				;\
568	TT_TRACE_L(trace_win)					;\
569	saved							;\
570	retry							;\
571	SKIP(31-25-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
572	ba,a,pt %xcc, fault_64bit_/**/tail			;\
573	.empty
574
575/*
576 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
577 * wide address space via the designated asi.  It is used to spill
578 * windows at tl>1 where performance isn't the primary concern and
579 * where we don't want to use unnecessary registers.  The stack
580 * pointer is required to be eight-byte aligned.
581 */
582#define	SPILL_64bit_tt1(asi_num, tail)				\
583	mov	asi_num, %asi	/* tl>1 path: use %asi instead of scratch globals */ ;\
5842:	stxa	%l0, [%sp + V9BIAS64 + 0]%asi			;\
585	stxa	%l1, [%sp + V9BIAS64 + 8]%asi			;\
586	stxa	%l2, [%sp + V9BIAS64 + 16]%asi			;\
587	stxa	%l3, [%sp + V9BIAS64 + 24]%asi			;\
588	stxa	%l4, [%sp + V9BIAS64 + 32]%asi			;\
589	stxa	%l5, [%sp + V9BIAS64 + 40]%asi			;\
590	stxa	%l6, [%sp + V9BIAS64 + 48]%asi			;\
591	stxa	%l7, [%sp + V9BIAS64 + 56]%asi			;\
592	stxa	%i0, [%sp + V9BIAS64 + 64]%asi			;\
593	stxa	%i1, [%sp + V9BIAS64 + 72]%asi			;\
594	stxa	%i2, [%sp + V9BIAS64 + 80]%asi			;\
595	stxa	%i3, [%sp + V9BIAS64 + 88]%asi			;\
596	stxa	%i4, [%sp + V9BIAS64 + 96]%asi			;\
597	stxa	%i5, [%sp + V9BIAS64 + 104]%asi			;\
598	stxa	%i6, [%sp + V9BIAS64 + 112]%asi			;\
599	stxa	%i7, [%sp + V9BIAS64 + 120]%asi			;\
600	TT_TRACE_L(trace_win)					;\
601	saved							;\
602	retry							;\
603	SKIP(31-19-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
604	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
605	.empty
606
607
608/*
609 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
610 * that the kernel context and the nucleus context are the same.  The
611 * stack pointer is required to be eight-byte aligned.
612 */
613#define	FILL_64bit(tail)					\
6142:	TT_TRACE_L(trace_win)	/* 2: = 64-bit entry used by FILL_mixed */ ;\
615	ldx	[%sp + V9BIAS64 + 0], %l0			;\
616	ldx	[%sp + V9BIAS64 + 8], %l1			;\
617	ldx	[%sp + V9BIAS64 + 16], %l2			;\
618	ldx	[%sp + V9BIAS64 + 24], %l3			;\
619	ldx	[%sp + V9BIAS64 + 32], %l4			;\
620	ldx	[%sp + V9BIAS64 + 40], %l5			;\
621	ldx	[%sp + V9BIAS64 + 48], %l6			;\
622	ldx	[%sp + V9BIAS64 + 56], %l7			;\
623	ldx	[%sp + V9BIAS64 + 64], %i0			;\
624	ldx	[%sp + V9BIAS64 + 72], %i1			;\
625	ldx	[%sp + V9BIAS64 + 80], %i2			;\
626	ldx	[%sp + V9BIAS64 + 88], %i3			;\
627	ldx	[%sp + V9BIAS64 + 96], %i4			;\
628	ldx	[%sp + V9BIAS64 + 104], %i5			;\
629	ldx	[%sp + V9BIAS64 + 112], %i6			;\
630	ldx	[%sp + V9BIAS64 + 120], %i7			;\
631	restored						;\
632	retry							;\
633	SKIP(31-18-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
634	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
635	.empty
636
637/*
638 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
639 * wide address space via the designated asi.  It is used to fill
640 * non-kernel windows.  The stack pointer is required to be eight-byte
641 * aligned.
642 */
643#define	FILL_64bit_asi(asi_num, tail)				\
644	mov	V9BIAS64 + 0, %g1	/* biased offsets in %g1-%g4; base walked in %g5 */ ;\
6452:	TT_TRACE_L(trace_win)					;\
646	ldxa	[%sp + %g1]asi_num, %l0				;\
647	mov	V9BIAS64 + 8, %g2				;\
648	ldxa	[%sp + %g2]asi_num, %l1				;\
649	mov	V9BIAS64 + 16, %g3				;\
650	ldxa	[%sp + %g3]asi_num, %l2				;\
651	mov	V9BIAS64 + 24, %g4				;\
652	ldxa	[%sp + %g4]asi_num, %l3				;\
653	add	%sp, 32, %g5					;\
654	ldxa	[%g5 + %g1]asi_num, %l4				;\
655	ldxa	[%g5 + %g2]asi_num, %l5				;\
656	ldxa	[%g5 + %g3]asi_num, %l6				;\
657	ldxa	[%g5 + %g4]asi_num, %l7				;\
658	add	%g5, 32, %g5					;\
659	ldxa	[%g5 + %g1]asi_num, %i0				;\
660	ldxa	[%g5 + %g2]asi_num, %i1				;\
661	ldxa	[%g5 + %g3]asi_num, %i2				;\
662	ldxa	[%g5 + %g4]asi_num, %i3				;\
663	add	%g5, 32, %g5					;\
664	ldxa	[%g5 + %g1]asi_num, %i4				;\
665	ldxa	[%g5 + %g2]asi_num, %i5				;\
666	ldxa	[%g5 + %g3]asi_num, %i6				;\
667	ldxa	[%g5 + %g4]asi_num, %i7				;\
668	restored						;\
669	retry							;\
670	SKIP(31-25-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
671	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
672	.empty
673
674/*
675 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
676 * wide address space via the designated asi.  It is used to fill
677 * windows at tl>1 where performance isn't the primary concern and
678 * where we don't want to use unnecessary registers.  The stack
679 * pointer is required to be eight-byte aligned.
680 */
/*
 * NOTE(review): unlike SPILL_64bit_tt1 and the other tt1/asi variants, this
 * handler defines no "2:" entry label, so a SPILL/FILL_mixed-style "ba 2b"
 * cannot land here.  The FILL_mixed comment below says mixed may only pair
 * with FILL_32bit/FILL_64bit, so this appears intentional — confirm.
 */
681#define	FILL_64bit_tt1(asi_num, tail)				\
682	mov	asi_num, %asi	/* tl>1 path: use %asi instead of scratch globals */ ;\
683	TT_TRACE_L(trace_win)					;\
684	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0			;\
685	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1			;\
686	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2			;\
687	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3			;\
688	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4			;\
689	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5			;\
690	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6			;\
691	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7			;\
692	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0			;\
693	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1			;\
694	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2			;\
695	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3			;\
696	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4			;\
697	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5			;\
698	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6			;\
699	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7			;\
700	restored						;\
701	retry							;\
702	SKIP(31-19-TT_TRACE_L_INS)	/* pad; trailing branch is the tlb-miss resume point */ ;\
703	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
704	.empty
705
706#endif /* !lint */
707
708/*
709 * SPILL_mixed spills either size window, depending on
710 * whether %sp is even or odd, to a 32-bit address space.
711 * This may only be used in conjunction with SPILL_32bit/
712 * SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be
713 * needed for use with SPILL_{32,64}bit_{tt1,asi}.  Particular
714 * attention should be paid to the instructions that belong
715 * in the delay slots of the branches depending on the type
716 * of spill handler being branched to.
717 * Clear upper 32 bits of %sp if it is odd.
718 * We won't need to clear them in 64 bit kernel.
719 */
720#define	SPILL_mixed						\
721	btst	1, %sp		/* odd %sp = biased 64-bit frame */ ;\
722	bz,a,pt	%xcc, 1b	/* even: 32-bit spill; annulled delay clears high bits */ ;\
723	srl	%sp, 0, %sp					;\
724	ba,pt	%xcc, 2b	/* odd: 64-bit spill */ ;\
725	nop							;\
726	.align	128
727
728/*
729 * FILL_mixed(ASI) fills either size window, depending on
730 * whether %sp is even or odd, from a 32-bit address space.
731 * This may only be used in conjunction with FILL_32bit/
732 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
733 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
734 * attention should be paid to the instructions that belong
735 * in the delay slots of the branches depending on the type
736 * of fill handler being branched to.
737 * Clear upper 32 bits of %sp if it is odd.
738 * We won't need to clear them in 64 bit kernel.
739 */
740#define	FILL_mixed						\
741	btst	1, %sp		/* odd %sp = biased 64-bit frame */ ;\
742	bz,a,pt	%xcc, 1b	/* even: 32-bit fill; annulled delay clears high bits */ ;\
743	srl	%sp, 0, %sp					;\
744	ba,pt	%xcc, 2b	/* odd: 64-bit fill */ ;\
745	nop							;\
746	.align	128
747
748
749/*
750 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
751 * respectively, into the address space via the designated asi.  The
752 * unbiased stack pointer is required to be eight-byte aligned (even for
753 * the 32-bit case even though this code does not require such strict
754 * alignment).
755 *
756 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
757 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
758 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
759 * window may contain kernel data so in user_rtt we set wstate to call
760 * these spill handlers on the first user spill trap.  These handler then
761 * spill the appropriate window but also back up a window and clean the
762 * window that didn't get a cleanwin trap.
763 */
764#define	SPILL_32clean(asi_num, tail)				\
765	srl	%sp, 0, %sp	/* clear upper 32 bits of %sp */ ;\
766	sta	%l0, [%sp + %g0]asi_num				;\
767	mov	4, %g1						;\
768	sta	%l1, [%sp + %g1]asi_num				;\
769	mov	8, %g2						;\
770	sta	%l2, [%sp + %g2]asi_num				;\
771	mov	12, %g3						;\
772	sta	%l3, [%sp + %g3]asi_num				;\
773	add	%sp, 16, %g4					;\
774	sta	%l4, [%g4 + %g0]asi_num				;\
775	sta	%l5, [%g4 + %g1]asi_num				;\
776	sta	%l6, [%g4 + %g2]asi_num				;\
777	sta	%l7, [%g4 + %g3]asi_num				;\
778	add	%g4, 16, %g4					;\
779	sta	%i0, [%g4 + %g0]asi_num				;\
780	sta	%i1, [%g4 + %g1]asi_num				;\
781	sta	%i2, [%g4 + %g2]asi_num				;\
782	sta	%i3, [%g4 + %g3]asi_num				;\
783	add	%g4, 16, %g4					;\
784	sta	%i4, [%g4 + %g0]asi_num				;\
785	sta	%i5, [%g4 + %g1]asi_num				;\
786	sta	%i6, [%g4 + %g2]asi_num				;\
787	sta	%i7, [%g4 + %g3]asi_num				;\
788	TT_TRACE_L(trace_win)					;\
789	b	.spill_clean	/* common tail backs up and cleans the skipped window */ ;\
790	  mov	WSTATE_USER32, %g7	/* delay slot: %g7 = wstate for .spill_clean */ ;\
791	SKIP(31-25-TT_TRACE_L_INS)				;\
792	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
793	.empty
794
795#define	SPILL_64clean(asi_num, tail)				\
796	mov	0 + V9BIAS64, %g1	/* biased offsets in %g1-%g4; base walked in %g5 */ ;\
797	stxa	%l0, [%sp + %g1]asi_num				;\
798	mov	8 + V9BIAS64, %g2				;\
799	stxa	%l1, [%sp + %g2]asi_num				;\
800	mov	16 + V9BIAS64, %g3				;\
801	stxa	%l2, [%sp + %g3]asi_num				;\
802	mov	24 + V9BIAS64, %g4				;\
803	stxa	%l3, [%sp + %g4]asi_num				;\
804	add	%sp, 32, %g5					;\
805	stxa	%l4, [%g5 + %g1]asi_num				;\
806	stxa	%l5, [%g5 + %g2]asi_num				;\
807	stxa	%l6, [%g5 + %g3]asi_num				;\
808	stxa	%l7, [%g5 + %g4]asi_num				;\
809	add	%g5, 32, %g5					;\
810	stxa	%i0, [%g5 + %g1]asi_num				;\
811	stxa	%i1, [%g5 + %g2]asi_num				;\
812	stxa	%i2, [%g5 + %g3]asi_num				;\
813	stxa	%i3, [%g5 + %g4]asi_num				;\
814	add	%g5, 32, %g5					;\
815	stxa	%i4, [%g5 + %g1]asi_num				;\
816	stxa	%i5, [%g5 + %g2]asi_num				;\
817	stxa	%i6, [%g5 + %g3]asi_num				;\
818	stxa	%i7, [%g5 + %g4]asi_num				;\
819	TT_TRACE_L(trace_win)					;\
820	b	.spill_clean	/* common tail backs up and cleans the skipped window */ ;\
821	  mov	WSTATE_USER64, %g7	/* delay slot: %g7 = wstate for .spill_clean */ ;\
822	SKIP(31-25-TT_TRACE_L_INS)				;\
823	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
824	.empty
825
826
827/*
828 * Floating point disabled.
829 */
830#define	FP_DISABLED_TRAP		\
831	TT_TRACE(trace_gen)		;\
832	ba,pt	%xcc,.fp_disabled	;\
833	nop				;\
834	.align	32
835
836/*
837 * Floating point exceptions.
838 */
/* IEEE-754 FP exception (tt 0x21): vector to .fp_ieee_exception. */
839#define	FP_IEEE_TRAP			\
840	TT_TRACE(trace_gen)		;\
841	ba,pt	%xcc,.fp_ieee_exception	;\
842	nop				;\
843	.align	32
844
/* Other FP exception (tt 0x22): vector to .fp_exception. */
845#define	FP_TRAP				\
846	TT_TRACE(trace_gen)		;\
847	ba,pt	%xcc,.fp_exception	;\
848	nop				;\
849	.align	32
850
851#if !defined(lint)
852/*
853 * asynchronous traps at level 0 and level 1
854 *
855 * The first instruction must be a membar for UltraSPARC-III
856 * to stop RED state entry if the store queue has many
857 * pending bad stores (PRM, Chapter 11).
858 */
859#define ASYNC_TRAP(ttype, ttlabel, table_name)\
860	.global	table_name	;\
861table_name:			;\
862	membar	#Sync		/* must be first: stops RED state entry (see above) */ ;\
863	TT_TRACE(ttlabel)	;\
864	ba	async_err	;\
865	mov	ttype, %g5	/* delay slot: %g5 = async error type for async_err */ ;\
866	.align	32
867
868/*
869 * Defaults to BAD entry, but establishes label to be used for
870 * architecture-specific overwrite of trap table entry.
871 */
872#define	LABELED_BAD(table_name)		\
873	.global	table_name		;\
874table_name:				;\
875	BAD
876
877#endif /* !lint */
878
879/*
880 * illegal instruction trap
881 */
882#define	ILLTRAP_INSTR			  \
883	membar	#Sync			  ;\
884	TT_TRACE(trace_gen)		  ;\
885	or	%g0, P_UTRAP4, %g2	  /* %g2 = user trap index, %g3 = kernel trap type */ ;\
886	or	%g0, T_UNIMP_INSTR, %g3   ;\
887	sethi	%hi(.check_v9utrap), %g4  ;\
888	jmp	%g4 + %lo(.check_v9utrap) /* may redirect to a V9 user trap handler */ ;\
889	nop				  ;\
890	.align	32
891
892/*
893 * tag overflow trap
894 */
895#define	TAG_OVERFLOW			  \
896	TT_TRACE(trace_gen)		  ;\
897	or	%g0, P_UTRAP10, %g2	  /* %g2 = user trap index, %g3 = kernel trap type */ ;\
898	or	%g0, T_TAG_OVERFLOW, %g3  ;\
899	sethi	%hi(.check_v9utrap), %g4  ;\
900	jmp	%g4 + %lo(.check_v9utrap) ;\
901	nop				  ;\
902	.align	32
903
904/*
905 * divide by zero trap
906 */
907#define	DIV_BY_ZERO			  \
908	TT_TRACE(trace_gen)		  ;\
909	or	%g0, P_UTRAP11, %g2	  /* %g2 = user trap index, %g3 = kernel trap type */ ;\
910	or	%g0, T_IDIV0, %g3	  ;\
911	sethi	%hi(.check_v9utrap), %g4  ;\
912	jmp	%g4 + %lo(.check_v9utrap) ;\
913	nop				  ;\
914	.align	32
915
916/*
917 * trap instruction for V9 user trap handlers
918 */
919#define	TRAP_INSTR			  \
920	TT_TRACE(trace_gen)		  ;\
921	or	%g0, T_SOFTWARE_TRAP, %g3 ;\
922	sethi	%hi(.check_v9utrap), %g4  ;\
923	jmp	%g4 + %lo(.check_v9utrap) ;\
924	nop				  ;\
925	.align	32
926#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR
927
928/*
929 * LEVEL_INTERRUPT is for level N interrupts.
930 * VECTOR_INTERRUPT is for the vector trap.
931 */
932#define	LEVEL_INTERRUPT(level)		\
933	.global	tt_pil/**/level		;\
934tt_pil/**/level:			;\
935	ba,pt	%xcc, pil_interrupt	;\
936	mov	level, %g4		/* delay slot: %g4 = interrupt level */ ;\
937	.align	32
938
/* Level 14 (clock/profiling) gets its own handler. */
939#define	LEVEL14_INTERRUPT			\
940	ba	pil14_interrupt			;\
941	mov	PIL_14, %g4			;\
942	.align	32
943
944#define	VECTOR_INTERRUPT				\
945	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g1	;\
946	btst	IRSR_BUSY, %g1				/* a mondo actually pending? */ ;\
947	bnz,pt	%xcc, vec_interrupt			;\
948	nop						;\
949	ba,a,pt	%xcc, vec_intr_spurious			;\
950	.empty						;\
951	.align	32
952
953/*
954 * MMU Trap Handlers.
955 */
956#define	SWITCH_GLOBALS	/* mmu->alt, alt->mmu */			\
957	rdpr	%pstate, %g5						;\
958	wrpr	%g5, PSTATE_MG | PSTATE_AG, %pstate	/* wrpr xors: toggles MMU<->alternate globals */
959
/* Instruction access exception: %g2 = faulting pc, %g3 = SFSR, %g1 = trap type. */
960#define	IMMU_EXCEPTION							\
961	membar	#Sync							;\
962	SWITCH_GLOBALS							;\
963	wr	%g0, ASI_IMMU, %asi					;\
964	rdpr	%tpc, %g2						;\
965	ldxa	[MMU_SFSR]%asi, %g3					;\
966	ba,pt	%xcc, .mmu_exception_end				;\
967	mov	T_INSTR_EXCEPTION, %g1					;\
968	.align	32
969
/* Data access exception: %g2 = tag access (addr|ctx), %g3 = SFSR, %g1 = trap type. */
970#define	DMMU_EXCEPTION							\
971	SWITCH_GLOBALS							;\
972	wr	%g0, ASI_DMMU, %asi					;\
973	ldxa	[MMU_TAG_ACCESS]%asi, %g2				;\
974	ldxa	[MMU_SFSR]%asi, %g3					;\
975	ba,pt	%xcc, .mmu_exception_end				;\
976	mov	T_DATA_EXCEPTION, %g1					;\
977	.align	32
978
/* Privilege violation taken on alternate globals: %g2 = SFAR, %g3 = SFSR. */
979#define	DMMU_EXC_AG_PRIV						\
980	wr	%g0, ASI_DMMU, %asi					;\
981	ldxa	[MMU_SFAR]%asi, %g2					;\
982	ba,pt	%xcc, .mmu_priv_exception				;\
983	ldxa	[MMU_SFSR]%asi, %g3					;\
984	.align	32
985
/* Misaligned access taken on alternate globals: %g2 = SFAR, %g3 = SFSR. */
986#define	DMMU_EXC_AG_NOT_ALIGNED						\
987	wr	%g0, ASI_DMMU, %asi					;\
988	ldxa	[MMU_SFAR]%asi, %g2					;\
989	ba,pt	%xcc, .mmu_exception_not_aligned			;\
990	ldxa	[MMU_SFSR]%asi, %g3					;\
991	.align	32
992
993/*
994 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
995 */
996#define	DMMU_EXC_LDDF_NOT_ALIGNED					\
997	btst	1, %sp		/* odd %sp = biased 64-bit frame: lddf-specific path */ ;\
998	bnz,pt	%xcc, .lddf_exception_not_aligned			;\
999	wr	%g0, ASI_DMMU, %asi					;\
1000	ldxa	[MMU_SFAR]%asi, %g2					;\
1001	ba,pt	%xcc, .mmu_exception_not_aligned			;\
1002	ldxa	[MMU_SFSR]%asi, %g3					;\
1003	.align	32
1004
1005#define	DMMU_EXC_STDF_NOT_ALIGNED					\
1006	btst	1, %sp		/* odd %sp = biased 64-bit frame: stdf-specific path */ ;\
1007	bnz,pt	%xcc, .stdf_exception_not_aligned			;\
1008	wr	%g0, ASI_DMMU, %asi					;\
1009	ldxa	[MMU_SFAR]%asi, %g2					;\
1010	ba,pt	%xcc, .mmu_exception_not_aligned			;\
1011	ldxa	[MMU_SFSR]%asi, %g3					;\
1012	.align	32
1013
1014/*
1015 * Flush the TLB using either the primary, secondary, or nucleus flush
1016 * operation based on whether the ctx from the tag access register matches
1017 * the primary or secondary context (flush the nucleus if neither matches).
1018 *
1019 * Requires a membar #Sync before next ld/st.
1020 * exits with:
1021 * g2 = tag access register
1022 * g3 = ctx number
1023 */
#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif
/*
 * 20 instructions.  Clobbers g1 (demap address), g4 (ctx mask), g5 and
 * g6 (scratch) in addition to the documented g2/g3 outputs.
 */
#define	DTLB_DEMAP_ENTRY						\
	mov	MMU_TAG_ACCESS, %g1					;\
	mov	MMU_PCONTEXT, %g5					;\
	ldxa	[%g1]ASI_DMMU, %g2					;\
	sethi	%hi(TAGACC_CTX_MASK), %g4				;\
	or	%g4, %lo(TAGACC_CTX_MASK), %g4				;\
	and	%g2, %g4, %g3			/* g3 = ctx */		;\
	ldxa	[%g5]ASI_DMMU, %g6		/* g6 = primary ctx */	;\
	and	%g6, %g4, %g6			/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6						;\
	be,pt	%xcc, 1f						;\
	andn	%g2, %g4, %g1			/* ctx = primary */	;\
	mov	MMU_SCONTEXT, %g5					;\
	ldxa	[%g5]ASI_DMMU, %g6		/* g6 = secondary ctx */ ;\
	and	%g6, %g4, %g6			/* &= CTXREG_CTX_MASK */ ;\
	cmp	%g3, %g6						;\
	be,a,pt	%xcc, 1f						;\
	  or	%g1, DEMAP_SECOND, %g1					;\
	or	%g1, DEMAP_NUCLEUS, %g1					;\
1:	stxa	%g0, [%g1]ASI_DTLB_DEMAP	/* MMU_DEMAP_PAGE */	;\
	membar	#Sync
1048
#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 * (Compiled only when 'cscope' is defined; this is a source-browsing
 * aid and is never part of the kernel.)
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif
1060
1061/*
1062 * Needs to be exactly 32 instructions
1063 *
1064 * UTLB NOTE: If we don't hit on the 8k pointer then we branch
1065 * to a special 4M tsb handler. It would be nice if that handler
1066 * could live in this file but currently it seems better to allow
1067 * it to fall thru to sfmmu_tsb_miss.
1068 */
#ifdef UTSB_PHYS
/*
 * UTSB_PHYS variant: the 2nd-TSB base lives in a scratchpad register
 * and TSB entries are fetched physically via ASI_QUAD_LDD_PHYS.
 * Branches: kernel ctx -> sfmmu_kdtlb_miss; two user TSBs ->
 * sfmmu_udtlb_slowpath; 8K TSB tag mismatch -> sfmmu_tsb_miss_tt.
 * On an 8K TSB hit, loads the TTE (%g5) into the D-TLB and retries.
 */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3				;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
	  nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4	/* g4 = tag, %g5 data */;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	  mov	%g0, %g3			/* clear 4M tsbe ptr */	;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#else /* UTSB_PHYS */
/*
 * Virtually-addressed TSB variant: a negative 8K TSB pointer (%g1)
 * marks the multiple-TSB case and takes the slow path; entries are
 * read with ASI_NQUAD_LD (patched by sfmmu_patch_ktsb at runtime).
 */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_DMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn %g1, sfmmu_udtlb_slowpath				;\
	  nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4	/* g4 = tag, %g5 data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt		/* no 4M TSB, miss */	;\
	  mov	%g0, %g3		/* clear 4M tsbe ptr */		;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_DTLB_IN	/* trapstat expects TTE */	;\
	retry				/* in %g5 */			;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#endif /* UTSB_PHYS */
1138
#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 * (Compiled only when 'cscope' is defined; this is a source-browsing
 * aid and is never part of the kernel.)
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif
1150
1151/*
1152 * Instruction miss handler.
1153 * ldda instructions will have their ASI patched
1154 * by sfmmu_patch_ktsb at runtime.
1155 * MUST be EXACTLY 32 instructions or we'll break.
1156 */
#ifdef UTSB_PHYS
/*
 * UTSB_PHYS variant (see DTLB_MISS above).  Additionally checks the
 * TTE execute-permission bit and vectors to exec_fault when clear;
 * otherwise loads the TTE (%g5) into the I-TLB and retries.
 */
#define	ITLB_MISS(table_name)						 \
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG, %g3				;\
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	/* g3 = 2nd tsb reg */	;\
	brgez,pn %g3, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
	  nop								;\
	ldda	[%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* br if 8k ptr miss */		;\
	  mov	%g0, %g3		/* no 4M TSB */			;\
	andcc	%g5, TTE_EXECPRM_INT, %g0 /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#else /* UTSB_PHYS */
/*
 * Virtually-addressed TSB variant (see DTLB_MISS above); a negative
 * 8K TSB pointer (%g1) takes the multiple-TSB slow path.
 */
#define	ITLB_MISS(table_name)						 \
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
	mov	MMU_TAG_ACCESS, %g6		/* select tag acc */	;\
	ldxa	[%g0]ASI_IMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	ldxa	[%g6]ASI_IMMU, %g2		/* g2 = tag access */	;\
	sllx	%g2, TAGACC_CTX_LSHIFT, %g3				;\
	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctx */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	brlz,pn	%g1, sfmmu_uitlb_slowpath	/* if >1 TSB branch */	;\
	  nop								;\
	ldda	[%g1]ASI_NQUAD_LD, %g4	/* g4 = tag, g5 = data */	;\
	cmp	%g4, %g7						;\
	bne,pn	%xcc, sfmmu_tsb_miss_tt	/* br if 8k ptr miss */		;\
	  mov	%g0, %g3		/* no 4M TSB */			;\
	andcc	%g5, TTE_EXECPRM_INT, %g0 /* check execute bit */	;\
	bz,pn	%icc, exec_fault					;\
	  nop								;\
	TT_TRACE(trace_tsbhit)		/* 2 instr ifdef TRAPTRACE */	;\
	stxa	%g5, [%g0]ASI_ITLB_IN	/* trapstat expects %g5 */	;\
	retry								;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128
#endif /* UTSB_PHYS */
1226
1227
1228/*
1229 * This macro is the first level handler for fast protection faults.
1230 * It first demaps the tlb entry which generated the fault and then
1231 * attempts to set the modify bit on the hash.  It needs to be
1232 * exactly 32 instructions.
1233 */
#define	DTLB_PROT							 \
	DTLB_DEMAP_ENTRY		/* 20 instructions */		;\
	/*								;\
	 * At this point:						;\
	 *   g1 = demap address (scratch)				;\
	 *   g2 = tag access register					;\
	 *   g3 = ctx number						;\
	 *   g4 = TAGACC_CTX_MASK (scratch)				;\
	 */								;\
	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
					/* clobbers g1 and g6 */	;\
	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	/* g1 = tsbe ptr */	;\
	brnz,pt %g3, sfmmu_uprot_trap		/* user trap */		;\
	  nop								;\
	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	unimp	0							;\
	.align 128

/*
 * Data access exception at TL>0: switch back to MMU globals and let
 * mmu_trap_tl1 sort it out.
 */
#define	DMMU_EXCEPTION_TL1						;\
	SWITCH_GLOBALS							;\
	ba,a,pt	%xcc, mmu_trap_tl1					;\
	  nop								;\
	.align 32

/*
 * Misaligned access at TL>0: also handed to mmu_trap_tl1.
 */
#define	MISALIGN_ADDR_TL1						;\
	ba,a,pt	%xcc, mmu_trap_tl1					;\
	  nop								;\
	.align 32
1267
1268/*
1269 * Trace a tsb hit
1270 * g1 = tsbe pointer (in/clobbered)
1271 * g2 = tag access register (in)
1272 * g3 - g4 = scratch (clobbered)
1273 * g5 = tsbe data (in)
1274 * g6 = scratch (clobbered)
1275 * g7 = pc we jumped here from (in)
1276 * ttextra = value to OR in to trap type (%tt) (in)
1277 */
#ifdef TRAPTRACE
#define TRACE_TSBHIT(ttextra)						 \
	membar	#Sync							;\
	sethi	%hi(FLUSH_ADDR), %g6					;\
	flush	%g6							;\
	TRACE_PTR(%g3, %g6)						;\
	GET_TRACE_TICK(%g6)						;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi				;\
	stxa	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */	;\
	stxa	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */		;\
	rdpr	%tnpc, %g6						;\
	stxa	%g6, [%g3 + TRAP_ENT_F2]%asi				;\
	stxa	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */	;\
	stxa	%g0, [%g3 + TRAP_ENT_F4]%asi				;\
	rdpr	%tpc, %g6						;\
	stxa	%g6, [%g3 + TRAP_ENT_TPC]%asi				;\
	rdpr	%tl, %g6						;\
	stha	%g6, [%g3 + TRAP_ENT_TL]%asi				;\
	rdpr	%tt, %g6						;\
	or	%g6, (ttextra), %g6					;\
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi				;\
	ldxa	[%g0]ASI_IMMU, %g1		/* tag target */	;\
	ldxa	[%g0]ASI_DMMU, %g4					;\
	cmp	%g6, FAST_IMMU_MISS_TT					;\
	movne	%icc, %g4, %g1						;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */		;\
	stxa	%g0, [%g3 + TRAP_ENT_TR]%asi				;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
/* Trap tracing disabled: the macro expands to nothing. */
#define TRACE_TSBHIT(ttextra)
#endif
1309
#if defined(lint)

/* For lint, the trap table is just a pair of aliased data objects. */
struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */
1316
1317/*
1318 * =======================================================================
1319 *		SPARC V9 TRAP TABLE
1320 *
1321 * The trap table is divided into two halves: the first half is used when
1322 * taking traps when TL=0; the second half is used when taking traps from
1323 * TL>0. Note that handlers in the second half of the table might not be able
1324 * to make the same assumptions as handlers in the first half of the table.
1325 *
1326 * Worst case trap nesting so far:
1327 *
1328 *	at TL=0 client issues software trap requesting service
1329 *	at TL=1 nucleus wants a register window
1330 *	at TL=2 register window clean/spill/fill takes a TLB miss
1331 *	at TL=3 processing TLB miss
1332 *	at TL=4 handle asynchronous error
1333 *
1334 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
1335 *
1336 * =======================================================================
1337 */
	.section ".text"
	.align	4
	.global trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #object
	.type	scb, #object
/*
 * Each trap vector is 32 bytes (8 instructions), enforced by the
 * .align 32 in the handler macros; the TLB miss/protection handlers
 * occupy 4 consecutive vectors each (.align 128).
 */
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	RED;				/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	NOT;				/* 009	instruction access MMU miss */
	ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	NOT;				/* 012	unimplemented LDD */
	NOT;				/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	NOT;				/* 031	data access MMU miss */
	ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae);
					/* 032	data access error */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	LABELED_BAD(tt0_asdat);		/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	VECTOR_INTERRUPT;		/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	GOTO_TT(ce_err, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt0_fecc);		/* 070  fast ecache ECC error */
	LABELED_BAD(tt0_dperr);		/* 071  Cheetah+ dcache parity error */
	LABELED_BAD(tt0_iperr);		/* 072  Cheetah+ icache parity error */
	NOT;				/* 073  reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	FLUSHW();			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL(syscall_trap32);	/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129  get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4; 			/* 130 - 137 unused */
	DTRACE_PID;			/* 138  dtrace pid tracing provider */
	DTRACE_FASTTRAP;		/* 139  dtrace fasttrap provider */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL(syscall_trap)		/* 140  LP64 system call */
	SYSCALL(nosys);			/* 141  unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142  freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143  unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142  unused system call trap */
	SYSCALL(nosys);			/* 143  unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C  unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(kctx_obp_bpt);		/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
/* Second half of the table: traps taken at TL>0. */
trap_table1:
	NOT4; NOT4; NOT; NOT;		/* 000 - 009 unused */
	ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae);
					/* 00A	instruction access error */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030 	data access exception */
	NOT;				/* 031 unused */
	ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae);
					/* 032	data access error */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	LABELED_BAD(tt1_asdat);		/* 040	async data error */
	NOT; NOT; NOT;			/* 041 - 043 unused */
	NOT4; NOT4; NOT4;		/* 044 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	GOTO_TT(ce_err_tl1, trace_gen);	/* 063	corrected ECC error */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	LABELED_BAD(tt1_fecc);		/* 070  fast ecache ECC error */
	LABELED_BAD(tt1_dperr);		/* 071  Cheetah+ dcache parity error */
	LABELED_BAD(tt1_iperr);		/* 072  Cheetah+ icache parity error */
	NOT;				/* 073  reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0  spill 4 other */
	NOT4;				/* 0B4  spill 5 other */
	NOT4;				/* 0B8  spill 6 other */
	NOT4;				/* 0BC  spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0C4	fill 1 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0C8	fill 2 normal */
	FILL_32bit_tt1(ASI_AIUP,fn1);	/* 0CC	fill 3 normal */
	FILL_64bit_tt1(ASI_AIUP,fn1);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
	LABELED_BAD(tt1_swtrap0);	/* 100  fast ecache ECC error (cont) */
	LABELED_BAD(tt1_swtrap1);	/* 101  Ch+ D$ parity error (cont) */
	LABELED_BAD(tt1_swtrap2);	/* 102  Ch+ I$ parity error (cont) */
	NOT;				/* 103  reserved */
/*
 * We only reserve the above four special case soft traps for code running
 * at TL>0, so we can truncate the trap table here.
 */
etrap_table:
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)
1577
1578/*
1579 * We get to exec_fault in the case of an instruction miss and tte
1580 * has no execute bit set.  We go to tl0 to handle it.
1581 *
1582 * g1 = tsbe pointer (in/clobbered)
1583 * g2 = tag access register (in)
1584 * g3 - g4 = scratch (clobbered)
1585 * g5 = tsbe data (in)
1586 * g6 = scratch (clobbered)
1587 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(0x200)			! tag the trace entry as exec_fault
	SWITCH_GLOBALS				! alt -> mmu globals for sys_trap
	mov	MMU_TAG_ACCESS, %g4
	ldxa	[%g4]ASI_IMMU, %g2			! arg1 = addr
	mov	T_INSTR_MMU_MISS, %g3			! arg2 = traptype
	set	trap, %g1				! sys_trap handler = trap()
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4					! last sys_trap arg (-1)
1597
/*
 * Misaligned-access fault at TL=0.
 * In: g2 = SFAR (fault address), g3 = SFSR, set by the DMMU_EXC_*
 * macros.  If a non-privileged thread has registered a misaligned
 * utrap (P_UTRAP15) redirect to it, choosing the 32- vs 64-bit setup
 * path by the stack-bias bit of %sp; otherwise fall into
 * .mmu_exception_end with g1 = T_ALIGNMENT.
 */
.mmu_exception_not_aligned:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f			! kernel fault: no utraps
	nop
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5			! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f				! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap			! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7		! delay: %g7 = misaligned address
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1	! delay: trap type for trap()
1623
/*
 * Privileged action fault at TL=0 (from DMMU_EXC_AG_PRIV; g2 = SFAR,
 * g3 = SFSR).  A non-privileged thread with a registered P_UTRAP16
 * handler is redirected to it; everyone else falls into
 * .mmu_exception_end with g1 = T_PRIV_INSTR.
 */
.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f			! kernel fault: no utraps
	nop
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5		! privileged-action utrap?
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1	! fall into .mmu_exception_end
1640
/*
 * Common tail for MMU exceptions.
 * In: g1 = trap type, g2 = fault address / tag access, g3 = SFSR.
 * If this CPU's dtrace flags have CPU_DTRACE_NOFAULT set, record
 * CPU_DTRACE_BADADDR and retire the trap with "done" (a DTrace probe
 * in no-fault mode must not take the fault).  Otherwise, on Panther
 * only, check the SFSR parity bit and jump to the i/d TLB parity
 * handler; else enter trap() via sys_trap with
 * g3 = (SFSR << 32) | trap type and g4 = -1.
 */
.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4			! g4 = &cpu_core[cpuid]
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	%xcc, .mmu_exception_tlb_chk
	or	%g5, CPU_DTRACE_BADADDR, %g5	! delay: flag the bad access
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

.mmu_exception_tlb_chk:
	GET_CPU_IMPL(%g5)			! check SFSR.FT to see if this
	cmp	%g5, PANTHER_IMPL		! is a TLB parity error. But
	bne	2f				! we only do this check while
	mov	1, %g4				! running on Panther CPUs
	sllx	%g4, PN_SFSR_PARITY_SHIFT, %g4	! since US-I/II use the same
	andcc	%g3, %g4, %g0			! bit for something else which
	bz	2f				! will be handled later.
	nop
.mmu_exception_is_tlb_parity:
	.weak itlb_parity_trap
	.weak dtlb_parity_trap
	set	itlb_parity_trap, %g4
	cmp	%g1, T_INSTR_EXCEPTION		! branch to the itlb or
	be	3f				! dtlb parity handler
	nop					! if this trap is due
	set	dtlb_parity_trap, %g4
	cmp	%g1, T_DATA_EXCEPTION		! to a IMMU exception
	be	3f				! or DMMU exception.
	nop
2:
	sllx	%g3, 32, %g3			! pack SFSR into the upper
	or	%g3, %g1, %g3			! half, trap type in lower
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
3:
	jmp	%g4				! off to the appropriate
	nop					! TLB parity handler
1682
/*
 * fp disabled trap (tt 0x020).
 * A privileged fp-disabled trap is normally fatal (ptl1_panic); under
 * SF_ERRATA_30 a 'call' may spuriously raise fp-disabled, so that
 * build routes privileged traps (and a NULL thread pointer) to the C
 * handler instead.  For user threads: dispatch a registered
 * fp-disabled utrap (P_UTRAP7) if present, choosing the 32/64-bit
 * setup path by the %sp stack-bias bit; otherwise enter fp_disabled()
 * via sys_trap.
 */
.fp_disabled:
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	brz,a,pn %g1, 2f
	  nop
#endif
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	bnz,pn %icc, 2f
	  nop
#else
	bnz,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_FPTRAP, %g1
#endif
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,a,pt %g5, 2f
	  nop
	ldn	[%g5 + P_UTRAP7], %g5			! fp_disabled utrap?
	brz,a,pn %g5, 2f
	  nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f				! 32 bit user program
	  nop
	ba,a,pt	%xcc, .setup_v9utrap			! 64 bit user program
	  nop
1:
	ba,pt	%xcc, .setup_utrap
	  or	%g0, %g0, %g7		! delay: no misaligned address
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
1718
/*
 * fp exception ieee 754 trap (tt 0x021).
 * Fatal (ptl1_panic) if taken from privileged mode.  Reads %fsr
 * (staged through CPU_TMP1, since %fsr can only be stored to memory)
 * into %g2, dispatches a registered P_UTRAP8 handler if the process
 * has one, else enters _fp_ieee_exception() via sys_trap.
 */
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2			! %g2 = %fsr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,a,pt %g5, 1f
	  nop
	ldn	[%g5 + P_UTRAP8], %g5		! fp ieee exception utrap?
	brnz,a,pt %g5, .setup_v9utrap
	  nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
1739
1740/*
1741 * Register Inputs:
1742 *	%g5		user trap handler
1743 *	%g7		misaligned addr - for alignment traps only
1744 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	/*
	 * The utrap handler runs in the window created by the save
	 * above, with %l1 = %tpc, %l2 = %tnpc, %l3 = misaligned addr.
	 */
	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */
1779
/*
 * Decide whether a software-trap or unimplemented-instruction trap
 * should be redirected to a user (V9) utrap handler.
 * In: g3 = trap type.
 * Privileged traps, processes without p_utraps, and genuine ILLTRAP
 * instructions all fall through to trap() via sys_trap (label 3).
 * For T_SOFTWARE_TRAP, index p_utraps by (hw tt - 254), i.e.
 * UT_TRAP_INSTRUCTION_16 = p_utraps[18].  For T_UNIMP_INSTR, fetch
 * the trapping instruction with an "as if user" ASI -- guarded by
 * CPU_TL1_HDLR since that fetch may itself fault -- and mask against
 * 0xc1c00000 to recognize an ILLTRAP encoding.
 */
.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	  nop
	CPU_ADDR(%g4, %g1)				! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5			! load thread pointer
	ldn	[%g5 + T_PROCP], %g5			! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5			! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	  nop

	brz,pt %g5, 3f			! if p_utraps == NULL goto trap()
	  rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	  smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	  nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	  nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
	rdpr	%tpc, %g1		! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4		! and instruction with mask
	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
	  nop				! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5	! utrap handler registered?
	brnz,a,pt %g5, .setup_v9utrap
	  nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
	/* NOTREACHED */
1827
1828/*
1829 * Register Inputs:
1830 *	%g5		user trap handler
1831 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	/*
	 * Precise single-step interplay: if %tnpc was parked at the
	 * address-space userlimit and pcb_step == STEP_ACTIVE, stash
	 * the real handler address in pcb_tracepc and write userlimit
	 * back to %tnpc so the step completes via a FLTBOUNDS fault
	 * (NOTE(review): see the pcb_step handling in the C code).
	 */
	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	  nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	  nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */
1885
	/*
	 * fp_exception fast path.  Reads %fsr and, when the trap is a
	 * user-mode unfinished_FPop caused by a "fitos" instruction (and
	 * the NX IEEE trap is not enabled), simulates it in-line with a
	 * fitod/fdtos pair using %d62 as scratch.  All other cases fall
	 * through to .fp_exception_cont for full software emulation.
	 */
1886.fp_exception:
1887	CPU_ADDR(%g1, %g4)
1888	stx	%fsr, [%g1 + CPU_TMP1]
1889	ldx	[%g1 + CPU_TMP1], %g2
1890
1891	/*
1892	 * Cheetah takes unfinished_FPop trap for certain range of operands
1893	 * to the "fitos" instruction. Instead of going through the slow
1894	 * software emulation path, we try to simulate the "fitos" instruction
1895	 * via "fitod" and "fdtos" provided the following conditions are met:
1896	 *
1897	 *	fpu_exists is set (if DEBUG)
1898	 *	not in privileged mode
1899	 *	ftt is unfinished_FPop
1900	 *	NXM IEEE trap is not enabled
1901	 *	instruction at %tpc is "fitos"
1902	 *
1903	 *  Usage:
1904	 *	%g1	per cpu address
1905	 *	%g2	%fsr
1906	 *	%g6	user instruction
1907	 *
1908	 * Note that we can take a memory access related trap while trying
1909	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
1910	 * flag to catch those traps and let the SFMMU code deal with page
1911	 * fault and data access exception.
1912	 */
1913#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
1914	sethi	%hi(fpu_exists), %g7
1915	ld	[%g7 + %lo(fpu_exists)], %g7
1916	brz,pn %g7, .fp_exception_cont
1917	  nop
1918#endif
1919	rdpr	%tstate, %g7			! branch if in privileged mode
1920	btst	TSTATE_PRIV, %g7
1921	bnz,pn	%xcc, .fp_exception_cont
1922	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
1923	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
1924	cmp	%g7, FTT_UNFIN
1925	set	FSR_TEM_NX, %g5
1926	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
1927	  andcc	%g2, %g5, %g0
1928	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
1929	  rdpr	%tpc, %g5			! get faulting PC
1930
	! Guard the user-space instruction fetch with CPU_TL1_HDLR so a
	! fault on the load is handled by the SFMMU code (see comment above).
1931	or	%g0, 1, %g7
1932	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
1933	lda	[%g5]ASI_USER, %g6		! get user's instruction
1934	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
1935
1936	set	FITOS_INSTR_MASK, %g7
1937	and	%g6, %g7, %g7
1938	set	FITOS_INSTR, %g5
1939	cmp	%g7, %g5
1940	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
1941	 nop
1942
1943	/*
1944	 * This is unfinished FPops trap for "fitos" instruction. We
1945	 * need to simulate "fitos" via "fitod" and "fdtos" instruction
1946	 * sequence.
1947	 *
1948	 * We need a temporary FP register to do the conversion. Since
1949	 * both source and destination operands for the "fitos" instruction
1950	 * have to be within %f0-%f31, we use an FP register from the upper
1951	 * half to guarantee that it won't collide with the source or the
1952	 * dest operand. However, we do have to save and restore its value.
1953	 *
1954	 * We use %d62 as a temporary FP register for the conversion and
1955	 * branch to appropriate instruction within the conversion tables
1956	 * based upon the rs2 and rd values.
1957	 */
1958
1959	std	%d62, [%g1 + CPU_TMP1]		! save original value
1960
	! Each table entry below is one 4-byte instruction; index = rs2 * 4.
	! The delay-slot "ba" executes the selected entry, then branches on.
1961	srl	%g6, FITOS_RS2_SHIFT, %g7
1962	and	%g7, FITOS_REG_MASK, %g7
1963	set	_fitos_fitod_table, %g4
1964	sllx	%g7, 2, %g7
1965	jmp	%g4 + %g7
1966	  ba,pt	%xcc, _fitos_fitod_done
1967	.empty
1968
1969_fitos_fitod_table:
1970	  fitod	%f0, %d62
1971	  fitod	%f1, %d62
1972	  fitod	%f2, %d62
1973	  fitod	%f3, %d62
1974	  fitod	%f4, %d62
1975	  fitod	%f5, %d62
1976	  fitod	%f6, %d62
1977	  fitod	%f7, %d62
1978	  fitod	%f8, %d62
1979	  fitod	%f9, %d62
1980	  fitod	%f10, %d62
1981	  fitod	%f11, %d62
1982	  fitod	%f12, %d62
1983	  fitod	%f13, %d62
1984	  fitod	%f14, %d62
1985	  fitod	%f15, %d62
1986	  fitod	%f16, %d62
1987	  fitod	%f17, %d62
1988	  fitod	%f18, %d62
1989	  fitod	%f19, %d62
1990	  fitod	%f20, %d62
1991	  fitod	%f21, %d62
1992	  fitod	%f22, %d62
1993	  fitod	%f23, %d62
1994	  fitod	%f24, %d62
1995	  fitod	%f25, %d62
1996	  fitod	%f26, %d62
1997	  fitod	%f27, %d62
1998	  fitod	%f28, %d62
1999	  fitod	%f29, %d62
2000	  fitod	%f30, %d62
2001	  fitod	%f31, %d62
2002_fitos_fitod_done:
2003
2004	/*
2005	 * Now convert data back into single precision
2006	 */
2007	srl	%g6, FITOS_RD_SHIFT, %g7
2008	and	%g7, FITOS_REG_MASK, %g7
2009	set	_fitos_fdtos_table, %g4
2010	sllx	%g7, 2, %g7
2011	jmp	%g4 + %g7
2012	  ba,pt	%xcc, _fitos_fdtos_done
2013	.empty
2014
2015_fitos_fdtos_table:
2016	  fdtos	%d62, %f0
2017	  fdtos	%d62, %f1
2018	  fdtos	%d62, %f2
2019	  fdtos	%d62, %f3
2020	  fdtos	%d62, %f4
2021	  fdtos	%d62, %f5
2022	  fdtos	%d62, %f6
2023	  fdtos	%d62, %f7
2024	  fdtos	%d62, %f8
2025	  fdtos	%d62, %f9
2026	  fdtos	%d62, %f10
2027	  fdtos	%d62, %f11
2028	  fdtos	%d62, %f12
2029	  fdtos	%d62, %f13
2030	  fdtos	%d62, %f14
2031	  fdtos	%d62, %f15
2032	  fdtos	%d62, %f16
2033	  fdtos	%d62, %f17
2034	  fdtos	%d62, %f18
2035	  fdtos	%d62, %f19
2036	  fdtos	%d62, %f20
2037	  fdtos	%d62, %f21
2038	  fdtos	%d62, %f22
2039	  fdtos	%d62, %f23
2040	  fdtos	%d62, %f24
2041	  fdtos	%d62, %f25
2042	  fdtos	%d62, %f26
2043	  fdtos	%d62, %f27
2044	  fdtos	%d62, %f28
2045	  fdtos	%d62, %f29
2046	  fdtos	%d62, %f30
2047	  fdtos	%d62, %f31
2048_fitos_fdtos_done:
2049
2050	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62
2051
2052#if DEBUG
2053	/*
2054	 * Update FPop_unfinished trap kstat
2055	 */
2056	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
2057	ldx	[%g7], %g5
20581:
2059	add	%g5, 1, %g6
2060
	! lock-free increment: retry the compare-and-swap until it sticks
2061	casxa	[%g7] ASI_N, %g5, %g6
2062	cmp	%g5, %g6
2063	bne,a,pn %xcc, 1b
2064	  or	%g0, %g6, %g5
2065
2066	/*
2067	 * Update fpu_sim_fitos kstat
2068	 */
2069	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
2070	ldx	[%g7], %g5
20711:
2072	add	%g5, 1, %g6
2073
2074	casxa	[%g7] ASI_N, %g5, %g6
2075	cmp	%g5, %g6
2076	bne,a,pn %xcc, 1b
2077	  or	%g0, %g6, %g5
2078#endif /* DEBUG */
2079
2080	FAST_TRAP_DONE
2081
2082.fp_exception_cont:
2083	/*
2084	 * Let _fp_exception deal with simulating FPop instruction.
2085	 * Note that we need to pass %fsr in %g2 (already read above).
2086	 */
2087
2088	set	_fp_exception, %g1
2089	ba,pt	%xcc, sys_trap
2090	sub	%g0, 1, %g4		! delay: %g4 = -1, same sys_trap
					! level argument used by the other
					! handlers in this file
2091
	/*
	 * Flush all register windows to the stack and declare every
	 * window clean (%cleanwin = 0), then switch %wstate to the
	 * "clean" spill/fill handlers if an mpcb exists.  The %g1/%g3/%g4
	 * setup covers the case where the "save" itself traps into
	 * sys_trap (T_FLUSH_PCB).
	 */
2092.clean_windows:
2093	set	trap, %g1
2094	mov	T_FLUSH_PCB, %g3
2095	sub	%g0, 1, %g4
2096	save
2097	flushw
2098	restore
2099	wrpr	%g0, %g0, %cleanwin	! no clean windows
2100
2101	CPU_ADDR(%g4, %g5)
2102	ldn	[%g4 + CPU_MPCB], %g4
2103	brz,a,pn %g4, 1f		! skip wstate update if no mpcb
2104	  nop
2105	ld	[%g4 + MPCB_WSTATE], %g5
2106	add	%g5, WSTATE_CLEAN_OFFSET, %g5
2107	wrpr	%g0, %g5, %wstate
21081:	FAST_TRAP_DONE
2109
2110/*
2111 * .spill_clean: clean the previous window, restore the wstate, and
2112 * "done".
2113 *
2114 * Entry: %g7 contains new wstate
2115 */
2116.spill_clean:
	! Step back one window (wrapping from 0 to nwin-1), zero its
	! locals, restore the caller-supplied %wstate, and mark the
	! window saved before retrying the trapped instruction.
2117	sethi	%hi(nwin_minus_one), %g5
2118	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
2119	rdpr	%cwp, %g6			! %g6 = %cwp
2120	deccc	%g6				! %g6--
2121	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
2122	wrpr	%g6, %cwp
2123	TT_TRACE_L(trace_win)
2124	clr	%l0
2125	clr	%l1
2126	clr	%l2
2127	clr	%l3
2128	clr	%l4
2129	clr	%l5
2130	clr	%l6
2131	clr	%l7
2132	wrpr	%g0, %g7, %wstate
2133	saved
2134	retry			! restores correct %cwp
2135
	/*
	 * Fast trap: set p_fixalignment in the current proc so subsequent
	 * misaligned accesses are fixed up rather than faulting.
	 */
2136.fix_alignment:
2137	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2138	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
2139	ldn	[%g1 + T_PROCP], %g1
2140	mov	1, %g2
2141	stb	%g2, [%g1 + P_FIXALIGNMENT]
2142	FAST_TRAP_DONE
2143
/*
 * STDF_REG(REG, ADDR, TMP): store fp double register rd=REG (0-31
 * encoded) to [ADDR + CPU_TMP1].  REG is scaled by 8 and used as a
 * computed-jump offset into a table of (ba ; std) pairs — 8 bytes per
 * entry.  The table interleaves %f0/%f32/%f2/%f34/... because in the
 * V9 double-register encoding the low bit of rd selects the upper
 * register bank.  Clobbers REG and TMP.
 */
2144#define	STDF_REG(REG, ADDR, TMP)		\
2145	sll	REG, 3, REG			;\
2146mark1:	set	start1, TMP			;\
2147	jmp	REG + TMP			;\
2148	  nop					;\
2149start1:	ba,pt	%xcc, done1			;\
2150	  std	%f0, [ADDR + CPU_TMP1]		;\
2151	ba,pt	%xcc, done1			;\
2152	  std	%f32, [ADDR + CPU_TMP1]		;\
2153	ba,pt	%xcc, done1			;\
2154	  std	%f2, [ADDR + CPU_TMP1]		;\
2155	ba,pt	%xcc, done1			;\
2156	  std	%f34, [ADDR + CPU_TMP1]		;\
2157	ba,pt	%xcc, done1			;\
2158	  std	%f4, [ADDR + CPU_TMP1]		;\
2159	ba,pt	%xcc, done1			;\
2160	  std	%f36, [ADDR + CPU_TMP1]		;\
2161	ba,pt	%xcc, done1			;\
2162	  std	%f6, [ADDR + CPU_TMP1]		;\
2163	ba,pt	%xcc, done1			;\
2164	  std	%f38, [ADDR + CPU_TMP1]		;\
2165	ba,pt	%xcc, done1			;\
2166	  std	%f8, [ADDR + CPU_TMP1]		;\
2167	ba,pt	%xcc, done1			;\
2168	  std	%f40, [ADDR + CPU_TMP1]		;\
2169	ba,pt	%xcc, done1			;\
2170	  std	%f10, [ADDR + CPU_TMP1]		;\
2171	ba,pt	%xcc, done1			;\
2172	  std	%f42, [ADDR + CPU_TMP1]		;\
2173	ba,pt	%xcc, done1			;\
2174	  std	%f12, [ADDR + CPU_TMP1]		;\
2175	ba,pt	%xcc, done1			;\
2176	  std	%f44, [ADDR + CPU_TMP1]		;\
2177	ba,pt	%xcc, done1			;\
2178	  std	%f14, [ADDR + CPU_TMP1]		;\
2179	ba,pt	%xcc, done1			;\
2180	  std	%f46, [ADDR + CPU_TMP1]		;\
2181	ba,pt	%xcc, done1			;\
2182	  std	%f16, [ADDR + CPU_TMP1]		;\
2183	ba,pt	%xcc, done1			;\
2184	  std	%f48, [ADDR + CPU_TMP1]		;\
2185	ba,pt	%xcc, done1			;\
2186	  std	%f18, [ADDR + CPU_TMP1]		;\
2187	ba,pt	%xcc, done1			;\
2188	  std	%f50, [ADDR + CPU_TMP1]		;\
2189	ba,pt	%xcc, done1			;\
2190	  std	%f20, [ADDR + CPU_TMP1]		;\
2191	ba,pt	%xcc, done1			;\
2192	  std	%f52, [ADDR + CPU_TMP1]		;\
2193	ba,pt	%xcc, done1			;\
2194	  std	%f22, [ADDR + CPU_TMP1]		;\
2195	ba,pt	%xcc, done1			;\
2196	  std	%f54, [ADDR + CPU_TMP1]		;\
2197	ba,pt	%xcc, done1			;\
2198	  std	%f24, [ADDR + CPU_TMP1]		;\
2199	ba,pt	%xcc, done1			;\
2200	  std	%f56, [ADDR + CPU_TMP1]		;\
2201	ba,pt	%xcc, done1			;\
2202	  std	%f26, [ADDR + CPU_TMP1]		;\
2203	ba,pt	%xcc, done1			;\
2204	  std	%f58, [ADDR + CPU_TMP1]		;\
2205	ba,pt	%xcc, done1			;\
2206	  std	%f28, [ADDR + CPU_TMP1]		;\
2207	ba,pt	%xcc, done1			;\
2208	  std	%f60, [ADDR + CPU_TMP1]		;\
2209	ba,pt	%xcc, done1			;\
2210	  std	%f30, [ADDR + CPU_TMP1]		;\
2211	ba,pt	%xcc, done1			;\
2212	  std	%f62, [ADDR + CPU_TMP1]		;\
2213done1:
2214
/*
 * LDDF_REG(REG, ADDR, TMP): mirror of STDF_REG — load fp double
 * register rd=REG from [ADDR + CPU_TMP1] via a computed jump into a
 * table of (ba ; ldd) 8-byte pairs.  Same rd-encoding interleaving of
 * the lower/upper register banks.  Clobbers REG and TMP.
 */
2215#define	LDDF_REG(REG, ADDR, TMP)		\
2216	sll	REG, 3, REG			;\
2217mark2:	set	start2, TMP			;\
2218	jmp	REG + TMP			;\
2219	  nop					;\
2220start2:	ba,pt	%xcc, done2			;\
2221	  ldd	[ADDR + CPU_TMP1], %f0		;\
2222	ba,pt	%xcc, done2			;\
2223	  ldd	[ADDR + CPU_TMP1], %f32		;\
2224	ba,pt	%xcc, done2			;\
2225	  ldd	[ADDR + CPU_TMP1], %f2		;\
2226	ba,pt	%xcc, done2			;\
2227	  ldd	[ADDR + CPU_TMP1], %f34		;\
2228	ba,pt	%xcc, done2			;\
2229	  ldd	[ADDR + CPU_TMP1], %f4		;\
2230	ba,pt	%xcc, done2			;\
2231	  ldd	[ADDR + CPU_TMP1], %f36		;\
2232	ba,pt	%xcc, done2			;\
2233	  ldd	[ADDR + CPU_TMP1], %f6		;\
2234	ba,pt	%xcc, done2			;\
2235	  ldd	[ADDR + CPU_TMP1], %f38		;\
2236	ba,pt	%xcc, done2			;\
2237	  ldd	[ADDR + CPU_TMP1], %f8		;\
2238	ba,pt	%xcc, done2			;\
2239	  ldd	[ADDR + CPU_TMP1], %f40		;\
2240	ba,pt	%xcc, done2			;\
2241	  ldd	[ADDR + CPU_TMP1], %f10		;\
2242	ba,pt	%xcc, done2			;\
2243	  ldd	[ADDR + CPU_TMP1], %f42		;\
2244	ba,pt	%xcc, done2			;\
2245	  ldd	[ADDR + CPU_TMP1], %f12		;\
2246	ba,pt	%xcc, done2			;\
2247	  ldd	[ADDR + CPU_TMP1], %f44		;\
2248	ba,pt	%xcc, done2			;\
2249	  ldd	[ADDR + CPU_TMP1], %f14		;\
2250	ba,pt	%xcc, done2			;\
2251	  ldd	[ADDR + CPU_TMP1], %f46		;\
2252	ba,pt	%xcc, done2			;\
2253	  ldd	[ADDR + CPU_TMP1], %f16		;\
2254	ba,pt	%xcc, done2			;\
2255	  ldd	[ADDR + CPU_TMP1], %f48		;\
2256	ba,pt	%xcc, done2			;\
2257	  ldd	[ADDR + CPU_TMP1], %f18		;\
2258	ba,pt	%xcc, done2			;\
2259	  ldd	[ADDR + CPU_TMP1], %f50		;\
2260	ba,pt	%xcc, done2			;\
2261	  ldd	[ADDR + CPU_TMP1], %f20		;\
2262	ba,pt	%xcc, done2			;\
2263	  ldd	[ADDR + CPU_TMP1], %f52		;\
2264	ba,pt	%xcc, done2			;\
2265	  ldd	[ADDR + CPU_TMP1], %f22		;\
2266	ba,pt	%xcc, done2			;\
2267	  ldd	[ADDR + CPU_TMP1], %f54		;\
2268	ba,pt	%xcc, done2			;\
2269	  ldd	[ADDR + CPU_TMP1], %f24		;\
2270	ba,pt	%xcc, done2			;\
2271	  ldd	[ADDR + CPU_TMP1], %f56		;\
2272	ba,pt	%xcc, done2			;\
2273	  ldd	[ADDR + CPU_TMP1], %f26		;\
2274	ba,pt	%xcc, done2			;\
2275	  ldd	[ADDR + CPU_TMP1], %f58		;\
2276	ba,pt	%xcc, done2			;\
2277	  ldd	[ADDR + CPU_TMP1], %f28		;\
2278	ba,pt	%xcc, done2			;\
2279	  ldd	[ADDR + CPU_TMP1], %f60		;\
2280	ba,pt	%xcc, done2			;\
2281	  ldd	[ADDR + CPU_TMP1], %f30		;\
2282	ba,pt	%xcc, done2			;\
2283	  ldd	[ADDR + CPU_TMP1], %f62		;\
2284done2:
2285
	/*
	 * Misaligned user lddf/ldda: if the access uses a big-endian
	 * primary/secondary (or no-fault) ASI, emulate it in-line with
	 * two 4-byte user loads and LDDF_REG; otherwise (little-endian
	 * or nofault-little ASIs, or no fpu) hand off to fpu_trap in C.
	 */
2286.lddf_exception_not_aligned:
2287	/*
2288	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2289	 */
2290	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5
2291
2292#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2293	sethi	%hi(fpu_exists), %g2		! check fpu_exists
2294	ld	[%g2 + %lo(fpu_exists)], %g2
2295	brz,a,pn %g2, 4f
2296	  nop
2297#endif
2298	CPU_ADDR(%g1, %g4)
2299	or	%g0, 1, %g4
2300	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2301
2302	rdpr	%tpc, %g2
2303	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
2304	srl	%g6, 23, %g1		! using ldda or not?
2305	and	%g1, 1, %g1
2306	brz,a,pt %g1, 2f		! check for ldda instruction
2307	  nop
2308	srl	%g6, 13, %g1		! check immflag
2309	and	%g1, 1, %g1
2310	rdpr	%tstate, %g2		! %tstate in %g2
2311	brnz,a,pn %g1, 1f
2312	  srl	%g2, 31, %g1		! get asi from %tstate
2313	srl	%g6, 5, %g1		! get asi from instruction
2314	and	%g1, 0xFF, %g1		! imm_asi field
23151:
2316	cmp	%g1, ASI_P		! primary address space
2317	be,a,pt %icc, 2f
2318	  nop
2319	cmp	%g1, ASI_PNF		! primary no fault address space
2320	be,a,pt %icc, 2f
2321	  nop
2322	cmp	%g1, ASI_S		! secondary address space
2323	be,a,pt %icc, 2f
2324	  nop
2325	cmp	%g1, ASI_SNF		! secondary no fault address space
2326	bne,a,pn %icc, 3f
2327	  nop
23282:
	! emulation path: assemble the 64-bit value from two aligned
	! 32-bit user loads (big-endian order: first word is the high half)
2329	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
2330	add	%g5, 4, %g5		! increment misaligned data address
2331	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data
2332
2333	sllx	%g7, 32, %g7
2334	or	%g5, %g7, %g5		! combine data
2335	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
2336	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1
2337
2338	srl	%g6, 25, %g3		! %g6 has the instruction
2339	and	%g3, 0x1F, %g3		! %g3 has rd
2340	LDDF_REG(%g3, %g7, %g4)
2341
2342	CPU_ADDR(%g1, %g4)
2343	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2344	FAST_TRAP_DONE
23453:
2346	CPU_ADDR(%g1, %g4)
2347	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
23484:
2349	set	T_USER, %g3		! trap type in %g3
2350	or	%g3, T_LDDF_ALIGN, %g3
2351	mov	%g5, %g2		! misaligned vaddr in %g2
2352	set	fpu_trap, %g1		! goto C for the little and
2353	ba,pt	%xcc, sys_trap		! no fault little asi's
2354	  sub	%g0, 1, %g4
2355
	/*
	 * Misaligned user stdf/stda: counterpart of the lddf handler
	 * above.  For big-endian primary/secondary ASIs, spill the fp
	 * double via STDF_REG and write it out as two 4-byte user
	 * stores; otherwise hand off to fpu_trap in C.
	 */
2356.stdf_exception_not_aligned:
2357	/*
2358	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2359	 */
2360	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5
2361
2362#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2363	sethi	%hi(fpu_exists), %g7		! check fpu_exists
2364	ld	[%g7 + %lo(fpu_exists)], %g3
2365	brz,a,pn %g3, 4f
2366	  nop
2367#endif
2368	CPU_ADDR(%g1, %g4)
2369	or	%g0, 1, %g4
2370	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2371
2372	rdpr	%tpc, %g2
2373	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction
2374
2375	srl	%g6, 23, %g1		! using stda or not?
2376	and	%g1, 1, %g1
2377	brz,a,pt %g1, 2f		! check for stda instruction
2378	  nop
2379	srl	%g6, 13, %g1		! check immflag
2380	and	%g1, 1, %g1
2381	rdpr	%tstate, %g2		! %tstate in %g2
2382	brnz,a,pn %g1, 1f
2383	  srl	%g2, 31, %g1		! get asi from %tstate
2384	srl	%g6, 5, %g1		! get asi from instruction
2385	and	%g1, 0xFF, %g1		! imm_asi field
23861:
2387	cmp	%g1, ASI_P		! primary address space
2388	be,a,pt %icc, 2f
2389	  nop
2390	cmp	%g1, ASI_S		! secondary address space
2391	bne,a,pn %icc, 3f
2392	  nop
23932:
2394	srl	%g6, 25, %g6
2395	and	%g6, 0x1F, %g6		! %g6 has rd
2396	CPU_ADDR(%g7, %g1)
2397	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)
2398
	! write out big-endian: high word first, then low word
2399	ldx	[%g7 + CPU_TMP1], %g6
2400	srlx	%g6, 32, %g7
2401	stuwa	%g7, [%g5]ASI_USER	! first half
2402	add	%g5, 4, %g5		! increment misaligned data address
2403	stuwa	%g6, [%g5]ASI_USER	! second half
2404
2405	CPU_ADDR(%g1, %g4)
2406	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2407	FAST_TRAP_DONE
24083:
2409	CPU_ADDR(%g1, %g4)
2410	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
24114:
2412	set	T_USER, %g3		! trap type in %g3
2413	or	%g3, T_STDF_ALIGN, %g3
2414	mov	%g5, %g2		! misaligned vaddr in %g2
2415	set	fpu_trap, %g1		! goto C for the little and
2416	ba,pt	%xcc, sys_trap		! nofault little asi's
2417	  sub	%g0, 1, %g4
2418
2419#ifdef DEBUG_USER_TRAPTRACECTL
2420
	/*
	 * Debug fast trap: record a trace_win entry (preserving the
	 * locals it clobbers in %g1-%g4) and set trap_freeze to stop
	 * further trap tracing.
	 */
2421.traptrace_freeze:
2422	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2423	TT_TRACE_L(trace_win)
2424	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2425	set	trap_freeze, %g1
2426	mov	1, %g2
2427	st	%g2, [%g1]
2428	FAST_TRAP_DONE
2429
	/*
	 * Debug fast trap: clear trap_freeze to re-enable trap tracing,
	 * then record a trace_win entry (locals preserved as above).
	 */
2430.traptrace_unfreeze:
2431	set	trap_freeze, %g1
2432	st	%g0, [%g1]
2433	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2434	TT_TRACE_L(trace_win)
2435	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2436	FAST_TRAP_DONE
2437
2438#endif /* DEBUG_USER_TRAPTRACECTL */
2439
	/*
	 * getcc fast trap: return the V8 icc bits of %tstate.ccr to the
	 * user in %g1.  Runs on the alternate globals; the %o registers
	 * are shared between global sets, so %o0/%o1 carry values across
	 * the PSTATE_AG toggle while the write to %g1 inside the
	 * normal-globals window targets the user-visible %g1.
	 */
2440.getcc:
2441	CPU_ADDR(%g1, %g2)
2442	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2443	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
2444	rdpr	%tstate, %g3			! get tstate
2445	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2446	set	PSR_ICC, %g2
2447	and	%o0, %g2, %o0			! mask out the rest
2448	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
2449	rdpr	%pstate, %o1
2450	wrpr	%o1, PSTATE_AG, %pstate		! get into normal globals
2451	mov	%o0, %g1			! move ccr to normal %g1
2452	wrpr	%g0, %o1, %pstate		! back into alternate globals
	! alternate %g1 still holds the CPU struct address from above
2453	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2454	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
2455	FAST_TRAP_DONE
2456
	/*
	 * setcc fast trap: take V8 icc bits from the user's (normal
	 * globals) %g1 and fold them into %tstate.ccr.  Same shared-%o /
	 * banked-%g trick as .getcc above.
	 */
2457.setcc:
2458	CPU_ADDR(%g1, %g2)
2459	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2460	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
2461	rdpr	%pstate, %o0
2462	wrpr	%o0, PSTATE_AG, %pstate		! get into normal globals
2463	mov	%g1, %o1			! user's %g1 = new psr.icc
2464	wrpr	%g0, %o0, %pstate		! back to alternates
2465	sll	%o1, PSR_ICC_SHIFT, %g2
2466	set	PSR_ICC, %g3
2467	and	%g2, %g3, %g2			! mask out rest
2468	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
2469	rdpr	%tstate, %g3			! get tstate
2470	srl	%g3, 0, %g3			! clear upper word
2471	or	%g3, %g2, %g3			! or in new bits
2472	wrpr	%g3, %tstate
2473	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2474	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
2475	FAST_TRAP_DONE
2476
2477/*
2478 * getpsr(void)
2479 * Note that the xcc part of the ccr is not provided.
2480 * The V8 code shows why the V9 trap is not faster:
2481 * #define GETPSR_TRAP() \
2482 *      mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2483 */
2484
2485	.type	.getpsr, #function
	/*
	 * Build a V8-style %psr in %o0 from %tstate.ccr.icc, %fprs.fef
	 * and the fixed impl/ver field; xcc is not reported (see comment
	 * above).
	 */
2486.getpsr:
2487	rdpr	%tstate, %g1			! get tstate
2488	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2489	set	PSR_ICC, %g2
2490	and	%o0, %g2, %o0			! mask out the rest
2491
2492	rd	%fprs, %g1			! get fprs
2493	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
2494	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
2495	or	%o0, %g2, %o0			! or result into psr.ef
2496
2497	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
2498	or	%o0, %g2, %o0			! or psr.impl/ver
2499	FAST_TRAP_DONE
2500	SET_SIZE(.getpsr)
2501
2502/*
2503 * setpsr(newpsr)
2504 * Note that there is no support for ccr.xcc in the V9 code.
2505 */
2506
2507	.type	.setpsr, #function
	/*
	 * Apply a V8-style %psr from %o0: icc bits go into %tstate.ccr,
	 * psr.ef drives %fprs.fef and is mirrored into the lwp's
	 * fpu_fprs/fpu_en.  ccr.xcc is deliberately untouched.
	 */
2508.setpsr:
2509	rdpr	%tstate, %g1			! get tstate
2510!	setx	TSTATE_V8_UBITS, %g2
2511	or 	%g0, CCR_ICC, %g3
2512	sllx	%g3, TSTATE_CCR_SHIFT, %g2	! %g2 = icc mask in tstate
2513
2514	andn	%g1, %g2, %g1			! zero current user bits
2515	set	PSR_ICC, %g2
2516	and	%g2, %o0, %g2			! clear all but psr.icc bits
2517	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
2518	wrpr	%g1, %g3, %tstate		! write tstate
2519
2520	set	PSR_EF, %g2
2521	and	%g2, %o0, %g2			! clear all but fp enable bit
2522	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
2523	wr	%g0, %g4, %fprs			! write fprs
2524
2525	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
2526	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
2527	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
2528	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
2529	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
2530	srlx	%g4, 2, %g4			! shift fef value to bit 0
2531	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
2532	FAST_TRAP_DONE
2533	SET_SIZE(.setpsr)
2534
2535/*
2536 * getlgrp
2537 * get home lgrpid on which the calling thread is currently executing.
2538 */
2539	.type	.getlgrp, #function
	! Returns: %o0 = cpu_id of the current CPU,
	!          %o1 = lpl_lgrpid (sign-extended to 64 bits) of the
	!                current thread's home lgroup.
2540.getlgrp:
2541	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2542	ld	[%g1 + CPU_ID], %o0	! load cpu_id
2543	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2544	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
2545	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
2546	sra	%g1, 0, %o1		! sign-extend 32->64 bits
2547	FAST_TRAP_DONE
2548	SET_SIZE(.getlgrp)
2549
2550/*
2551 * Entry for old 4.x trap (trap 0).
2552 */
2553	ENTRY_NP(syscall_trap_4x)
	! If the process registered a trap0 emulation address
	! (pcb_trap0addr), vector there with the original %tnpc passed to
	! user code in %g6; otherwise remap old mmap to SYS_mmap and enter
	! the normal 32-bit syscall path.
2554	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2555	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2556	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2557	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
2558	brz,pn	%g2, 1f			! has it been set?
2559	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
2560	st	%l1, [%g1 + CPU_TMP2]
2561	rdpr	%tnpc, %l1		! save old tnpc
2562	wrpr	%g0, %g2, %tnpc		! setup tnpc
2563
2564	rdpr	%pstate, %l0
2565	wrpr	%l0, PSTATE_AG, %pstate	! switch to normal globals
2566	mov	%l1, %g6		! pass tnpc to user code in %g6
2567	wrpr	%l0, %g0, %pstate	! switch back to alternate globals
2568
2569	! Note that %g1 still contains CPU struct addr
2570	ld	[%g1 + CPU_TMP2], %l1	! restore locals
2571	ld	[%g1 + CPU_TMP1], %l0
2572	FAST_TRAP_DONE_CHK_INTR
25731:
2574	mov	%g1, %l0
2575	st	%l1, [%g1 + CPU_TMP2]
2576	rdpr	%pstate, %l1
2577	wrpr	%l1, PSTATE_AG, %pstate	! normal globals: %g1 = syscall #
2578	!
2579	! check for old syscall mmap which is the only different one which
2580	! must be the same.  Others are handled in the compatibility library.
2581	!
2582	cmp	%g1, OSYS_mmap	! compare to old 4.x mmap
2583	movz	%icc, SYS_mmap, %g1
2584	wrpr	%g0, %l1, %pstate
2585	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2586	ld	[%l0 + CPU_TMP1], %l0
2587	SYSCALL(syscall_trap32)
2588	SET_SIZE(syscall_trap_4x)
2589
2590/*
2591 * Handler for software trap 9.
2592 * Set trap0 emulation address for old 4.x system call trap.
2593 * XXX - this should be a system call.
2594 */
2595	ENTRY_NP(set_trap0_addr)
	! The user passes the new trap0 address in the normal-globals %g1;
	! it is copied to %l1 while in normal globals, word-aligned, and
	! stored in lwp->lwp_pcb.pcb_trap0addr.
2596	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2597	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2598	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2599	st	%l0, [%g1 + CPU_TMP1]	! save some locals
2600	st	%l1, [%g1 + CPU_TMP2]
2601	rdpr	%pstate, %l0
2602	wrpr	%l0, PSTATE_AG, %pstate
2603	mov	%g1, %l1		! normal %g1 = user's trap0 addr
2604	wrpr	%g0, %l0, %pstate
2605	andn	%l1, 3, %l1		! force alignment
2606	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
2607	ld	[%g1 + CPU_TMP1], %l0	! restore locals
2608	ld	[%g1 + CPU_TMP2], %l1
2609	FAST_TRAP_DONE
2610	SET_SIZE(set_trap0_addr)
2611
2612/*
2613 * mmu_trap_tl1
2614 * trap handler for unexpected mmu traps.
2615 * simply checks if the trap was a user lddf/stdf alignment trap, in which
2616 * case we go to fpu_trap or a user trap from the window handler, in which
2617 * case we go save the state on the pcb.  Otherwise, we go to ptl1_panic.
2618 */
2619	.type	mmu_trap_tl1, #function
2620mmu_trap_tl1:
2621#ifdef	TRAPTRACE
2622	TRACE_PTR(%g5, %g6)
2623	GET_TRACE_TICK(%g6)
2624	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2625	rdpr	%tl, %g6
2626	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
2627	rdpr	%tt, %g6
2628	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
2629	rdpr	%tstate, %g6
2630	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
2631	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
2632	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
2633	rdpr	%tpc, %g6
2634	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2635	set	MMU_SFAR, %g6
2636	ldxa	[%g6]ASI_DMMU, %g6
2637	stxa	%g6, [%g5 + TRAP_ENT_F1]%asi
2638	CPU_PADDR(%g7, %g6);
2639	add	%g7, CPU_TL1_HDLR, %g7
2640	lda	[%g7]ASI_MEM, %g6
2641	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
2642	set	0xdeadbeef, %g6
2643	stna	%g6, [%g5 + TRAP_ENT_F3]%asi
2644	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
2645	TRACE_NEXT(%g5, %g6, %g7)
2646#endif /* TRAPTRACE */
2647
	! Panther special case: a T_DATA_EXCEPTION with the SFSR parity
	! bit set is a DTLB parity error; divert to the parity handler.
2648	GET_CPU_IMPL(%g5)
2649	cmp	%g5, PANTHER_IMPL
2650	bne	mmu_trap_tl1_4
2651	  nop
2652	rdpr	%tt, %g5
2653	cmp	%g5, T_DATA_EXCEPTION
2654	bne	mmu_trap_tl1_4
2655	  nop
2656	wr	%g0, ASI_DMMU, %asi
2657	ldxa	[MMU_SFSR]%asi, %g5
2658	mov	1, %g6
2659	sllx	%g6, PN_SFSR_PARITY_SHIFT, %g6
2660	andcc	%g5, %g6, %g0
2661	bz	mmu_trap_tl1_4
2662
2663	/*
2664	 * We are running on a Panther and have hit a DTLB parity error.
2665	 */
2666	ldxa	[MMU_TAG_ACCESS]%asi, %g2
2667	mov	%g5, %g3
2668	ba,pt	%xcc, .mmu_exception_is_tlb_parity
2669	mov	T_DATA_EXCEPTION, %g1
2670
2671mmu_trap_tl1_4:
2672	CPU_PADDR(%g7, %g6);
2673	add     %g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
2674	/*
2675	 * AM is cleared on trap, so addresses are 64 bit
2676	 */
2677	lda     [%g7]ASI_MEM, %g6
2678	brz,a,pt %g6, 1f			! flag clear => window path
2679	  nop
2680	/*
2681	 * We are going to update cpu_m.tl1_hdlr using physical address.
2682	 * Flush the D$ line, so that stale data won't be accessed later.
2683	 */
2684	CPU_ADDR(%g6, %g5)
2685	add     %g6, CPU_TL1_HDLR, %g6		! %g6 = &cpu_m.tl1_hdlr (VA)
2686	GET_CPU_IMPL(%g5)
	! D$ flush method differs by implementation: Cheetah-class uses
	! ASI_DC_INVAL on the PA; Spitfire-class uses ASI_DC_TAG on the
	! VA masked by dcache_line_mask; older impls skip the flush.
2687	cmp	%g5, CHEETAH_IMPL
2688	bl,pt	%icc, 3f
2689	 cmp	%g5, SPITFIRE_IMPL
2690	stxa	%g0, [%g7]ASI_DC_INVAL
2691	membar	#Sync
2692	ba,pt	%xcc, 2f
2693	 nop
26943:
2695	bl,pt	%icc, 2f
2696	 sethi	%hi(dcache_line_mask), %g5
2697	ld	[%g5 + %lo(dcache_line_mask)], %g5
2698	and	%g6, %g5, %g5
2699	stxa	%g0, [%g5]ASI_DC_TAG
2700	membar	#Sync
27012:
2702	sta     %g0, [%g7]ASI_MEM		! clear cpu_m.tl1_hdlr via PA
2703	SWITCH_GLOBALS				! back to mmu globals
2704	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
27051:
	! tl1_hdlr not set: only legitimate if we trapped out of a
	! window spill/fill handler; peek at the TL-1 %tt to verify,
	! otherwise ptl1_panic.
2706	rdpr	%tt, %g5
2707	rdpr	%tl, %g7
2708	sub	%g7, 1, %g6
2709	wrpr	%g6, %tl
2710	rdpr	%tt, %g6
2711	wrpr	%g7, %tl
2712	and	%g6, WTRAP_TTMASK, %g6
2713	cmp	%g6, WTRAP_TYPE
2714	bne,a,pn %xcc, ptl1_panic
2715	mov	PTL1_BAD_MMUTRAP, %g1
2716	rdpr	%tpc, %g7
2717	/* tpc should be in the trap table */
2718	set	trap_table, %g6
2719	cmp	%g7, %g6
2720	blt,a,pn %xcc, ptl1_panic
2721	  mov	PTL1_BAD_MMUTRAP, %g1
2722	set	etrap_table, %g6
2723	cmp	%g7, %g6
2724	bge,a,pn %xcc, ptl1_panic
2725	  mov	PTL1_BAD_MMUTRAP, %g1
2726	cmp	%g5, T_ALIGNMENT
2727	move	%icc, MMU_SFAR, %g6
2728	movne	%icc, MMU_TAG_ACCESS, %g6
2729	ldxa	[%g6]ASI_DMMU, %g6
2730	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
2731	add	%g7, WTRAP_FAULTOFF, %g7
2732	wrpr	%g0, %g7, %tnpc		! resume at the handler's fault path
2733	done
2734	SET_SIZE(mmu_trap_tl1)
2735
2736/*
2737 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2738 * traps are valid only when kmdb is loaded.  When the debugger is active,
2739 * the code below is rewritten to transfer control to the appropriate
2740 * debugger entry points.
2741 */
2742	.global	kmdb_trap
2743	.align	8
2744kmdb_trap:
	! Default (kmdb not active): bounce back to trap_table0.  The
	! jmp/nop below are unreachable here; per the comment above, kmdb
	! rewrites this slot at runtime to enter the debugger.
2745	ba,a	trap_table0
2746	jmp	%g1 + 0
2747	nop
2748
2749	.global	kmdb_trap_tl1
2750	.align	8
2751kmdb_trap_tl1:
	! Same patchable stub as kmdb_trap, for the TL>0 entry.
2752	ba,a	trap_table0
2753	jmp	%g1 + 0
2754	nop
2755
2756/*
2757 * This entry is copied from OBP's trap table during boot.
2758 */
2759	.global	obp_bpt
2760	.align	8
2761obp_bpt:
2762	NOT			! placeholder; OBP's entry is copied here at boot
2763
2764/*
2765 * if kernel, set PCONTEXT to 0 for debuggers
2766 * if user, clear nucleus page sizes
2767 */
2768	.global kctx_obp_bpt
2769kctx_obp_bpt:
2770	set	obp_bpt, %g2		! continue at obp_bpt when done
27711:
2772	mov	MMU_PCONTEXT, %g1
2773	ldxa	[%g1]ASI_DMMU, %g1
2774	srlx	%g1, CTXREG_NEXT_SHIFT, %g3
2775	brz,pt	%g3, 3f			! nucleus pgsz is 0, no problem
2776	  sllx	%g3, CTXREG_NEXT_SHIFT, %g3
2777	set	CTXREG_CTX_MASK, %g4	! check Pcontext
2778	btst	%g4, %g1
2779	bz,a,pt	%xcc, 2f
2780	  clr	%g3			! kernel:  PCONTEXT=0
2781	xor	%g3, %g1, %g3		! user:	clr N_pgsz0/1 bits
27822:
	! demap both TLBs before rewriting PCONTEXT, then flush to make
	! the IMMU see the change
2783	set	DEMAP_ALL_TYPE, %g1
2784	stxa	%g0, [%g1]ASI_DTLB_DEMAP
2785	stxa	%g0, [%g1]ASI_ITLB_DEMAP
2786	mov	MMU_PCONTEXT, %g1
2787	stxa	%g3, [%g1]ASI_DMMU
2788        membar  #Sync
2789	sethi	%hi(FLUSH_ADDR), %g1
2790	flush	%g1			! flush required by immu
27913:
2792	jmp	%g2
2793	  nop
2794
2795
2796#ifdef	TRAPTRACE
2797/*
2798 * TRAPTRACE support.
2799 * labels here are branched to with "rd %pc, %g7" in the delay slot.
2800 * Return is done by "jmp %g7 + 4".
2801 */
2802
	/*
	 * Generic trap-trace record: tick, tl, tt, tstate, sp, tpc.
	 * Called with return pc in %g7 (see TRAPTRACE comment above);
	 * returns via "jmp %g7 + 4".
	 */
2803trace_gen:
2804	TRACE_PTR(%g3, %g6)
2805	GET_TRACE_TICK(%g6)
2806	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2807	rdpr	%tl, %g6
2808	stha	%g6, [%g3 + TRAP_ENT_TL]%asi
2809	rdpr	%tt, %g6
2810	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2811	rdpr	%tstate, %g6
2812	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2813	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2814	rdpr	%tpc, %g6
2815	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2816	TRACE_NEXT(%g3, %g4, %g5)
2817	jmp	%g7 + 4
2818	nop
2819
	/*
	 * Window-trap trace record; returns via %l4 and leaves
	 * %l0-%l2 cleared (caller cleans %l4 itself).
	 */
2820trace_win:
2821	TRACE_WIN_INFO(0, %l0, %l1, %l2)
2822	! Keep the locals as clean as possible, caller cleans %l4
2823	clr	%l2
2824	clr	%l1
2825	jmp	%l4 + 4
2826	  clr	%l0
2827
2828/*
2829 * Trace a tsb hit
2830 * g1 = tsbe pointer (in/clobbered)
2831 * g2 = tag access register (in)
2832 * g3 - g4 = scratch (clobbered)
2833 * g5 = tsbe data (in)
2834 * g6 = scratch (clobbered)
2835 * g7 = pc we jumped here from (in)
2836 */
2837
2838	! Do not disturb %g5, it will be used after the trace
	! Trace a TSB hit (register contract in the comment above);
	! returns via "jmp %g7 + 4" without touching %g5.
2839	ALTENTRY(trace_tsbhit)
2840	TRACE_TSBHIT(0)
2841	jmp	%g7 + 4
2842	nop
2843
2844/*
2845 * Trace a TSB miss
2846 *
2847 * g1 = tsb8k pointer (in)
2848 * g2 = tag access register (in)
2849 * g3 = tsb4m pointer (in)
2850 * g4 = tsbe tag (in/clobbered)
2851 * g5 - g6 = scratch (clobbered)
2852 * g7 = pc we jumped here from (in)
2853 */
2854	.global	trace_tsbmiss
2855trace_tsbmiss:
2856	membar	#Sync
2857	sethi	%hi(FLUSH_ADDR), %g6
2858	flush	%g6
2859	TRACE_PTR(%g5, %g6)
2860	GET_TRACE_TICK(%g6)
2861	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2862	stxa	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
2863	stxa	%g4, [%g5 + TRAP_ENT_F1]%asi		! tsb tag
2864	rdpr	%tnpc, %g6
2865	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
2866	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
2867	srlx	%g1, 32, %g6
2868	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! huh?
2869	rdpr	%tpc, %g6
2870	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2871	rdpr	%tl, %g6
2872	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
2873	rdpr	%tt, %g6
2874	or	%g6, TT_MMU_MISS, %g4
2875	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	! record the tag target from whichever MMU (I or D) missed
2876	cmp	%g6, FAST_IMMU_MISS_TT
2877	be,a	%icc, 1f
2878	  ldxa	[%g0]ASI_IMMU, %g6
2879	ldxa	[%g0]ASI_DMMU, %g6
28801:	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
2881	stxa	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
2882	TRACE_NEXT(%g5, %g4, %g6)
2883	jmp	%g7 + 4
2884	nop
2885
2886/*
2887 * g2 = tag access register (in)
2888 * g3 = ctx number (in)
2889 */
	/*
	 * Trace a data-protection trap: records tick, tpc, tstate, the
	 * tag access register (%g2) and zeroes the remaining fields.
	 * Returns via "jmp %g7 + 4".
	 */
2890trace_dataprot:
2891	membar	#Sync
2892	sethi	%hi(FLUSH_ADDR), %g6
2893	flush	%g6
2894	TRACE_PTR(%g1, %g6)
2895	GET_TRACE_TICK(%g6)
2896	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
2897	rdpr	%tpc, %g6
2898	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
2899	rdpr	%tstate, %g6
2900	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
2901	stxa	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
2902	stxa	%g0, [%g1 + TRAP_ENT_TR]%asi
2903	stxa	%g0, [%g1 + TRAP_ENT_F1]%asi
2904	stxa	%g0, [%g1 + TRAP_ENT_F2]%asi
2905	stxa	%g0, [%g1 + TRAP_ENT_F3]%asi
2906	stxa	%g0, [%g1 + TRAP_ENT_F4]%asi
2907	rdpr	%tl, %g6
2908	stha	%g6, [%g1 + TRAP_ENT_TL]%asi
2909	rdpr	%tt, %g6
2910	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
2911	TRACE_NEXT(%g1, %g4, %g5)
2912	jmp	%g7 + 4
2913	nop
2914
2915#endif /* TRAPTRACE */
2916
2917/*
2918 * expects offset into tsbmiss area in %g1 and return pc in %g7
2919 */
2920stat_mmu:
	! Increment the 32-bit counter at offset %g1 within this CPU's
	! tsbmiss_area slot; return via "jmp %g7 + 4" with the store in
	! the delay slot.
2921	CPU_INDEX(%g5, %g6)
2922	sethi	%hi(tsbmiss_area), %g6
2923	sllx	%g5, TSBMISS_SHIFT, %g5
2924	or	%g6, %lo(tsbmiss_area), %g6
2925	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
2926	ld	[%g6 + %g1], %g5
2927	add	%g5, 1, %g5
2928	jmp	%g7 + 4
2929	st	%g5, [%g6 + %g1]
2930
2931
2932/*
2933 * fast_trap_done, fast_trap_done_chk_intr:
2934 *
2935 * Due to the design of UltraSPARC pipeline, pending interrupts are not
2936 * taken immediately after a RETRY or DONE instruction which causes IE to
2937 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2938 * to execute first before taking any interrupts. If that instruction
2939 * results in other traps, and if the corresponding trap handler runs
2940 * entirely at TL=1 with interrupts disabled, then pending interrupts
2941 * won't be taken until after yet another instruction following the %tpc
2942 * or %tnpc.
2943 *
2944 * A malicious user program can use this feature to block out interrupts
2945 * for extended durations, which can result in send_mondo_timeout kernel
2946 * panic.
2947 *
2948 * This problem is addressed by servicing any pending interrupts via
2949 * sys_trap before returning back to the user mode from a fast trap
2950 * handler. The "done" instruction within a fast trap handler, which
2951 * runs entirely at TL=1 with interrupts disabled, is replaced with the
2952 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2953 * entry point.
2954 *
2955 * We check for any pending interrupts here and force a sys_trap to
2956 * service those interrupts, if any. To minimize overhead, pending
2957 * interrupts are checked if the %tpc happens to be at 16K boundary,
2958 * which allows a malicious program to execute at most 4K consecutive
2959 * instructions before we service any pending interrupts. If a worst
2960 * case fast trap handler takes about 2 usec, then interrupts will be
2961 * blocked for at most 8 msec, less than a clock tick.
2962 *
2963 * For the cases where we don't know if the %tpc will cross a 16K
2964 * boundary, we can't use the above optimization and always process
 * any pending interrupts via fast_trap_done_chk_intr entry point.
2966 *
2967 * Entry Conditions:
2968 * 	%pstate		am:0 priv:1 ie:0
2969 * 			globals are AG (not normal globals)
2970 */
2971
	.global	fast_trap_done, fast_trap_done_chk_intr

	!
	! fast_trap_done: common return path for fast trap handlers.
	! Only check for pending interrupts when %tpc sits on a 16K
	! boundary (low 14 bits zero) — see the block comment above.
	! Otherwise return immediately via "done".
	!
fast_trap_done:
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,a,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	  ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5	! annulled unless taken
	done				! not on a 16K boundary: skip the check

	!
	! fast_trap_done_chk_intr: variant for callers that cannot rely
	! on the 16K-boundary optimization; always checks for pending
	! interrupts before returning.
	!
fast_trap_done_chk_intr:
	ldxa	[%g0]ASI_INTR_RECEIVE_STATUS, %g5

	! %g5 = interrupt receive status, %g6 = soft interrupt bits;
	! anything nonzero means an interrupt is pending.
1:	rd	SOFTINT, %g6
	and	%g5, IRSR_BUSY, %g5
	orcc	%g5, %g6, %g0
	bnz,pn	%xcc, 2f		! branch if any pending intr
	nop
	done				! nothing pending: normal fast-trap return

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 * %g1 = TL0 handler to run (a no-op); %g4 = -1, presumably
	 * "use current PIL" — confirm against the sys_trap convention.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4

	!
	! No-op TL0 handler used by the dummy sys_trap above; it simply
	! returns so sys_trap's exit path can service pending interrupts.
	!
fast_trap_dummy_call:
	retl
	nop
3012
3013#endif	/* lint */
3014