xref: /titanic_44/usr/src/uts/sun4v/ml/trap_table.s (revision b7f45089ccbe01bab3d7c7377b49d80d2ae18a69)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29#if !defined(lint)
30#include "assym.h"
31#endif /* !lint */
32#include <sys/asm_linkage.h>
33#include <sys/privregs.h>
34#include <sys/sun4asi.h>
35#include <sys/machasi.h>
36#include <sys/hypervisor_api.h>
37#include <sys/machtrap.h>
38#include <sys/machthread.h>
39#include <sys/pcb.h>
40#include <sys/pte.h>
41#include <sys/mmu.h>
42#include <sys/machpcb.h>
43#include <sys/async.h>
44#include <sys/intreg.h>
45#include <sys/scb.h>
46#include <sys/psr_compat.h>
47#include <sys/syscall.h>
48#include <sys/machparam.h>
49#include <sys/traptrace.h>
50#include <vm/hat_sfmmu.h>
51#include <sys/archsystm.h>
52#include <sys/utrap.h>
53#include <sys/clock.h>
54#include <sys/intr.h>
55#include <sys/fpu/fpu_simulator.h>
56#include <vm/seg_spt.h>
57
58/*
59 * WARNING: If you add a fast trap handler which can be invoked by a
60 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
61 * instead of the "done" instruction to return back to the user mode. See
62 * comments for the "fast_trap_done" entry point for more information.
63 *
64 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
65 * cases where you always want to process any pending interrupts before
66 * returning back to the user mode.
67 */
68#define	FAST_TRAP_DONE		\
69	ba,a	fast_trap_done
70
71#define	FAST_TRAP_DONE_CHK_INTR	\
72	ba,a	fast_trap_done_chk_intr
73
74/*
75 * SPARC V9 Trap Table
76 *
77 * Most of the trap handlers are made from common building
78 * blocks, and some are instantiated multiple times within
79 * the trap table. So, I build a bunch of macros, then
80 * populate the table using only the macros.
81 *
82 * Many macros branch to sys_trap.  Its calling convention is:
83 *	%g1		kernel trap handler
84 *	%g2, %g3	args for above
85 *	%g4		desired %pil
86 */
87
88#ifdef	TRAPTRACE
89
90/*
91 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
92 */
93#define	TT_TRACE(label)		\
94	ba	label		;\
95	rd	%pc, %g7
96#define	TT_TRACE_INS	2
97
98#define	TT_TRACE_L(label)	\
99	ba	label		;\
100	rd	%pc, %l4	;\
101	clr	%l4
102#define	TT_TRACE_L_INS	3
103
104#else
105
106#define	TT_TRACE(label)
107#define	TT_TRACE_INS	0
108
109#define	TT_TRACE_L(label)
110#define	TT_TRACE_L_INS	0
111
112#endif
113
114#ifdef NIAGARA_ERRATUM_42
115
116#define	NI_GL_RESET_TT0	\
117	wrpr	%g0, 2, %gl			;\
118	wrpr	%g0, 1, %gl
119
120#define	NI_GL_RESET_TT0_INS	2
121
122#else /* NIAGARA_ERRATUM_42 */
123
124#define	NI_GL_RESET_TT0
125#define	NI_GL_RESET_TT0_INS	0
126
127#endif /* NIAGARA_ERRATUM_42 */
128
129/*
130 * This macro is used to update per cpu mmu stats in perf critical
131 * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
132 * is defined.
133 */
134#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
135#define	HAT_PERCPU_DBSTAT(stat)			\
136	mov	stat, %g1			;\
137	ba	stat_mmu			;\
138	rd	%pc, %g7
139#else
140#define	HAT_PERCPU_DBSTAT(stat)
141#endif /* DEBUG || SFMMU_STAT_GATHER */
142
143/*
144 * This first set are funneled to trap() with %tt as the type.
145 * Trap will then either panic or send the user a signal.
146 */
147/*
148 * NOT is used for traps that just shouldn't happen.
149 * It comes in both single and quadruple flavors.
150 */
151#if !defined(lint)
152	.global	trap
153#endif /* !lint */
154#define	NOT			\
155	TT_TRACE(trace_gen)	;\
156	set	trap, %g1	;\
157	rdpr	%tt, %g3	;\
158	ba,pt	%xcc, sys_trap	;\
159	sub	%g0, 1, %g4	;\
160	.align	32
161#define	NOT4	NOT; NOT; NOT; NOT
162
/*
 * NOTP is for traps that should not happen; unlike NOT it does not
 * funnel through sys_trap/trap() but panics immediately via
 * ptl1_panic with reason code PTL1_BAD_TRAP.
 */
163#define	NOTP				\
164	TT_TRACE(trace_gen)		;\
165	ba,pt	%xcc, ptl1_panic	;\
166	  mov	PTL1_BAD_TRAP, %g1	;\
167	.align	32
168#define	NOTP4	NOTP; NOTP; NOTP; NOTP
169
170/*
171 * RED is for traps that use the red mode handler.
172 * We should never see these either.
173 */
174#define	RED	NOT
175/*
176 * BAD is used for trap vectors we don't have a kernel
177 * handler for.
178 * It also comes in single and quadruple versions.
179 */
180#define	BAD	NOT
181#define	BAD4	NOT4
182
/*
 * DONE retires the trap in place with the "done" instruction;
 * no kernel trap handler is involved.
 */
183#define	DONE			\
184	done;			\
185	.align	32
186
187/*
188 * TRAP vectors to the trap() function.
189 * Its main use is for user errors.
190 */
191#if !defined(lint)
192	.global	trap
193#endif /* !lint */
194#define	TRAP(arg)		\
195	TT_TRACE(trace_gen)	;\
196	set	trap, %g1	;\
197	mov	arg, %g3	;\
198	ba,pt	%xcc, sys_trap	;\
199	sub	%g0, 1, %g4	;\
200	.align	32
201
202/*
203 * SYSCALL is used for system calls on both ILP32 and LP64 kernels
204 * depending on the "which" parameter (should be either syscall_trap
205 * or syscall_trap32).
206 */
207#define	SYSCALL(which)			\
208	TT_TRACE(trace_gen)		;\
209	set	(which), %g1		;\
210	ba,pt	%xcc, sys_trap		;\
211	sub	%g0, 1, %g4		;\
212	.align	32
213
214/*
215 * GOTO just jumps to a label.
216 * It's used for things that can be fixed without going thru sys_trap.
217 */
218#define	GOTO(label)		\
219	.global	label		;\
220	ba,a	label		;\
221	.empty			;\
222	.align	32
223
224/*
225 * GOTO_TT just jumps to a label.
226 * Correctable ECC error traps at level 0 and 1 will use this macro.
227 * It's used for things that can be fixed without going thru sys_trap.
228 */
229#define	GOTO_TT(label, ttlabel)		\
230	.global	label		;\
231	TT_TRACE(ttlabel)	;\
232	ba,a	label		;\
233	.empty			;\
234	.align	32
235
236/*
237 * Privileged traps
238 * Takes breakpoint if privileged, calls trap() if not.
239 */
240#define	PRIV(label)			\
241	rdpr	%tstate, %g1		;\
242	btst	TSTATE_PRIV, %g1	;\
243	bnz	label			;\
244	rdpr	%tt, %g3		;\
245	set	trap, %g1		;\
246	ba,pt	%xcc, sys_trap		;\
247	sub	%g0, 1, %g4		;\
248	.align	32
249
250
251/*
252 * DTrace traps.
253 */
254#define	DTRACE_FASTTRAP			\
255	.global dtrace_fasttrap_probe				;\
256	.global dtrace_fasttrap_probe_ptr			;\
257	sethi	%hi(dtrace_fasttrap_probe_ptr), %g4		;\
258	ldn	[%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4	;\
259	set	dtrace_fasttrap_probe, %g1			;\
260	brnz,pn	%g4, user_trap					;\
261	sub	%g0, 1, %g4					;\
262	FAST_TRAP_DONE						;\
263	.align	32
264
265#define	DTRACE_PID			\
266	.global dtrace_pid_probe				;\
267	set	dtrace_pid_probe, %g1				;\
268	ba,pt	%xcc, user_trap					;\
269	sub	%g0, 1, %g4					;\
270	.align	32
271
272#define	DTRACE_RETURN			\
273	.global dtrace_return_probe				;\
274	set	dtrace_return_probe, %g1			;\
275	ba,pt	%xcc, user_trap					;\
276	sub	%g0, 1, %g4					;\
277	.align	32
278
279/*
280 * REGISTER WINDOW MANAGEMENT MACROS
281 */
282
283/*
284 * various convenient units of padding
285 */
286#define	SKIP(n)	.skip 4*(n)
287
288/*
289 * CLEAN_WINDOW is the simple handler for cleaning a register window.
290 */
291#define	CLEAN_WINDOW						\
292	TT_TRACE_L(trace_win)					;\
293	rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
294	clr %l0; clr %l1; clr %l2; clr %l3			;\
295	clr %l4; clr %l5; clr %l6; clr %l7			;\
296	clr %o0; clr %o1; clr %o2; clr %o3			;\
297	clr %o4; clr %o5; clr %o6; clr %o7			;\
298	retry; .align 128
299
300#if !defined(lint)
301
302/*
303 * If we get an unresolved tlb miss while in a window handler, the fault
304 * handler will resume execution at the last instruction of the window
305 * hander, instead of delivering the fault to the kernel.  Spill handlers
306 * use this to spill windows into the wbuf.
307 *
308 * The mixed handler works by checking %sp, and branching to the correct
309 * handler.  This is done by branching back to label 1: for 32b frames,
310 * or label 2: for 64b frames; which implies the handler order is: 32b,
311 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
312 * allow the branches' delay slots to contain useful instructions.
313 */
314
315/*
316 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
317 * assumes that the kernel context and the nucleus context are the
318 * same.  The stack pointer is required to be eight-byte aligned even
319 * though this code only needs it to be four-byte aligned.
320 */
321#define	SPILL_32bit(tail)					\
322	NI_GL_RESET_TT0						;\
323	srl	%sp, 0, %sp					;\
3241:	st	%l0, [%sp + 0]					;\
325	st	%l1, [%sp + 4]					;\
326	st	%l2, [%sp + 8]					;\
327	st	%l3, [%sp + 12]					;\
328	st	%l4, [%sp + 16]					;\
329	st	%l5, [%sp + 20]					;\
330	st	%l6, [%sp + 24]					;\
331	st	%l7, [%sp + 28]					;\
332	st	%i0, [%sp + 32]					;\
333	st	%i1, [%sp + 36]					;\
334	st	%i2, [%sp + 40]					;\
335	st	%i3, [%sp + 44]					;\
336	st	%i4, [%sp + 48]					;\
337	st	%i5, [%sp + 52]					;\
338	st	%i6, [%sp + 56]					;\
339	st	%i7, [%sp + 60]					;\
340	TT_TRACE_L(trace_win)					;\
341	saved							;\
342	retry							;\
343	SKIP(31-19-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
344	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
345	.empty
346
347/*
348 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
349 * wide address space via the designated asi.  It is used to spill
350 * non-kernel windows.  The stack pointer is required to be eight-byte
351 * aligned even though this code only needs it to be four-byte
352 * aligned.
353 */
354#define	SPILL_32bit_asi(asi_num, tail)				\
355	NI_GL_RESET_TT0						;\
356	srl	%sp, 0, %sp					;\
3571:	sta	%l0, [%sp + %g0]asi_num				;\
358	mov	4, %g1						;\
359	sta	%l1, [%sp + %g1]asi_num				;\
360	mov	8, %g2						;\
361	sta	%l2, [%sp + %g2]asi_num				;\
362	mov	12, %g3						;\
363	sta	%l3, [%sp + %g3]asi_num				;\
364	add	%sp, 16, %g4					;\
365	sta	%l4, [%g4 + %g0]asi_num				;\
366	sta	%l5, [%g4 + %g1]asi_num				;\
367	sta	%l6, [%g4 + %g2]asi_num				;\
368	sta	%l7, [%g4 + %g3]asi_num				;\
369	add	%g4, 16, %g4					;\
370	sta	%i0, [%g4 + %g0]asi_num				;\
371	sta	%i1, [%g4 + %g1]asi_num				;\
372	sta	%i2, [%g4 + %g2]asi_num				;\
373	sta	%i3, [%g4 + %g3]asi_num				;\
374	add	%g4, 16, %g4					;\
375	sta	%i4, [%g4 + %g0]asi_num				;\
376	sta	%i5, [%g4 + %g1]asi_num				;\
377	sta	%i6, [%g4 + %g2]asi_num				;\
378	sta	%i7, [%g4 + %g3]asi_num				;\
379	TT_TRACE_L(trace_win)					;\
380	saved							;\
381	retry							;\
382	SKIP(31-25-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
383	ba,a,pt %xcc, fault_32bit_/**/tail			;\
384	.empty
385
386#define	SPILL_32bit_tt1(asi_num, tail)				\
387	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
388	.empty							;\
389	.align 128
390
391
392/*
393 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
394 * that the kernel context and the nucleus context are the same.  The
395 * stack pointer is required to be eight-byte aligned even though this
396 * code only needs it to be four-byte aligned.
397 */
398#define	FILL_32bit(tail)					\
399	srl	%sp, 0, %sp					;\
4001:	TT_TRACE_L(trace_win)					;\
401	ld	[%sp + 0], %l0					;\
402	ld	[%sp + 4], %l1					;\
403	ld	[%sp + 8], %l2					;\
404	ld	[%sp + 12], %l3					;\
405	ld	[%sp + 16], %l4					;\
406	ld	[%sp + 20], %l5					;\
407	ld	[%sp + 24], %l6					;\
408	ld	[%sp + 28], %l7					;\
409	ld	[%sp + 32], %i0					;\
410	ld	[%sp + 36], %i1					;\
411	ld	[%sp + 40], %i2					;\
412	ld	[%sp + 44], %i3					;\
413	ld	[%sp + 48], %i4					;\
414	ld	[%sp + 52], %i5					;\
415	ld	[%sp + 56], %i6					;\
416	ld	[%sp + 60], %i7					;\
417	restored						;\
418	retry							;\
419	SKIP(31-19-TT_TRACE_L_INS)				;\
420	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
421	.empty
422
423/*
424 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
425 * wide address space via the designated asi.  It is used to fill
426 * non-kernel windows.  The stack pointer is required to be eight-byte
427 * aligned even though this code only needs it to be four-byte
428 * aligned.
429 */
430#define	FILL_32bit_asi(asi_num, tail)				\
431	srl	%sp, 0, %sp					;\
4321:	TT_TRACE_L(trace_win)					;\
433	mov	4, %g1						;\
434	lda	[%sp + %g0]asi_num, %l0				;\
435	mov	8, %g2						;\
436	lda	[%sp + %g1]asi_num, %l1				;\
437	mov	12, %g3						;\
438	lda	[%sp + %g2]asi_num, %l2				;\
439	lda	[%sp + %g3]asi_num, %l3				;\
440	add	%sp, 16, %g4					;\
441	lda	[%g4 + %g0]asi_num, %l4				;\
442	lda	[%g4 + %g1]asi_num, %l5				;\
443	lda	[%g4 + %g2]asi_num, %l6				;\
444	lda	[%g4 + %g3]asi_num, %l7				;\
445	add	%g4, 16, %g4					;\
446	lda	[%g4 + %g0]asi_num, %i0				;\
447	lda	[%g4 + %g1]asi_num, %i1				;\
448	lda	[%g4 + %g2]asi_num, %i2				;\
449	lda	[%g4 + %g3]asi_num, %i3				;\
450	add	%g4, 16, %g4					;\
451	lda	[%g4 + %g0]asi_num, %i4				;\
452	lda	[%g4 + %g1]asi_num, %i5				;\
453	lda	[%g4 + %g2]asi_num, %i6				;\
454	lda	[%g4 + %g3]asi_num, %i7				;\
455	restored						;\
456	retry							;\
457	SKIP(31-25-TT_TRACE_L_INS)				;\
458	ba,a,pt %xcc, fault_32bit_/**/tail			;\
459	.empty
460
461
462/*
463 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
464 * assumes that the kernel context and the nucleus context are the
465 * same.  The stack pointer is required to be eight-byte aligned.
466 */
467#define	SPILL_64bit(tail)					\
468	NI_GL_RESET_TT0						;\
4692:	stx	%l0, [%sp + V9BIAS64 + 0]			;\
470	stx	%l1, [%sp + V9BIAS64 + 8]			;\
471	stx	%l2, [%sp + V9BIAS64 + 16]			;\
472	stx	%l3, [%sp + V9BIAS64 + 24]			;\
473	stx	%l4, [%sp + V9BIAS64 + 32]			;\
474	stx	%l5, [%sp + V9BIAS64 + 40]			;\
475	stx	%l6, [%sp + V9BIAS64 + 48]			;\
476	stx	%l7, [%sp + V9BIAS64 + 56]			;\
477	stx	%i0, [%sp + V9BIAS64 + 64]			;\
478	stx	%i1, [%sp + V9BIAS64 + 72]			;\
479	stx	%i2, [%sp + V9BIAS64 + 80]			;\
480	stx	%i3, [%sp + V9BIAS64 + 88]			;\
481	stx	%i4, [%sp + V9BIAS64 + 96]			;\
482	stx	%i5, [%sp + V9BIAS64 + 104]			;\
483	stx	%i6, [%sp + V9BIAS64 + 112]			;\
484	stx	%i7, [%sp + V9BIAS64 + 120]			;\
485	TT_TRACE_L(trace_win)					;\
486	saved							;\
487	retry							;\
488	SKIP(31-18-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
489	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
490	.empty
491
492#define	SPILL_64bit_ktt1(tail)				\
493	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
494	.empty							;\
495	.align 128
496
497#define	SPILL_mixed_ktt1(tail)				\
498	btst	1, %sp						;\
499	bz,a,pt	%xcc, fault_32bit_/**/tail			;\
500	srl	%sp, 0, %sp					;\
501	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
502	.empty							;\
503	.align 128
504
505/*
506 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
507 * wide address space via the designated asi.  It is used to spill
508 * non-kernel windows.  The stack pointer is required to be eight-byte
509 * aligned.
510 */
511#define	SPILL_64bit_asi(asi_num, tail)				\
512	NI_GL_RESET_TT0						;\
513	mov	0 + V9BIAS64, %g1				;\
5142:	stxa	%l0, [%sp + %g1]asi_num				;\
515	mov	8 + V9BIAS64, %g2				;\
516	stxa	%l1, [%sp + %g2]asi_num				;\
517	mov	16 + V9BIAS64, %g3				;\
518	stxa	%l2, [%sp + %g3]asi_num				;\
519	mov	24 + V9BIAS64, %g4				;\
520	stxa	%l3, [%sp + %g4]asi_num				;\
521	add	%sp, 32, %g5					;\
522	stxa	%l4, [%g5 + %g1]asi_num				;\
523	stxa	%l5, [%g5 + %g2]asi_num				;\
524	stxa	%l6, [%g5 + %g3]asi_num				;\
525	stxa	%l7, [%g5 + %g4]asi_num				;\
526	add	%g5, 32, %g5					;\
527	stxa	%i0, [%g5 + %g1]asi_num				;\
528	stxa	%i1, [%g5 + %g2]asi_num				;\
529	stxa	%i2, [%g5 + %g3]asi_num				;\
530	stxa	%i3, [%g5 + %g4]asi_num				;\
531	add	%g5, 32, %g5					;\
532	stxa	%i4, [%g5 + %g1]asi_num				;\
533	stxa	%i5, [%g5 + %g2]asi_num				;\
534	stxa	%i6, [%g5 + %g3]asi_num				;\
535	stxa	%i7, [%g5 + %g4]asi_num				;\
536	TT_TRACE_L(trace_win)					;\
537	saved							;\
538	retry							;\
539	SKIP(31-25-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
540	ba,a,pt %xcc, fault_64bit_/**/tail			;\
541	.empty
542
543#define	SPILL_64bit_tt1(asi_num, tail)				\
544	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
545	.empty							;\
546	.align 128
547
548/*
549 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
550 * that the kernel context and the nucleus context are the same.  The
551 * stack pointer is required to be eight-byte aligned.
552 */
553#define	FILL_64bit(tail)					\
5542:	TT_TRACE_L(trace_win)					;\
555	ldx	[%sp + V9BIAS64 + 0], %l0			;\
556	ldx	[%sp + V9BIAS64 + 8], %l1			;\
557	ldx	[%sp + V9BIAS64 + 16], %l2			;\
558	ldx	[%sp + V9BIAS64 + 24], %l3			;\
559	ldx	[%sp + V9BIAS64 + 32], %l4			;\
560	ldx	[%sp + V9BIAS64 + 40], %l5			;\
561	ldx	[%sp + V9BIAS64 + 48], %l6			;\
562	ldx	[%sp + V9BIAS64 + 56], %l7			;\
563	ldx	[%sp + V9BIAS64 + 64], %i0			;\
564	ldx	[%sp + V9BIAS64 + 72], %i1			;\
565	ldx	[%sp + V9BIAS64 + 80], %i2			;\
566	ldx	[%sp + V9BIAS64 + 88], %i3			;\
567	ldx	[%sp + V9BIAS64 + 96], %i4			;\
568	ldx	[%sp + V9BIAS64 + 104], %i5			;\
569	ldx	[%sp + V9BIAS64 + 112], %i6			;\
570	ldx	[%sp + V9BIAS64 + 120], %i7			;\
571	restored						;\
572	retry							;\
573	SKIP(31-18-TT_TRACE_L_INS)				;\
574	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
575	.empty
576
577/*
578 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
579 * wide address space via the designated asi.  It is used to fill
580 * non-kernel windows.  The stack pointer is required to be eight-byte
581 * aligned.
582 */
583#define	FILL_64bit_asi(asi_num, tail)				\
584	mov	V9BIAS64 + 0, %g1				;\
5852:	TT_TRACE_L(trace_win)					;\
586	ldxa	[%sp + %g1]asi_num, %l0				;\
587	mov	V9BIAS64 + 8, %g2				;\
588	ldxa	[%sp + %g2]asi_num, %l1				;\
589	mov	V9BIAS64 + 16, %g3				;\
590	ldxa	[%sp + %g3]asi_num, %l2				;\
591	mov	V9BIAS64 + 24, %g4				;\
592	ldxa	[%sp + %g4]asi_num, %l3				;\
593	add	%sp, 32, %g5					;\
594	ldxa	[%g5 + %g1]asi_num, %l4				;\
595	ldxa	[%g5 + %g2]asi_num, %l5				;\
596	ldxa	[%g5 + %g3]asi_num, %l6				;\
597	ldxa	[%g5 + %g4]asi_num, %l7				;\
598	add	%g5, 32, %g5					;\
599	ldxa	[%g5 + %g1]asi_num, %i0				;\
600	ldxa	[%g5 + %g2]asi_num, %i1				;\
601	ldxa	[%g5 + %g3]asi_num, %i2				;\
602	ldxa	[%g5 + %g4]asi_num, %i3				;\
603	add	%g5, 32, %g5					;\
604	ldxa	[%g5 + %g1]asi_num, %i4				;\
605	ldxa	[%g5 + %g2]asi_num, %i5				;\
606	ldxa	[%g5 + %g3]asi_num, %i6				;\
607	ldxa	[%g5 + %g4]asi_num, %i7				;\
608	restored						;\
609	retry							;\
610	SKIP(31-25-TT_TRACE_L_INS)				;\
611	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
612	.empty
613
614
615#endif /* !lint */
616
617/*
618 * SPILL_mixed spills either size window, depending on
619 * whether %sp is even or odd, to a 32-bit address space.
620 * This may only be used in conjunction with SPILL_32bit/
621 * FILL_64bit.
622 * Clear upper 32 bits of %sp if it is odd.
623 * We won't need to clear them in 64 bit kernel.
624 */
625#define	SPILL_mixed						\
626	NI_GL_RESET_TT0						;\
627	btst	1, %sp						;\
628	bz,a,pt	%xcc, 1b					;\
629	srl	%sp, 0, %sp					;\
630	ba,pt	%xcc, 2b					;\
631	nop							;\
632	.align	128
633
634/*
635 * FILL_mixed(ASI) fills either size window, depending on
636 * whether %sp is even or odd, from a 32-bit address space.
637 * This may only be used in conjunction with FILL_32bit/
638 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
639 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
640 * attention should be paid to the instructions that belong
641 * in the delay slots of the branches depending on the type
642 * of fill handler being branched to.
643 * Clear upper 32 bits of %sp if it is odd.
644 * We won't need to clear them in 64 bit kernel.
645 */
646#define	FILL_mixed						\
647	btst	1, %sp						;\
648	bz,a,pt	%xcc, 1b					;\
649	srl	%sp, 0, %sp					;\
650	ba,pt	%xcc, 2b					;\
651	nop							;\
652	.align	128
653
654
655/*
656 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
657 * respectively, into the address space via the designated asi.  The
658 * unbiased stack pointer is required to be eight-byte aligned (even for
659 * the 32-bit case even though this code does not require such strict
660 * alignment).
661 *
662 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
663 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
664 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
665 * window may contain kernel data so in user_rtt we set wstate to call
666 * these spill handlers on the first user spill trap.  These handler then
667 * spill the appropriate window but also back up a window and clean the
668 * window that didn't get a cleanwin trap.
669 */
670#define	SPILL_32clean(asi_num, tail)				\
671	NI_GL_RESET_TT0						;\
672	srl	%sp, 0, %sp					;\
673	sta	%l0, [%sp + %g0]asi_num				;\
674	mov	4, %g1						;\
675	sta	%l1, [%sp + %g1]asi_num				;\
676	mov	8, %g2						;\
677	sta	%l2, [%sp + %g2]asi_num				;\
678	mov	12, %g3						;\
679	sta	%l3, [%sp + %g3]asi_num				;\
680	add	%sp, 16, %g4					;\
681	sta	%l4, [%g4 + %g0]asi_num				;\
682	sta	%l5, [%g4 + %g1]asi_num				;\
683	sta	%l6, [%g4 + %g2]asi_num				;\
684	sta	%l7, [%g4 + %g3]asi_num				;\
685	add	%g4, 16, %g4					;\
686	sta	%i0, [%g4 + %g0]asi_num				;\
687	sta	%i1, [%g4 + %g1]asi_num				;\
688	sta	%i2, [%g4 + %g2]asi_num				;\
689	sta	%i3, [%g4 + %g3]asi_num				;\
690	add	%g4, 16, %g4					;\
691	sta	%i4, [%g4 + %g0]asi_num				;\
692	sta	%i5, [%g4 + %g1]asi_num				;\
693	sta	%i6, [%g4 + %g2]asi_num				;\
694	sta	%i7, [%g4 + %g3]asi_num				;\
695	TT_TRACE_L(trace_win)					;\
696	b	.spill_clean					;\
697	  mov	WSTATE_USER32, %g7				;\
698	SKIP(31-25-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
699	ba,a,pt	%xcc, fault_32bit_/**/tail			;\
700	.empty
701
702#define	SPILL_64clean(asi_num, tail)				\
703	NI_GL_RESET_TT0						;\
704	mov	0 + V9BIAS64, %g1				;\
705	stxa	%l0, [%sp + %g1]asi_num				;\
706	mov	8 + V9BIAS64, %g2				;\
707	stxa	%l1, [%sp + %g2]asi_num				;\
708	mov	16 + V9BIAS64, %g3				;\
709	stxa	%l2, [%sp + %g3]asi_num				;\
710	mov	24 + V9BIAS64, %g4				;\
711	stxa	%l3, [%sp + %g4]asi_num				;\
712	add	%sp, 32, %g5					;\
713	stxa	%l4, [%g5 + %g1]asi_num				;\
714	stxa	%l5, [%g5 + %g2]asi_num				;\
715	stxa	%l6, [%g5 + %g3]asi_num				;\
716	stxa	%l7, [%g5 + %g4]asi_num				;\
717	add	%g5, 32, %g5					;\
718	stxa	%i0, [%g5 + %g1]asi_num				;\
719	stxa	%i1, [%g5 + %g2]asi_num				;\
720	stxa	%i2, [%g5 + %g3]asi_num				;\
721	stxa	%i3, [%g5 + %g4]asi_num				;\
722	add	%g5, 32, %g5					;\
723	stxa	%i4, [%g5 + %g1]asi_num				;\
724	stxa	%i5, [%g5 + %g2]asi_num				;\
725	stxa	%i6, [%g5 + %g3]asi_num				;\
726	stxa	%i7, [%g5 + %g4]asi_num				;\
727	TT_TRACE_L(trace_win)					;\
728	b	.spill_clean					;\
729	  mov	WSTATE_USER64, %g7				;\
730	SKIP(31-25-NI_GL_RESET_TT0_INS-TT_TRACE_L_INS)		;\
731	ba,a,pt	%xcc, fault_64bit_/**/tail			;\
732	.empty
733
734
735/*
736 * Floating point disabled.
737 */
738#define	FP_DISABLED_TRAP		\
739	TT_TRACE(trace_gen)		;\
740	ba,pt	%xcc,.fp_disabled	;\
741	nop				;\
742	.align	32
743
744/*
745 * Floating point exceptions.
746 */
747#define	FP_IEEE_TRAP			\
748	NI_GL_RESET_TT0			;\
749	TT_TRACE(trace_gen)		;\
750	ba,pt	%xcc,.fp_ieee_exception	;\
751	nop				;\
752	.align	32
753
754#define	FP_TRAP				\
755	NI_GL_RESET_TT0			;\
756	TT_TRACE(trace_gen)		;\
757	ba,pt	%xcc,.fp_exception	;\
758	nop				;\
759	.align	32
760
761#if !defined(lint)
762
763/*
764 * ECACHE_ECC error traps at level 0 and level 1
765 */
766#define	ECACHE_ECC(table_name)		\
767	.global	table_name		;\
768table_name:				;\
769	membar	#Sync			;\
770	set	trap, %g1		;\
771	rdpr	%tt, %g3		;\
772	ba,pt	%xcc, sys_trap		;\
773	sub	%g0, 1, %g4		;\
774	.align	32
775
776#endif /* !lint */
777
778/*
779 * illegal instruction trap
780 */
781#define	ILLTRAP_INSTR			  \
782	membar	#Sync			  ;\
783	TT_TRACE(trace_gen)		  ;\
784	or	%g0, P_UTRAP4, %g2	  ;\
785	or	%g0, T_UNIMP_INSTR, %g3   ;\
786	sethi	%hi(.check_v9utrap), %g4  ;\
787	jmp	%g4 + %lo(.check_v9utrap) ;\
788	nop				  ;\
789	.align	32
790
791/*
792 * tag overflow trap
793 */
794#define	TAG_OVERFLOW			  \
795	TT_TRACE(trace_gen)		  ;\
796	or	%g0, P_UTRAP10, %g2	  ;\
797	or	%g0, T_TAG_OVERFLOW, %g3  ;\
798	sethi	%hi(.check_v9utrap), %g4  ;\
799	jmp	%g4 + %lo(.check_v9utrap) ;\
800	nop				  ;\
801	.align	32
802
803/*
804 * divide by zero trap
805 */
806#define	DIV_BY_ZERO			  \
807	TT_TRACE(trace_gen)		  ;\
808	or	%g0, P_UTRAP11, %g2	  ;\
809	or	%g0, T_IDIV0, %g3	  ;\
810	sethi	%hi(.check_v9utrap), %g4  ;\
811	jmp	%g4 + %lo(.check_v9utrap) ;\
812	nop				  ;\
813	.align	32
814
815/*
816 * trap instruction for V9 user trap handlers
817 */
818#define	TRAP_INSTR			  \
819	TT_TRACE(trace_gen)		  ;\
820	or	%g0, T_SOFTWARE_TRAP, %g3 ;\
821	sethi	%hi(.check_v9utrap), %g4  ;\
822	jmp	%g4 + %lo(.check_v9utrap) ;\
823	nop				  ;\
824	.align	32
825#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR
826
827/*
828 * LEVEL_INTERRUPT is for level N interrupts.
829 * VECTOR_INTERRUPT is for the vector trap.
830 */
831#define	LEVEL_INTERRUPT(level)		\
832	.global	tt_pil/**/level		;\
833tt_pil/**/level:			;\
834	ba,pt	%xcc, pil_interrupt	;\
835	mov	level, %g4		;\
836	.align	32
837
838#define	LEVEL14_INTERRUPT			\
839	ba	pil14_interrupt			;\
840	mov	PIL_14, %g4			;\
841	.align	32
842
/*
 * CPU_MONDO and DEV_MONDO vector the sun4v cpu mondo and device
 * mondo interrupt traps to the cpu_mondo and dev_mondo handlers.
 */
843#define CPU_MONDO			\
844	ba,a,pt	%xcc, cpu_mondo		;\
845	.align	32
846
847#define DEV_MONDO			\
848	ba,a,pt	%xcc, dev_mondo		;\
849	.align	32
850
851/*
852 * MMU Trap Handlers.
853 */
854#define	SFSR_CTX_SHIFT	16
855
856#define	IMMU_EXCEPTION							\
857	MMU_FAULT_STATUS_AREA(%g3)					;\
858	rdpr	%tpc, %g2						;\
859	ldx	[%g3 + MMFSA_I_TYPE], %g1				;\
860	ldx	[%g3 + MMFSA_I_CTX], %g3				;\
861	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
862	or	%g3, %g1, %g3						;\
863	ba,pt	%xcc, .mmu_exception_end				;\
864	mov	T_INSTR_EXCEPTION, %g1					;\
865	.align	32
866
867#define	DMMU_EXCEPTION							\
868	MMU_FAULT_STATUS_AREA(%g3)					;\
869	ldx	[%g3 + MMFSA_D_ADDR], %g2				;\
870	ldx	[%g3 + MMFSA_D_TYPE], %g1				;\
871	ldx	[%g3 + MMFSA_D_CTX], %g3				;\
872	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
873	or	%g3, %g1, %g3						;\
874	ba,pt	%xcc, .mmu_exception_end				;\
875	mov	T_DATA_EXCEPTION, %g1					;\
876	.align	32
877
878#define	DMMU_EXC_AG_PRIV						\
879	MMU_FAULT_STATUS_AREA(%g3)					;\
880	ldx	[%g3 + MMFSA_D_ADDR], %g2				;\
881	/* Fault type not available in MMU fault status area */		;\
882	mov	MMFSA_F_PRVACT, %g1					;\
883	ldx	[%g3 + MMFSA_D_CTX], %g3				;\
884	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
885	ba,pt	%xcc, .mmu_priv_exception				;\
886	or	%g3, %g1, %g3						;\
887	.align	32
888
889#define	DMMU_EXC_AG_NOT_ALIGNED						\
890	MMU_FAULT_STATUS_AREA(%g3)					;\
891	ldx	[%g3 + MMFSA_D_ADDR], %g2				;\
892	/* Fault type not available in MMU fault status area */		;\
893	mov	MMFSA_F_UNALIGN, %g1					;\
894	ldx	[%g3 + MMFSA_D_CTX], %g3				;\
895	sllx	%g3, SFSR_CTX_SHIFT, %g3				;\
896	ba,pt	%xcc, .mmu_exception_not_aligned			;\
897	or	%g3, %g1, %g3						;\
898	.align	32
899/*
900 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
901 */
902
903#define	DMMU_EXC_LDDF_NOT_ALIGNED					\
904	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned			;\
905	.align	32
906#define	DMMU_EXC_STDF_NOT_ALIGNED					\
907	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned			;\
908	.align	32
909
910#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
911#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
912#endif
913
914#if defined(cscope)
915/*
916 * Define labels to direct cscope quickly to labels that
917 * are generated by macro expansion of DTLB_MISS().
918 */
919	.global	tt0_dtlbmiss
920tt0_dtlbmiss:
921	.global	tt1_dtlbmiss
922tt1_dtlbmiss:
923	nop
924#endif
925
926/*
927 * Data miss handler (must be exactly 32 instructions)
928 *
929 * This handler is invoked only if the hypervisor has been instructed
930 * not to do any TSB walk.
931 *
932 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
933 * handler.
934 *
935 * User TLB miss handling depends upon whether a user process has one or
936 * two TSBs. User TSB information (physical base and size code) is kept
937 * in two dedicated scratchpad registers. Absence of a user TSB (primarily
938 * second TSB) is indicated by a negative value (-1) in that register.
939 */
940
941#define	DTLB_MISS(table_name)						;\
942	.global	table_name/**/_dtlbmiss					;\
943table_name/**/_dtlbmiss:						;\
944	HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */	;\
945	MMU_FAULT_STATUS_AREA(%g7)					;\
946	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */		;\
947	ldx	[%g7 + MMFSA_D_CTX], %g3	/* g3 = ctx */		;\
948	or	%g2, %g3, %g2			/* XXXQ temp */		;\
949	cmp	%g3, INVALID_CONTEXT					;\
950	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
951	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
952	mov	SCRATCHPAD_UTSBREG2, %g1				;\
953	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
954	brgez,pn %g1, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
955	  nop								;\
956	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
957	ba,pt	%xcc, sfmmu_udtlb_fastpath	/* no 4M TSB, miss */	;\
958	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
959	.align 128
960
961
962#if defined(cscope)
963/*
964 * Define labels to direct cscope quickly to labels that
965 * are generated by macro expansion of ITLB_MISS().
966 */
967	.global	tt0_itlbmiss
968tt0_itlbmiss:
969	.global	tt1_itlbmiss
970tt1_itlbmiss:
971	nop
972#endif
973
974/*
975 * Instruction miss handler.
976 *
977 * This handler is invoked only if the hypervisor has been instructed
978 * not to do any TSB walk.
979 *
980 * ldda instructions will have their ASI patched
981 * by sfmmu_patch_ktsb at runtime.
982 * MUST be EXACTLY 32 instructions or we'll break.
983 */
984
985#define	ITLB_MISS(table_name)						 \
986	.global	table_name/**/_itlbmiss					;\
987table_name/**/_itlbmiss:						;\
988	HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */	;\
989	MMU_FAULT_STATUS_AREA(%g7)					;\
990	ldx	[%g7 + MMFSA_I_ADDR], %g2	/* g2 = address */	;\
991	ldx	[%g7 + MMFSA_I_CTX], %g3	/* g3 = ctx */		;\
992	or	%g2, %g3, %g2			/* XXXQ temp */		;\
993	cmp	%g3, INVALID_CONTEXT					;\
994	ble,pn	%xcc, sfmmu_kitlb_miss					;\
995	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
996	mov	SCRATCHPAD_UTSBREG2, %g1				;\
997	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
998	brgez,pn %g1, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
999	  nop								;\
1000	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
1001	ba,pt	%xcc, sfmmu_uitlb_fastpath	/* no 4M TSB, miss */	;\
1002	  srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
1003	.align 128
1004
1005#define	DTSB_MISS \
1006	GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)
1007
1008#define	ITSB_MISS \
1009	GOTO_TT(sfmmu_slow_immu_miss,trace_immu)
1010
1011/*
1012 * This macro is the first level handler for fast protection faults.
1013 * It first demaps the tlb entry which generated the fault and then
1014 * attempts to set the modify bit on the hash.  It needs to be
1015 * exactly 32 instructions.
1016 */
1017#define	DTLB_PROT							 \
1018	MMU_FAULT_STATUS_AREA(%g7)					;\
1019	ldx	[%g7 + MMFSA_D_ADDR], %g2	/* address */		;\
1020	ldx	[%g7 + MMFSA_D_CTX], %g3	/* %g3 = ctx */		;\
1021	or	%g2, %g3, %g2			/* XXXQ temp */		;\
1022	/*								;\
1023	 *   g2 = tag access register					;\
1024	 *   g3 = ctx number						;\
1025	 */								;\
1026	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
1027					/* clobbers g1 and g6 XXXQ? */	;\
1028	brnz,pt %g3, sfmmu_uprot_trap		/* user trap */		;\
1029	  nop								;\
1030	ba,a,pt	%xcc, sfmmu_kprot_trap		/* kernel trap */	;\
1031	.align 128
1032
1033#define	DMMU_EXCEPTION_TL1						;\
1034	ba,a,pt	%xcc, mmu_trap_tl1					;\
1035	.align 32
1036
1037#define	MISALIGN_ADDR_TL1						;\
1038	ba,a,pt	%xcc, mmu_trap_tl1					;\
1039	.align 32
1040
/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 *
 * Records a trap-trace entry for a TSB hit.  The I-side vs. D-side
 * fault address and context are selected by comparing %tt against
 * the IMMU miss trap types.  When TRAPTRACE is not compiled in, the
 * macro expands to nothing.
 */
#ifdef TRAPTRACE
#define TRACE_TSBHIT(ttextra)						 \
	membar	#Sync							;\
	sethi	%hi(FLUSH_ADDR), %g6					;\
	flush	%g6							;\
	TRACE_PTR(%g3, %g6)						;\
	GET_TRACE_TICK(%g6)						;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi				;\
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */	;\
	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */		;\
	rdpr	%tnpc, %g6						;\
	stna	%g6, [%g3 + TRAP_ENT_F2]%asi				;\
	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */	;\
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi				;\
	rdpr	%tpc, %g6						;\
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi				;\
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)					;\
	rdpr	%tt, %g6						;\
	or	%g6, (ttextra), %g1					;\
	stha	%g1, [%g3 + TRAP_ENT_TT]%asi				;\
	MMU_FAULT_STATUS_AREA(%g4)					;\
	ldx	[%g4 + MMFSA_I_ADDR], %g1				;\
	ldx	[%g4 + MMFSA_D_ADDR], %g4				;\
	cmp	%g6, FAST_IMMU_MISS_TT					;\
	move	%icc, %g1, %g4						;\
	cmp	%g6, T_INSTR_MMU_MISS					;\
	move	%icc, %g1, %g4						;\
	stxa	%g4, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */	;\
	MMU_FAULT_STATUS_AREA(%g4)					;\
	ldx	[%g4 + MMFSA_I_CTX], %g1				;\
	ldx	[%g4 + MMFSA_D_CTX], %g4				;\
	cmp	%g6, FAST_IMMU_MISS_TT					;\
	move	%icc, %g1, %g4						;\
	cmp	%g6, T_INSTR_MMU_MISS					;\
	move	%icc, %g1, %g4						;\
	stna	%g4, [%g3 + TRAP_ENT_TR]%asi				;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define TRACE_TSBHIT(ttextra)
#endif
1091
1092
#if defined(lint)

/*
 * Lint stubs: expose trap_table/scb as C objects so lint can check
 * references to them; the real table is the assembly below.
 */
struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */
1099
1100/*
1101 * =======================================================================
1102 *		SPARC V9 TRAP TABLE
1103 *
1104 * The trap table is divided into two halves: the first half is used when
1105 * taking traps when TL=0; the second half is used when taking traps from
1106 * TL>0. Note that handlers in the second half of the table might not be able
1107 * to make the same assumptions as handlers in the first half of the table.
1108 *
1109 * Worst case trap nesting so far:
1110 *
1111 *	at TL=0 client issues software trap requesting service
1112 *	at TL=1 nucleus wants a register window
1113 *	at TL=2 register window clean/spill/fill takes a TLB miss
1114 *	at TL=3 processing TLB miss
1115 *	at TL=4 handle asynchronous error
1116 *
1117 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
1118 *
1119 * =======================================================================
1120 */
	.section ".text"
	.align	4
	.global trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #function
	.type	trap_table0, #function
	.type	trap_table1, #function
	.type	scb, #function
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	RED;				/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
 	NOT;				/* 00A  reserved */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	NOT;				/* 012	unimplemented LDD */
	NOT;				/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032  reserved */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	NOT;				/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL_INTERRUPT(15);		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	NOT;				/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070  reserved */
	NOT;				/* 071  reserved */
	NOT;				/* 072  reserved */
	NOT;				/* 073  reserved */
	NOT4; NOT4			/* 074 - 07B reserved */
	CPU_MONDO;			/* 07C	cpu_mondo */
	DEV_MONDO;			/* 07D	dev_mondo */
	GOTO_TT(resumable_error, trace_gen);	/* 07E  resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F  non-resumable error */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP,sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP,sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP,sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP,sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS,so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS,so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP,fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP,fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	GOTO(.flushw);			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL(syscall_trap32);	/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129  get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4; 			/* 130 - 137 unused */
	DTRACE_PID;			/* 138  dtrace pid tracing provider */
	DTRACE_FASTTRAP;		/* 139  dtrace fasttrap provider */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL(syscall_trap)		/* 140  LP64 system call */
	BAD;				/* 141  unused */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142  freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143  unfreeze traptrace */
#else
	BAD; BAD;			/* 142 - 143 unused */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C  unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(obp_bpt);			/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
	.size	trap_table0, (.-trap_table0)
/*
 * TL>0 half of the trap table.  Handlers here run with TL already
 * elevated and may not make the same assumptions as the TL=0 half
 * (see the layout comment above trap_table0).
 */
trap_table1:
	NOT4; NOT4;			/* 000 - 007 unused */
	NOT;				/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
 	NOT;				/* 00A  reserved */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030 	data access exception */
	DTSB_MISS;			/* 031  data access MMU miss */
	NOT;				/* 032  reserved */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 040 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070  reserved */
	NOT;				/* 071  reserved */
	NOT;				/* 072  reserved */
	NOT;				/* 073  reserved */
	NOT4; NOT4; NOT4;		/* 074 - 07F reserved */
	NOTP4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP,sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP,sn1);	/* 090	spill 4 normal */
	NOTP4;				/* 094	spill 5 normal */
	SPILL_64bit_ktt1(sk);		/* 098	spill 6 normal */
	SPILL_mixed_ktt1(sk);		/* 09C	spill 7 normal */
	NOTP4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0A4  spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS,so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS,so1);	/* 0B0  spill 4 other */
	NOTP4;				/* 0B4  spill 5 other */
	NOTP4;				/* 0B8  spill 6 other */
	NOTP4;				/* 0BC  spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	NOT4;				/* 0C4	fill 1 normal */
	NOT4;				/* 0C8	fill 2 normal */
	NOT4;				/* 0CC	fill 3 normal */
	NOT4;				/* 0D0	fill 4 normal */
	NOT4;				/* 0D4	fill 5 normal */
	NOT4;				/* 0D8	fill 6 normal */
	NOT4;				/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
/*
 * Code running at TL>0 does not use soft traps, so
 * we can truncate the table here.
 * However:
 * sun4v uses (hypervisor) ta instructions at TL > 0, so
 * provide a safety net for now.
 */
	/* soft traps */
	BAD4; BAD4; BAD4; BAD4;		/* 100 - 10F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 110 - 11F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 120 - 12F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 130 - 13F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 140 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 170 - 17F unused */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
etrap_table:
	.size	trap_table1, (.-trap_table1)
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)
1380
/*
 * We get to exec_fault in the case of an instruction miss and tte
 * has no execute bit set.  We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(TT_MMU_EXEC)
	MMU_FAULT_STATUS_AREA(%g4)
	ldx	[%g4 + MMFSA_I_ADDR], %g2	/* g2 = address */
	ldx	[%g4 + MMFSA_I_CTX], %g3	/* g3 = ctx */
	srlx	%g2, MMU_PAGESHIFT, %g2		! align address to page boundary
	sllx	%g2, MMU_PAGESHIFT, %g2
	or	%g2, %g3, %g2			/* XXXQ temp */
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4				! delay: %g4 arg for sys_trap
1404
/*
 * Misaligned-access fault: if the fault came from user mode and the
 * process has an unaligned-access utrap handler (p_utraps[15])
 * registered, dispatch to it (32-bit vs. 64-bit program selected by
 * the stack-bias bit in %sp); otherwise fall into .mmu_exception_end
 * with T_ALIGNMENT.
 */
.mmu_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f			! privileged: no utrap path
	nop
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5			! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f				! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap			! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7			! delay: %g7 = misaligned addr
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1
1431
/*
 * Privileged-action/instruction fault: if the fault came from user
 * mode and the process has a utrap handler registered in
 * p_utraps[16], dispatch to it; otherwise fall into
 * .mmu_exception_end with T_PRIV_INSTR.
 */
.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f			! privileged: no utrap path
	nop
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1
1448
/*
 * Common MMU exception tail.  On entry %g1 = trap type, %g3 = sfsr.
 * If DTrace has no-fault mode set on this CPU, record BADADDR in the
 * per-CPU dtrace flags and retire the trap with "done"; otherwise
 * pack the sfsr into the upper 32 bits of %g3 and enter trap() via
 * sys_trap.
 */
.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4			! %g4 = &cpu_core[cpuid]
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	1f
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done					! swallow the fault for dtrace

1:
	sllx	%g3, 32, %g3			! sfsr in upper 32 bits
	or	%g3, %g1, %g3			! trap type in lower 32 bits
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
1467
/*
 * FP-disabled trap: a privileged FP-disabled trap is fatal
 * (ptl1_panic).  For user traps, dispatch a registered fp_disabled
 * utrap (p_utraps[7]) if present, else enter fp_disabled() via
 * sys_trap.
 */
.fp_disabled:
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
	bnz,a,pn %icc, ptl1_panic			! kernel fp disabled: panic
	  mov	PTL1_BAD_FPTRAP, %g1

	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,a,pt %g5, 2f
	  nop
	ldn	[%g5 + P_UTRAP7], %g5			! fp_disabled utrap?
	brz,a,pn %g5, 2f
	  nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f				! 32 bit user program
	  nop
	ba,a,pt	%xcc, .setup_v9utrap			! 64 bit user program
	  nop
1:
	ba,pt	%xcc, .setup_utrap
	  or	%g0, %g0, %g7			! delay: no misaligned addr
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
1495
/*
 * FP IEEE 754 exception: a privileged FP trap is fatal (ptl1_panic).
 * For user traps, capture %fsr into %g2, then dispatch a registered
 * utrap from p_utraps[8] (presumably UT_FP_EXCEPTION_IEEE_754 --
 * confirm against <sys/utrap.h>) if present, else enter
 * _fp_ieee_exception() via sys_trap.
 */
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic			! kernel fp trap: panic
	  mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)				! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]			! bounce %fsr through memory
	ldx	[%g1 + CPU_TMP1], %g2			! %g2 = %fsr
	ldn	[%g1 + CPU_THREAD], %g1			! load thread pointer
	ldn	[%g1 + T_PROCP], %g1			! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5			! are there utraps?
	brz,a,pt %g5, 1f
	  nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	  nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
1516
/*
 * Dispatch a 32-bit (V8-style) user trap handler.
 *
 * Register Inputs:
 *	%g5		user trap handler
 *	%g7		misaligned addr - for alignment traps only
 *
 * Opens a new register window for the handler, passes tpc/tnpc (and
 * the misaligned address) in %l1/%l2/%l3, and redirects %tnpc to the
 * handler before retiring the trap.
 */
.setup_utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */
1556
/*
 * Decide whether a V9 user trap (utrap) handler is registered for
 * the current trap; if so, dispatch via .setup_v9utrap, otherwise
 * enter the kernel trap() path.  %g3 = trap type on entry; %g2 is
 * also consumed at label 2 as the p_utraps byte offset for the
 * non-software-trap path (set up by the caller -- confirm at call
 * sites).  Privileged traps always go to trap().
 */
.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f			! privileged: no utrap path
	  nop
	CPU_ADDR(%g4, %g1)				! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5			! load thread pointer
	ldn	[%g5 + T_PROCP], %g5			! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5			! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	  nop

	brz,pt %g5, 3f			! if p_utraps == NULL goto trap()
	  rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	  smul	%g1, CPTRSIZE, %g2	! delay: %g2 = byte offset into p_utraps
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	  nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	  nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
	rdpr	%tpc, %g1		! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4		! and instruction with mask
	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
	  nop				! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5	! %g5 = registered utrap handler
	brnz,a,pt %g5, .setup_v9utrap
	  nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	  sub	%g0, 1, %g4
	/* NOTREACHED */
1604
/*
 * Dispatch a 64-bit (V9-style) user trap handler.
 *
 * Register Inputs:
 *	%g5		user trap handler
 *
 * Opens a new register window, passes tpc/tnpc in %l6/%l7, and
 * redirects %tnpc to the handler.  If pcb single-stepping is active
 * and %tnpc holds the as_userlimit sentinel, the handler address is
 * stashed in pcb_tracepc and %tnpc is left at userlimit so the step
 * completes with a FLTBOUNDS first.
 */
.setup_v9utrap:
	set	trap, %g1			! setup in case we go
	mov	T_FLUSH_PCB, %g3		! through sys_trap on
	sub	%g0, 1, %g4			! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set. In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	  nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	  nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */
1662
/*
 * FP "exception other" handler: fast-path simulation of the fitos
 * instruction for unfinished_FPop traps; every other case falls
 * through to _fp_exception via .fp_exception_cont.
 */
.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]		! bounce %fsr through memory
	ldx	[%g1 + CPU_TMP1], %g2		! %g2 = %fsr

	/*
	 * Cheetah takes unfinished_FPop trap for certain range of operands
	 * to the "fitos" instruction. Instead of going through the slow
	 * software emulation path, we try to simulate the "fitos" instruction
	 * via "fitod" and "fdtos" provided the following conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *  Usage:
	 *	%g1	per cpu address
	 *	%g2	%fsr
	 *	%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn %g7, .fp_exception_cont
	  nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	  andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	  rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	 nop

	/*
	 * This is unfinished FPops trap for "fitos" instruction. We
	 * need to simulate "fitos" via "fitod" and "fdtos" instruction
	 * sequence.
	 *
	 * We need a temporary FP register to do the conversion. Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand. However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to appropriate instruction within the conversion tables
	 * based upon the rs2 and rd values.
	 */

	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7			! one 4-byte insn per rs2 value
	jmp	%g4 + %g7
	  ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	  fitod	%f0, %d62
	  fitod	%f1, %d62
	  fitod	%f2, %d62
	  fitod	%f3, %d62
	  fitod	%f4, %d62
	  fitod	%f5, %d62
	  fitod	%f6, %d62
	  fitod	%f7, %d62
	  fitod	%f8, %d62
	  fitod	%f9, %d62
	  fitod	%f10, %d62
	  fitod	%f11, %d62
	  fitod	%f12, %d62
	  fitod	%f13, %d62
	  fitod	%f14, %d62
	  fitod	%f15, %d62
	  fitod	%f16, %d62
	  fitod	%f17, %d62
	  fitod	%f18, %d62
	  fitod	%f19, %d62
	  fitod	%f20, %d62
	  fitod	%f21, %d62
	  fitod	%f22, %d62
	  fitod	%f23, %d62
	  fitod	%f24, %d62
	  fitod	%f25, %d62
	  fitod	%f26, %d62
	  fitod	%f27, %d62
	  fitod	%f28, %d62
	  fitod	%f29, %d62
	  fitod	%f30, %d62
	  fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7			! one 4-byte insn per rd value
	jmp	%g4 + %g7
	  ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	  fdtos	%d62, %f0
	  fdtos	%d62, %f1
	  fdtos	%d62, %f2
	  fdtos	%d62, %f3
	  fdtos	%d62, %f4
	  fdtos	%d62, %f5
	  fdtos	%d62, %f6
	  fdtos	%d62, %f7
	  fdtos	%d62, %f8
	  fdtos	%d62, %f9
	  fdtos	%d62, %f10
	  fdtos	%d62, %f11
	  fdtos	%d62, %f12
	  fdtos	%d62, %f13
	  fdtos	%d62, %f14
	  fdtos	%d62, %f15
	  fdtos	%d62, %f16
	  fdtos	%d62, %f17
	  fdtos	%d62, %f18
	  fdtos	%d62, %f19
	  fdtos	%d62, %f20
	  fdtos	%d62, %f21
	  fdtos	%d62, %f22
	  fdtos	%d62, %f23
	  fdtos	%d62, %f24
	  fdtos	%d62, %f25
	  fdtos	%d62, %f26
	  fdtos	%d62, %f27
	  fdtos	%d62, %f28
	  fdtos	%d62, %f29
	  fdtos	%d62, %f30
	  fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

/*
 * NOTE(review): this guard uses "#if DEBUG" while the guard above
 * uses "#if defined(DEBUG)"; "#if DEBUG" requires DEBUG to expand to
 * a value -- confirm against the kernel build's -DDEBUG usage.
 */
#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6		! atomic increment via cas loop
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	  or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6		! atomic increment via cas loop
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	  or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE
1858
.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
1868
1869
1870/*
1871 * Register windows
1872 */
1873
/*
 * FILL_32bit_flushw/FILL_64bit_flushw fills a 32/64-bit-wide register window
 * from a 32/64-bit wide address space via the designated asi.
 * It is used to fill windows in user_flushw to avoid going above TL 2.
 */
/* TODO: Use the faster FILL based on FILL_32bit_asi/FILL_64bit_asi */
#define	FILL_32bit_flushw(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g2					;\
	sub	%g2, 1, %g2					;\
	wrpr	%g2, %cwp					;\
1:	srl	%sp, 0, %sp	/* zero-extend 32-bit sp */	;\
	lda	[%sp + 0]%asi, %l0				;\
	lda	[%sp + 4]%asi, %l1				;\
	lda	[%sp + 8]%asi, %l2				;\
	lda	[%sp + 12]%asi, %l3				;\
	lda	[%sp + 16]%asi, %l4				;\
	lda	[%sp + 20]%asi, %l5				;\
	lda	[%sp + 24]%asi, %l6				;\
	lda	[%sp + 28]%asi, %l7				;\
	lda	[%sp + 32]%asi, %i0				;\
	lda	[%sp + 36]%asi, %i1				;\
	lda	[%sp + 40]%asi, %i2				;\
	lda	[%sp + 44]%asi, %i3				;\
	lda	[%sp + 48]%asi, %i4				;\
	lda	[%sp + 52]%asi, %i5				;\
	lda	[%sp + 56]%asi, %i6				;\
	lda	[%sp + 60]%asi, %i7				;\
	restored						;\
	add	%g2, 1, %g2					;\
	wrpr	%g2, %cwp
1905
/*
 * 64-bit variant of FILL_32bit_flushw: fills the previous window
 * from a biased (V9BIAS64) 64-bit stack via the designated asi.
 */
#define	FILL_64bit_flushw(asi_num)				\
	mov	asi_num, %asi					;\
	rdpr	%cwp, %g2					;\
	sub	%g2, 1, %g2					;\
	wrpr	%g2, %cwp					;\
	ldxa	[%sp + V9BIAS64 + 0]%asi, %l0			;\
	ldxa	[%sp + V9BIAS64 + 8]%asi, %l1			;\
	ldxa	[%sp + V9BIAS64 + 16]%asi, %l2			;\
	ldxa	[%sp + V9BIAS64 + 24]%asi, %l3			;\
	ldxa	[%sp + V9BIAS64 + 32]%asi, %l4			;\
	ldxa	[%sp + V9BIAS64 + 40]%asi, %l5			;\
	ldxa	[%sp + V9BIAS64 + 48]%asi, %l6			;\
	ldxa	[%sp + V9BIAS64 + 56]%asi, %l7			;\
	ldxa	[%sp + V9BIAS64 + 64]%asi, %i0			;\
	ldxa	[%sp + V9BIAS64 + 72]%asi, %i1			;\
	ldxa	[%sp + V9BIAS64 + 80]%asi, %i2			;\
	ldxa	[%sp + V9BIAS64 + 88]%asi, %i3			;\
	ldxa	[%sp + V9BIAS64 + 96]%asi, %i4			;\
	ldxa	[%sp + V9BIAS64 + 104]%asi, %i5			;\
	ldxa	[%sp + V9BIAS64 + 112]%asi, %i6			;\
	ldxa	[%sp + V9BIAS64 + 120]%asi, %i7			;\
	restored						;\
	add	%g2, 1, %g2					;\
	wrpr	%g2, %cwp
1930
/*
 * Software trap 3 (ST_FLUSH_WINDOWS): advance %tpc/%tnpc past the
 * trap instruction, then enter trap() with T_FLUSH_PCB via sys_trap.
 */
.flushw:
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
1940
/*
 * Software trap 4 (clean windows): flush the register windows,
 * refill the caller's window (32- or 64-bit fill chosen by the
 * wstate low bit), clear %cleanwin, and switch to the "clean"
 * wstate when an mpcb is present.
 */
.clean_windows:
	set	trap, %g1		! set up in case the save below
	mov	T_FLUSH_PCB, %g3	! goes through sys_trap (cf.
	sub	%g0, 1, %g4		! .setup_utrap)
	save
	flushw
	rdpr	%canrestore, %g2
	brnz	%g2, 1f			! window still restorable: skip fill
	nop
	rdpr	%wstate, %g2
	btst	1, %g2
	beq	2f			! even wstate: 64-bit fill
	nop
	FILL_32bit_flushw(ASI_AIUP)
	ba,a	1f
	 .empty
2:
	FILL_64bit_flushw(ASI_AIUP)
1:
	restore
	wrpr	%g0, %g0, %cleanwin	! no clean windows

	CPU_ADDR(%g4, %g5)
	ldn	[%g4 + CPU_MPCB], %g4
	brz,a,pn %g4, 1f		! no mpcb: leave wstate alone
	  nop
	ld	[%g4 + MPCB_WSTATE], %g5
	add	%g5, WSTATE_CLEAN_OFFSET, %g5
	wrpr	%g0, %g5, %wstate
1:	FAST_TRAP_DONE
1971
1972/*
1973 * .spill_clean: clean the previous window, restore the wstate, and
1974 * "done".
1975 *
1976 * Entry: %g7 contains new wstate
1977 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
	wrpr	%g6, %cwp			! move to the previous window
	TT_TRACE_L(trace_win)
	clr	%l0				! zero the locals so the
	clr	%l1				!   window comes back "clean"
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate		! install new wstate (arg, %g7)
	saved					! mark the window as saved
	retry			! restores correct %cwp
1997
/*
 * .fix_alignment: record in the proc structure that misaligned
 * accesses for this process should be fixed up by the kernel.
 */
.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1	! thread -> proc
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]	! p->p_fixalignment = 1
	FAST_TRAP_DONE
2005
/*
 * STDF_REG(REG, ADDR, TMP) -- store double fp register number REG to
 * [ADDR + CPU_TMP1].
 *
 * REG (the instruction's 5-bit rd field) is scaled by 8 to index a
 * jump table whose entries are each two instructions (ba + std, 8
 * bytes).  The table interleaves %f0,%f32,%f2,%f34,... because the V9
 * double-register encoding folds register numbers >= 32 into bit 0 of
 * rd (rd=0 -> %f0, rd=1 -> %f32, rd=2 -> %f2, ...).
 * Clobbers REG and TMP.  (No comments inside the body: a "!" comment
 * would swallow the ";\" line continuations.)
 */
#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	  nop					;\
start1:	ba,pt	%xcc, done1			;\
	  std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	  std	%f62, [ADDR + CPU_TMP1]		;\
done1:
2076
/*
 * LDDF_REG(REG, ADDR, TMP) -- load double fp register number REG from
 * [ADDR + CPU_TMP1].
 *
 * Mirror image of STDF_REG above: REG (the 5-bit rd field) is scaled
 * by 8 to index a jump table of 8-byte (ba + ldd) entries.  The table
 * interleaves %f0,%f32,%f2,%f34,... to match the V9 double-register
 * encoding, where register numbers >= 32 are folded into bit 0 of rd.
 * Clobbers REG and TMP.  (No comments inside the body: a "!" comment
 * would swallow the ";\" line continuations.)
 */
#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	  nop					;\
start2:	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	  ldd	[ADDR + CPU_TMP1], %f62		;\
done2:
2147
/*
 * .lddf_exception_not_aligned: emulate a user lddf/ldda to an address
 * that is 4-byte- but not 8-byte-aligned.  For the primary/secondary
 * (and no-fault) address spaces the two 32-bit halves are loaded
 * separately, combined, staged in cpu_tmp1, and moved into the
 * destination double fp register via LDDF_REG.  All other ASIs (and
 * the no-fpu case under DEBUG) are handed to fpu_trap via sys_trap.
 * The tl1_hdlr flag is set around the user accesses so a nested fault
 * is recognized (see mmu_trap_tl1).
 */
.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2	! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f		! no fpu: punt to fpu_trap
	  nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
	srl	%g6, 23, %g1		! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for ldda instruction
	  nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	  srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xFF, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	  nop
	cmp	%g1, ASI_PNF		! primary no fault address space
	be,a,pt %icc, 2f
	  nop
	cmp	%g1, ASI_S		! secondary address space
	be,a,pt %icc, 2f
	  nop
	cmp	%g1, ASI_SNF		! secondary no fault address space
	bne,a,pn %icc, 3f		! anything else: punt to fpu_trap
	  nop
2:
	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
	add	%g5, 4, %g5		! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5		! combine data
	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1

	srl	%g6, 25, %g3		! %g6 has the instruction
	and	%g3, 0x1F, %g3		! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)		! cpu_tmp1 -> %f(rd)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	  sub	%g0, 1, %g4
2214
/*
 * .stdf_exception_not_aligned: emulate a user stdf/stda to an address
 * that is 4-byte- but not 8-byte-aligned.  Mirror of the lddf case
 * above: the source double fp register is staged in cpu_tmp1 via
 * STDF_REG, then stored to user space as two 32-bit halves.  Only the
 * primary and secondary address spaces are handled here; everything
 * else (and the no-fpu case under DEBUG) goes to fpu_trap.
 */
.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5		! stash sfar

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7		! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f		! no fpu: punt to fpu_trap
	  nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction

	srl	%g6, 23, %g1		! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f		! check for stda instruction
	  nop
	srl	%g6, 13, %g1		! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2		! %tstate in %g2
	brnz,a,pn %g1, 1f
	  srl	%g2, 31, %g1		! get asi from %tstate
	srl	%g6, 5, %g1		! get asi from instruction
	and	%g1, 0xff, %g1		! imm_asi field
1:
	cmp	%g1, ASI_P		! primary address space
	be,a,pt %icc, 2f
	  nop
	cmp	%g1, ASI_S		! secondary address space
	bne,a,pn %icc, 3f		! anything else: punt to fpu_trap
	  nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6		! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6	! staged register value
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER	! first half
	add	%g5, 4, %g5		! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER	! second half
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
	set	T_USER, %g3		! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2		! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	  sub	%g0, 1, %g4
2275
2276#ifdef DEBUG_USER_TRAPTRACECTL
2277
/*
 * .traptrace_freeze: log one final window-trace entry, then set
 * trap_freeze to stop further trap tracing.  %l0-%l2/%l4 are staged
 * through globals around TT_TRACE_L so the locals are preserved.
 */
.traptrace_freeze:
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]		! trap_freeze = 1
	FAST_TRAP_DONE
2286
/*
 * .traptrace_unfreeze: clear trap_freeze to re-enable trap tracing,
 * then log a window-trace entry (locals staged through globals as in
 * .traptrace_freeze above).
 */
.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]		! trap_freeze = 0
	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
	FAST_TRAP_DONE
2294
2295#endif /* DEBUG_USER_TRAPTRACECTL */
2296
/*
 * .getcc: return the V8 psr icc condition codes to the user, right-
 * justified in the user's %g1 (written at %gl == 0).
 */
.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl			! switch to user's globals
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE
2312
/*
 * .setcc: set the icc condition codes in %tstate.ccr from the V8 psr
 * icc value passed by the user in %g1 (read at %gl == 0).
 */
.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl			! switch to user's globals
	mov	%g1, %o0			! fetch user's new icc value
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2	! shift to tstate.ccr.icc
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE
2331
2332/*
2333 * getpsr(void)
2334 * Note that the xcc part of the ccr is not provided.
2335 * The V8 code shows why the V9 trap is not faster:
2336 * #define GETPSR_TRAP() \
2337 *      mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2338 */
2339
	.type	.getpsr, #function
.getpsr:
	! Build a V8 psr value in %o0 from tstate.ccr.icc, fprs.fef and
	! the assigned impl/ver field.  (xcc is not provided -- see the
	! block comment above.)
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)
2356
2357/*
2358 * setpsr(newpsr)
2359 * Note that there is no support for ccr.xcc in the V9 code.
2360 */
2361
	.type	.setpsr, #function
.setpsr:
	! Install the V8 psr value from %o0: icc -> tstate.ccr.icc,
	! ef -> fprs.fef, and mirror the fef value into the lwp's
	! fpu_fprs/fpu_en fields.
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or 	%g0, CCR_ICC, %g3		! build mask for tstate.ccr.icc
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)
2389
2390/*
2391 * getlgrp
2392 * get home lgrpid on which the calling thread is currently executing.
2393 */
	.type	.getlgrp, #function
.getlgrp:
	! Returns the current cpu id in %o0 and the home lgroup id of
	! the calling thread (sign-extended lpl_lgrpid) in %o1.
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1		! sign-extend to 64 bits
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)
2405
2406/*
2407 * Entry for old 4.x trap (trap 0).
2408 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	! Emulation address is set: redirect the user to it, passing the
	! original return point (old tnpc) in the user's %g6.
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! No emulation address: check for the old 4.x mmap syscall -- it
	! is the only one whose number must be translated here.  All
	! others are handled by the compatibility library.
	!
	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1	! substitute the native number
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)
2440
2441/*
2442 * Handler for software trap 9.
2443 * Set trap0 emulation address for old 4.x system call trap.
2444 * XXX - this should be a system call.
2445 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0	! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1		! fetch new trap0 addr (user's %g1)
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)
2463
2464/*
2465 * mmu_trap_tl1
2466 * trap handler for unexpected mmu traps.
2467 * simply checks if the trap was a user lddf/stdf alignment trap, in which
2468 * case we go to fpu_trap or a user trap from the window handler, in which
2469 * case we go save the state on the pcb.  Otherwise, we go to ptl1_panic.
2470 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi !  MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! tl1_hdlr flag
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7 ! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi ! MMU context/type
	set	0xdeadbeef, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	! If the tl1_hdlr flag is set, a fast-trap handler (e.g. the
	! lddf/stdf emulation above) faulted on a user access: clear the
	! flag and treat the fault as an ordinary page fault.
	CPU_PADDR(%g7, %g6);
	add     %g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f			! flag clear: not that case
	  nop
	sta     %g0, [%g7]ASI_MEM		! clear tl1_hdlr flag
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	 .empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn %xcc, 6f
	 nop
	set	fault_rtt_fn1, %g7		! fault in user_rtt fill
	ba,a	7f
6:
	! check to see if the trap pc is in a window spill/fill handling
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic		! below the trap table: bad
	  mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic		! past the trap table: bad
	  mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6		! XXXQ need #define
	and	%g6, 0x1ff, %g6		! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic		! not a window trap: bad
	  mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7	/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7	! handler's fault entry point

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, must save
	! the calculated next pc
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	rdpr	%tt, %g5			! %g5 = trap type
	cmp	%g5, T_ALIGNMENT
	MMU_FAULT_STATUS_AREA(%g4)
	ldx	[%g4 + MMFSA_D_ADDR], %g6	! %g6 = fault address
	done
	SET_SIZE(mmu_trap_tl1)
2559
2560/*
2561 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2562 * traps are valid only when kmdb is loaded.  When the debugger is active,
2563 * the code below is rewritten to transfer control to the appropriate
2564 * debugger entry points.
2565 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0		! patched to "jmp %g1" when kmdb
	jmp	%g1 + 0			!   is active (see comment above)
	nop
2572
	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0		! patched to "jmp %g1" when kmdb
	jmp	%g1 + 0			!   is active (see comment above)
	nop
2579
2580/*
2581 * This entry is copied from OBP's trap table during boot.
2582 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT				! placeholder; overwritten from
					!   OBP's trap table at boot
2587
2588
2589
2590#ifdef	TRAPTRACE
2591/*
2592 * TRAPTRACE support.
2593 * labels here are branched to with "rd %pc, %g7" in the delay slot.
2594 * Return is done by "jmp %g7 + 4".
2595 */
2596
/*
 * trace_dmmu: record a trap-trace entry for a data-MMU trap, including
 * the fault address, context and type from the MMU fault status area.
 * Entered with return pc in %g7; returns via "jmp %g7 + 4".
 */
trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! D-fault address
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! D-fault context
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! D-fault type
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA address
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop
2621
/*
 * trace_immu: record a trap-trace entry for an instruction-MMU trap;
 * same layout as trace_dmmu but using the MMFSA_I_* fields.
 * Entered with return pc in %g7; returns via "jmp %g7 + 4".
 */
trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! I-fault address
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! I-fault context
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! I-fault type
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA address
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop
2646
/*
 * trace_gen: record a generic trap-trace entry (tt, tstate, sp, tpc;
 * no fault-specific data).  Entered with return pc in %g7; returns
 * via "jmp %g7 + 4".
 */
trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi	! unused fields zeroed
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop
2667
/*
 * trace_win: record a window-trap trace entry, then zero the locals it
 * used so the window stays clean.  Entered with return pc in %l4;
 * returns via "jmp %l4 + 4" (caller clears %l4 itself).
 */
trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	  clr	%l0
2675
2676/*
2677 * Trace a tsb hit
2678 * g1 = tsbe pointer (in/clobbered)
2679 * g2 = tag access register (in)
2680 * g3 - g4 = scratch (clobbered)
2681 * g5 = tsbe data (in)
2682 * g6 = scratch (clobbered)
2683 * g7 = pc we jumped here from (in)
2684 */
2685
	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)			! record the hit (see header above
	jmp	%g7 + 4			!   for register contract)
	nop
2691
2692/*
2693 * Trace a TSB miss
2694 *
2695 * g1 = tsb8k pointer (in)
2696 * g2 = tag access register (in)
2697 * g3 = tsb4m pointer (in)
2698 * g4 = tsbe tag (in/clobbered)
2699 * g5 - g6 = scratch (clobbered)
2700 * g7 = pc we jumped here from (in)
2701 */
	.global	trace_tsbmiss
trace_tsbmiss:
	! Record a TSB-miss trace entry (register contract in the header
	! comment above).  The fault address logged in the tstate slot
	! is taken from the I- or D-side of the MMFSA depending on the
	! trap type.
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi		! XXX? tsb tag
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
	srlx	%g1, 32, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! huh?
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	MMU_FAULT_STATUS_AREA(%g4)
	cmp	%g6, FAST_IMMU_MISS_TT			! instruction miss?
	be,a	%icc, 1f
	  ldx	[%g4 + MMFSA_I_ADDR], %g6
	cmp	%g6, T_INSTR_MMU_MISS
	be,a	%icc, 1f
	  ldx	[%g4 + MMFSA_I_ADDR], %g6
	ldx	[%g4 + MMFSA_D_ADDR], %g6		! else data miss
1:	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop
2736
2737/*
2738 * g2 = tag access register (in)
2739 * g3 = ctx number (in)
2740 */
/*
 * trace_dataprot: record a trace entry for a data-protection trap
 * (register contract in the header comment above).  Entered with
 * return pc in %g7; returns via "jmp %g7 + 4".
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi		! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_TR]%asi		! unused fields zeroed
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop
2764
2765#endif /* TRAPTRACE */
2766
/*
 * .dmmu_exc_lddf_not_aligned: build sfar/sfsr-style args (%g2 = fault
 * address, %g3 = ctx<<SFSR_CTX_SHIFT | MMFSA_F_UNALIGN) from the MMU
 * fault status area, then dispatch on the frame format: a 64-bit frame
 * (biased %sp, bit 0 set) goes to the lddf emulation, otherwise to the
 * generic misalignment handler.
 */
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2	! %g2 = fault address
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp				! 64-bit (biased) frame?
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3			! delay: %g3 = ctx | type
	ba,a,pt	%xcc, .mmu_exception_not_aligned
2778
/*
 * .dmmu_exc_stdf_not_aligned: identical to the lddf case above except
 * that a 64-bit frame dispatches to the stdf emulation.
 */
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2	! %g2 = fault address
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp				! 64-bit (biased) frame?
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3			! delay: %g3 = ctx | type
	ba,a,pt	%xcc, .mmu_exception_not_aligned
2790
2791/*
2792 * expects offset into tsbmiss area in %g1 and return pc in %g7
2793 */
stat_mmu:
	! Increment the 32-bit counter at the given offset (%g1) in this
	! cpu's tsbmiss area; return via "jmp %g7 + 4".
	CPU_INDEX(%g5, %g6)
	sethi	%hi(tsbmiss_area), %g6
	sllx	%g5, TSBMISS_SHIFT, %g5		! cpu index -> area offset
	or	%g6, %lo(tsbmiss_area), %g6
	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
	ld	[%g6 + %g1], %g5
	add	%g5, 1, %g5			! counter++
	jmp	%g7 + 4
	st	%g5, [%g6 + %g1]		! delay: store new count
2804
2805
2806/*
2807 * fast_trap_done, fast_trap_done_chk_intr:
2808 *
2809 * Due to the design of UltraSPARC pipeline, pending interrupts are not
2810 * taken immediately after a RETRY or DONE instruction which causes IE to
2811 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2812 * to execute first before taking any interrupts. If that instruction
2813 * results in other traps, and if the corresponding trap handler runs
2814 * entirely at TL=1 with interrupts disabled, then pending interrupts
2815 * won't be taken until after yet another instruction following the %tpc
2816 * or %tnpc.
2817 *
2818 * A malicious user program can use this feature to block out interrupts
2819 * for extended durations, which can result in send_mondo_timeout kernel
2820 * panic.
2821 *
2822 * This problem is addressed by servicing any pending interrupts via
2823 * sys_trap before returning back to the user mode from a fast trap
2824 * handler. The "done" instruction within a fast trap handler, which
2825 * runs entirely at TL=1 with interrupts disabled, is replaced with the
2826 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2827 * entry point.
2828 *
2829 * We check for any pending interrupts here and force a sys_trap to
2830 * service those interrupts, if any. To minimize overhead, pending
2831 * interrupts are checked if the %tpc happens to be at 16K boundary,
2832 * which allows a malicious program to execute at most 4K consecutive
2833 * instructions before we service any pending interrupts. If a worst
2834 * case fast trap handler takes about 2 usec, then interrupts will be
2835 * blocked for at most 8 msec, less than a clock tick.
2836 *
2837 * For the cases where we don't know if the %tpc will cross a 16K
2838 * boundary, we can't use the above optimization and always process
 * any pending interrupts via the fast_trap_done_chk_intr entry point.
2840 *
2841 * Entry Conditions:
2842 * 	%pstate		am:0 priv:1 ie:0
2843 * 			globals are AG (not normal globals)
2844 */
2845
2846	.global	fast_trap_done, fast_trap_done_chk_intr
fast_trap_done:
	! Only check for pending interrupts when %tpc crosses a 16K
	! boundary (see the block comment above for the rationale).
	rdpr	%tpc, %g5
	sethi	%hi(0xffffc000), %g6	! 1's complement of 0x3fff
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
	bz,pn	%icc, 1f		! branch if zero (lower 32 bits only)
	nop
	done

fast_trap_done_chk_intr:
	! Always check for pending interrupts before returning.
1:	rd	SOFTINT, %g6
	brnz,pn	%g6, 2f		! branch if any pending intr
	nop
	done

2:
	/*
	 * We get here if there are any pending interrupts.
	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
	 * instruction.
	 */
	rdpr	%tnpc, %g5
	wrpr	%g0, %g5, %tpc
	add	%g5, 4, %g5
	wrpr	%g0, %g5, %tnpc

	/*
	 * Force a dummy sys_trap call so that interrupts can be serviced.
	 */
	set	fast_trap_dummy_call, %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4

fast_trap_dummy_call:
	! No-op handler; sys_trap entry/exit does the interrupt service.
	retl
	nop
2882
2883#endif	/* lint */
2884