xref: /titanic_41/usr/src/uts/sun4v/ml/mach_interrupt.s (revision 03831d35f7499c87d51205817c93e9a8d42c4bae)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/machasi.h>
#include <sys/scb.h>
#include <sys/error.h>
#define	INTR_REPORT_SIZE	64

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

void
cpu_mondo(void)
{}

#else	/* lint */


/*
 * (TT 0x7c, TL>0) CPU Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 */
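/*
 * Illustrative only: a rough C sketch of the dequeue logic implemented in
 * assembly below, assuming the head/tail queue registers and mondo record
 * layout described in the register-usage notes. Helper names such as
 * ldxa_queue(), ldxa_mem(), stxa_queue() and the cpu_q_base/cpu_q_size
 * variables are hypothetical; offsets are in bytes.
 *
 *	uint64_t head = ldxa_queue(CPU_MONDO_Q_HD);
 *	uint64_t tail = ldxa_queue(CPU_MONDO_Q_TL);
 *	if (head == tail)
 *		retry();				// queue empty
 *	uint64_t pc   = ldxa_mem(cpu_q_base + head);	  // word 0: handler PC
 *	uint64_t arg1 = ldxa_mem(cpu_q_base + head + 8);  // word 1
 *	uint64_t arg2 = ldxa_mem(cpu_q_base + head + 16); // word 2
 *	head = (head + INTR_REPORT_SIZE) & (cpu_q_size - 1);
 *	stxa_queue(CPU_MONDO_Q_HD, head);		// pop the record
 *	if (pc < KERNELBASE)
 *		cpu_mondo_inval++;			// drop a bogus handler PC
 *	else
 *		((void (*)(uint64_t, uint64_t))pc)(arg1, arg2);
 */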
	ENTRY_NP(cpu_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base PA
	!	%g4 	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	CPU_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	CPU_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 0f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_CPU_Q_BASE], %g3	! %g3 = queue base PA
	ldx	[%g2 + MCPU_CPU_Q_SIZE], %g4	! queue size
	sub	%g4, 1, %g4		! %g4 = queue size mask

	! Load interrupt receive data registers 1 and 2 to fetch
	! the arguments for the fast trap handler.
	!
	! XXX - Since the data words in the interrupt report are not defined yet,
	! we assume that the consecutive words contain valid data and preserve
	! sun4u's xcall mondo arguments.
	! Register usage:
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2

	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get PC from q base + head
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g1 ! read data word 1
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g2	! read data word 2
	add	%g6, (INTR_REPORT_SIZE - 16) , %g6 ! inc head to next record
	and	%g6, %g4, %g6 		! and size mask for wrap around
	mov	CPU_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! store head pointer
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g4 + TRAP_ENT_SP]%asi
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! pc of the TL>0 handler
	stna	%g1, [%g4 + TRAP_ENT_F1]%asi	! arg1
	stna	%g2, [%g4 + TRAP_ENT_F3]%asi	! arg2
	mov	CPU_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! new head offset
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi
	stna	%g7, [%g4 + TRAP_ENT_F4]%asi	! tail offset
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	/*
	 * For now, catch an invalid PC being passed via the cpu_mondo queue
	 */
	set	KERNELBASE, %g4
	cmp	%g5, %g4
	bl,a,pn	%xcc, 1f		! branch if bad %pc
	nop

	jmp	%g5			! jump to traphandler
	nop
1:
	! invalid trap handler, discard it for now
	set	cpu_mondo_inval, %g4
	ldx	[%g4], %g5
	inc	%g5
	stx	%g5, [%g4]
0:
	retry
	/* Never Reached */
	SET_SIZE(cpu_mondo)

#endif /* lint */

#if defined(lint)

void
dev_mondo(void)
{}

#else	/* lint */


/*
 * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 * We only process one interrupt at a time, which causes us to keep
 * taking this trap until the queue is empty.
 * We really should drain the whole queue for better performance,
 * but this will do for now.
 */
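/*
 * Illustrative only: a rough C sketch of the flow implemented below, under
 * the assumption that intr_vector[] is indexed by inum and that iv_payload_buf
 * points to a per-vector 64-byte buffer when payload delivery is enabled.
 * Helper names such as ldxa_queue(), ldxa_mem() and bcopy_from_pa() are
 * hypothetical.
 *
 *	uint64_t head = ldxa_queue(DEV_MONDO_Q_HD);
 *	uint64_t tail = ldxa_queue(DEV_MONDO_Q_TL);
 *	if (head == tail)
 *		retry();				// queue empty
 *	uint64_t inum = ldxa_mem(dev_q_base + head);	// first word = inum
 *	if (inum < MAXIVNUM && intr_vector[inum].iv_payload_buf != NULL) {
 *		// copy the 64-byte mondo payload for the handler to consume
 *		bcopy_from_pa(dev_q_base + head,
 *		    intr_vector[inum].iv_payload_buf, 64);
 *	}
 *	head = (head + INTR_REPORT_SIZE) & (dev_q_size - 1);
 *	stxa_queue(DEV_MONDO_Q_HD, head);		// pop the record
 *	setsoftint_tl1(inum);				// post the soft interrupt
 */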
	ENTRY_NP(dev_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base PA
	!	%g4 	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	DEV_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	DEV_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 0f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g3	! %g3 = queue base PA

	! Register usage:
	!	%g5 - inum
	!	%g1 - cpu struct pointer used below in TRAPTRACE
	!
	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get inum from q base + head

	!
	! We verify that inum is valid ( < MAXIVNUM). If it is not,
	! we let setsoftint_tl1 take care of it.
	!
	set	MAXIVNUM, %g4
	cmp	%g5, %g4
	bgeu,a,pn	%xcc, 1f
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Copy the 64-byte payload to *iv_payload_buf if it is not NULL
	!
	set	intr_vector, %g1
	sll	%g5, INTR_VECTOR_SHIFT, %g7
	add	%g1, %g7, %g1			! %g1 = &intr_vector[inum]
	ldx	[%g1 + IV_PAYLOAD_BUF], %g1	! %g1 = iv_payload_buf
	brz,a,pt	%g1, 1f			! if it is NULL
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Now move the 64-byte payload from the mondo queue to the buf
	!
	mov	%g6, %g7			! %g7 = head ptr
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 0]			! byte 0 - 7
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 8]			! byte 8 - 15
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 16]			! byte 16 - 23
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 24]			! byte 24 - 31
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 32]			! byte 32 - 39
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 40]			! byte 40 - 47
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 48]			! byte 48 - 55
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 56]			! byte 56 - 63
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size

1:	sub	%g4, 1, %g4		! %g4 = queue size mask
	add	%g6, INTR_REPORT_SIZE , %g6 ! inc head to next record
	and	%g6, %g4, %g6 		! and mask for wrap around
	mov	DEV_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! increment head offset
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	! move head to sp
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g6
	stna	%g6, [%g4 + TRAP_ENT_SP]%asi	! Device Queue Base PA
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! Inum
	mov	DEV_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! New head offset
	stna	%g6, [%g4 + TRAP_ENT_F1]%asi
#ifdef __sparcv9
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g6
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi	! Q Size
	stna	%g7, [%g4 + TRAP_ENT_F3]%asi	! tail offset
	stna	%g0, [%g4 + TRAP_ENT_F4]%asi
#endif
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	!
	! setsoftint_tl1 will do all the work, and finish with a retry
	!
	ba,pt	%xcc, setsoftint_tl1
	mov	%g5, %g1		! setsoftint_tl1 expects inum in %g1

0:	retry

	/* Never Reached */
	SET_SIZE(dev_mondo)
#endif /* lint */

#if defined(lint)
uint64_t cpu_mondo_inval;
#else /* lint */
	.seg	".data"
	.global	cpu_mondo_inval
	.align	8
cpu_mondo_inval:
	.skip	8

	.seg	".text"
#endif	/* lint */


#if defined(lint)

void
resumable_error(void)
{}

#else	/* lint */

/*
 * (TT 0x7e, TL>0) Resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the resumable
 *	queue in memory, then call the C routine to process them.
 */
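/*
 * Illustrative only: a rough C sketch of the drain loop implemented below.
 * It assumes the shadow kernel buffer lives exactly CPU_RQ_SIZE bytes past
 * the queue base and that a non-zero first word in a shadow slot means the
 * C routine has not yet consumed the previous record. Helper names such as
 * ldxa_queue(), ldxa_mem(), copy_64_bytes() and overflow_path() are
 * hypothetical.
 *
 *	uint64_t head = ldxa_queue(CPU_RQ_HD);
 *	uint64_t tail = ldxa_queue(CPU_RQ_TL);
 *	if (head == tail)
 *		retry();				// queue empty
 *	do {
 *		uint64_t src = cpu_rq_base + head;	// error report in queue
 *		uint64_t dst = src + CPU_RQ_SIZE;	// slot in shadow buffer
 *		if (ldxa_mem(dst) != 0)			// previous record unconsumed
 *			return (overflow_path());	// head = tail, report overflow
 *		copy_64_bytes(src, dst);
 *		head = (head + Q_ENTRY_SIZE) & (CPU_RQ_SIZE - 1);
 *	} while (head != tail);
 *	stxa_queue(CPU_RQ_HD, head);			// pop everything we copied
 *	sys_trap(process_resumable_error, ...);		// at PIL 14, or 15 if already there
 */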
	ENTRY_NP(resumable_error)
	mov	CPU_RQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_RQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! %g6 = working copy; head saved in %g2

	cmp	%g6, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	CPU_ADDR(%g1, %g4)			! %g1 = cpu struct addr

2:	set	CPU_RQ_BASE_OFF, %g4
	ldx	[%g1 + %g4], %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_RQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_RQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now; we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_RQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset

	/*
	 * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2.l is
	 * the head offset (arg2) and %g3 is the tail
	 * offset (arg3).
	 */
	set	process_resumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

	/*
	 * We are here because the C routine was not able to process
	 * errors in time, so the first 8 bytes of the ER in the buf have
	 * not been cleared. We set the head equal to the tail and call
	 * sys_trap to print an error message.
	 */

1:	mov	CPU_RQ_HD, %g4
	stxa	%g3, [%g4]ASI_QUEUE		! set head equal to tail

	/*
	 * Set %g2 to %g6, which is the current head offset. %g2
	 * is arg2 of the C routine. %g3 is the tail offset,
	 * which is arg3 of the C routine.
	 * Call rq_overflow at PIL 14 unless we're already at PIL 15.
	 */
	mov	%g6, %g2
	set	rq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(resumable_error)
#endif /* lint */

#if defined(lint)

void
nonresumable_error(void)
{}

#else	/* lint */

/*
 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the non-resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the non-resumable
 *	queue in memory, then call the C routine to process them.
 */
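/*
 * Illustrative only: this handler drains its queue the same way as
 * resumable_error above (see the C sketch there), using the CPU_NRQ_*
 * registers and offsets. The additional work, roughly, assuming the errh
 * flag and PIL constants used below:
 *
 *	wrpr(%gl, 1);				// sys_trap expects %gl == 1
 *	// drain CPU_NRQ_* into the shadow buffer, as in the resumable sketch
 *	arg3 = (tail << 32) | head;		// pack offsets for the C routine
 *	arg2 = tl - 1;				// previous trap level
 *	if (error hit a user window spill/fill trap at TL == 2)
 *		arg2 |= 1ULL << ERRH_U_SPILL_FILL_SHIFT;
 *	sys_trap(process_nonresumable_error, ...);	// at PIL 14, or 15 if already there
 */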
	ENTRY_NP(nonresumable_error)
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset

	cmp	%g2, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	/* force %gl to 1 as sys_trap requires */
	wrpr	%g0, 1, %gl
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! %g6 = working copy; head saved in %g2

	CPU_PADDR(%g1, %g4)			! %g1 = cpu struct paddr

2:	set	CPU_NRQ_BASE_OFF, %g4
	ldxa	[%g1 + %g4]ASI_MEM, %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_NRQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_NRQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now; we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_NRQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset

	/*
	 * Call sys_trap. %g2 is the TL (arg2), %g3 holds both the head and
	 * tail offsets (arg3).
	 * %g3 looks like the following:
	 *	+--------------------+--------------------+
	 *	|   tail offset      |    head offset     |
	 *	+--------------------+--------------------+
	 *	63                 32 31                 0
	 *
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
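	/*
	 * Equivalently, in C (illustrative; names are hypothetical):
	 *	arg3 = (tail_offset << 32) | head_offset;
	 * and the C routine recovers them as (uint32_t)arg3 and (arg3 >> 32).
	 */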
	sllx	%g3, 32, %g3			! %g3.h = tail offset
	or	%g3, %g2, %g3			! %g3.l = head offset
	rdpr	%tl, %g2			! %g2 = current tl

	/*
	 * Now check whether the first error that sent us here occurred
	 * in the user's SPILL/FILL trap. If it did, we call sys_trap to
	 * kill the user process. Several considerations:
	 * - If multiple nonresumable errors happen, we only check the
	 *   first one. Nonresumable errors either panic the system or
	 *   kill the user process, so the system has already panicked
	 *   or killed the user process after processing the first
	 *   error. Therefore, there is no need to check the other error
	 *   packets of this type.
	 * - Errors that happen in the user's SPILL/FILL trap leave us
	 *   at TL = 2.
	 * - We need to lower TL to 1 to read the trap type and tstate.
	 *   We don't go back to TL = 2, so there is no need to save state.
	 */
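	/*
	 * Illustrative only: a rough C rendering of the check performed
	 * below, assuming the WTRAP_* and ERRH_U_SPILL_FILL_SHIFT
	 * definitions used by this file (tl, tpc, tt[], tstate[] stand for
	 * the corresponding trap registers):
	 *
	 *	if (tl == 2 &&
	 *	    tpc >= trap_table && tpc < etrap_table &&	// %tpc within trap table
	 *	    (tt[1] & WTRAP_TTMASK) == WTRAP_TYPE &&	// tt[1] is a window trap
	 *	    !(tstate[1] & TSTATE_PRIV))			// trap came from user code
	 *		tl |= 1ULL << ERRH_U_SPILL_FILL_SHIFT;	// flag user spill/fill
	 *	arg2 = tl - 1;					// %g2.l = previous tl
	 */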
	cmp	%g2, 2
	bne,pt	%xcc, 3f			! if tl != 2
	nop
	/* Check to see if the trap pc is in a window spill/fill handler */
	rdpr	%tpc, %g4
	/* tpc should be in the trap table */
	set	trap_table, %g5
	cmp	%g4, %g5
	blu,pt	%xcc, 3f
	nop
	set	etrap_table, %g5
	cmp	%g4, %g5
	bgeu,pt	%xcc, 3f
	nop
	/* Set tl to 1 in order to read tt[1] and tstate[1] */
	wrpr	%g0, 1, %tl
	rdpr	%tt, %g4			! %g4 = tt[1]
	/* Check if tt[1] is a window trap */
	and	%g4, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pt	%xcc, 3f
	nop
	rdpr	%tstate, %g5			! %g5 = tstate[1]
	btst	TSTATE_PRIV, %g5
	bnz	%xcc, 3f			! skip if not from user code
	nop
	/*
	 * Now we know the error happened in the user's SPILL/FILL trap.
	 * Turn on the user spill/fill flag in %g2.
	 */
	mov	1, %g4
	sllx	%g4, ERRH_U_SPILL_FILL_SHIFT, %g4
	or	%g2, %g4, %g2			! turn on flag in %g2

3:	sub	%g2, 1, %g2			! %g2.l = previous tl

	set	process_nonresumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

	/*
	 * We are here because the C routine was not able to process
	 * errors in time, so the first 8 bytes of the ER in the buf have
	 * not been cleared. We call sys_trap to panic.
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
1:	set	nrq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(nonresumable_error)
#endif /* lint */
609