/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/machasi.h>
#include <sys/scb.h>
#include <sys/error.h>
#define	INTR_REPORT_SIZE	64

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

void
cpu_mondo(void)
{}

#else	/* lint */


/*
 * (TT 0x7c, TL>0) CPU Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 */
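/*
 * Illustrative only, not part of the build: a minimal C sketch of the
 * dequeue the handler below performs.  queue_reg_read()/queue_reg_write()
 * and pa_read() are hypothetical stand-ins for the ASI_QUEUE and ASI_MEM
 * accesses, and the mcpu fields correspond to the MCPU_CPU_Q_* offsets.
 *
 *	uint64_t head, tail, base, mask, pc, arg1, arg2;
 *
 *	head = queue_reg_read(CPU_MONDO_Q_HD);
 *	tail = queue_reg_read(CPU_MONDO_Q_TL);
 *	if (head == tail)
 *		return;					// queue empty, retry
 *	base = mcpu->cpu_q_base;			// queue base PA
 *	mask = mcpu->cpu_q_size - 1;			// size is a power of two
 *	pc   = pa_read(base + head);			// TL>0 handler PC
 *	arg1 = pa_read(base + head + 8);
 *	arg2 = pa_read(base + head + 16);
 *	head = (head + INTR_REPORT_SIZE) & mask;	// advance head and wrap
 *	queue_reg_write(CPU_MONDO_Q_HD, head);
 *	if (pc < KERNELBASE)
 *		cpu_mondo_inval++;			// bad PC: count and drop
 *	else
 *		((void (*)(uint64_t, uint64_t))pc)(arg1, arg2);
 */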
	ENTRY_NP(cpu_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base VA
	!	%g4	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	CPU_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	CPU_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 0f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_CPU_Q_BASE], %g3	! %g3 = queue base PA
	ldx	[%g2 + MCPU_CPU_Q_SIZE], %g4	! queue size
	sub	%g4, 1, %g4		! %g4 = queue size mask

	! Load interrupt receive data registers 1 and 2 to fetch
	! the arguments for the fast trap handler.
	!
	! XXX - Since the data words in the interrupt report are not defined
	! yet, we assume that the consecutive words contain valid data and
	! preserve sun4u's xcall mondo arguments.
	! Register usage:
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2

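	/*
	 * For reference: the 64-byte report layout assumed by the loads
	 * below, as a C sketch (the name is hypothetical and the trailing
	 * words are not interpreted here).
	 *
	 *	struct cpu_mondo_report {
	 *		uint64_t pc;		// word 0: TL>0 handler PC
	 *		uint64_t arg1;		// word 1
	 *		uint64_t arg2;		// word 2
	 *		uint64_t unused[5];	// words 3-7, ignored
	 *	};				// INTR_REPORT_SIZE == 64 bytes
	 */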
	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get PC from q base + head
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g1 ! read data word 1
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g2	! read data word 2
	add	%g6, (INTR_REPORT_SIZE - 16), %g6 ! inc head to next record
	and	%g6, %g4, %g6		! and size mask for wrap around
	mov	CPU_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! store head pointer
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g4 + TRAP_ENT_SP]%asi
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! pc of the TL>0 handler
	stna	%g1, [%g4 + TRAP_ENT_F1]%asi	! arg1
	stna	%g2, [%g4 + TRAP_ENT_F3]%asi	! arg2
	mov	CPU_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! new head offset
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi
	stna	%g7, [%g4 + TRAP_ENT_F4]%asi	! tail offset
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	/*
	 * For now, catch an invalid PC being passed via the cpu_mondo queue
	 */
	set	KERNELBASE, %g4
	cmp	%g5, %g4
	bl,a,pn	%xcc, 1f		! branch if bad %pc
	nop

	jmp	%g5			! jump to traphandler
	nop
1:
	! invalid trap handler, discard it for now
	set	cpu_mondo_inval, %g4
	ldx	[%g4], %g5
	inc	%g5
	stx	%g5, [%g4]
0:
	retry
	/* Never Reached */
	SET_SIZE(cpu_mondo)

#endif /* lint */

#if defined(lint)

void
dev_mondo(void)
{}

#else	/* lint */


/*
 * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 * We only process one interrupt at a time, so we keep taking
 * this trap until the queue is empty.
 * We really should drain the whole queue for better performance,
 * but this will do for now.
 */
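/*
 * Illustrative only, not part of the build: a rough C sketch of what the
 * handler below does for one device mondo.  queue_reg_*(), pa_read() and
 * pa_copy() are hypothetical stand-ins for the ASI_QUEUE/ASI_MEM accesses,
 * and the mcpu fields correspond to the MCPU_DEV_Q_* offsets.
 *
 *	uint64_t head = queue_reg_read(DEV_MONDO_Q_HD);
 *	uint64_t tail = queue_reg_read(DEV_MONDO_Q_TL);
 *	if (head == tail)
 *		return;					// nothing pending, retry
 *	uint64_t base = mcpu->dev_q_base;		// queue base PA
 *	uint64_t inum = pa_read(base + head);		// first word = inum
 *	if (inum < MAXIVNUM) {
 *		intr_vec_t *iv = intr_vec_table[inum];
 *		if (iv != NULL && iv->iv_payload_buf != NULL)
 *			pa_copy(iv->iv_payload_buf, base + head, 64);
 *	}
 *	head = (head + INTR_REPORT_SIZE) & (mcpu->dev_q_size - 1);
 *	queue_reg_write(DEV_MONDO_Q_HD, head);
 *	setvecint_tl1(inum);		// dispatches, or flags it as spurious
 */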
	ENTRY_NP(dev_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base PA
	!	%g4	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	DEV_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	DEV_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 0f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g3	! %g3 = queue base PA

	! Register usage:
	!	%g5 - inum
	!	%g1 - cpu struct pointer used below in TRAPTRACE
	!
	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get inum from q base + head

	!
	! We verify that the inum is valid (< MAXIVNUM). If it is greater
	! than or equal to MAXIVNUM, we let setvecint_tl1 take care of it.
	!
	set	MAXIVNUM, %g4
	cmp	%g5, %g4
	bgeu,a,pn	%xcc, 1f
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Copy the 64-byte payload to *iv_payload_buf if it is not NULL
	!
	set	intr_vec_table, %g1		! %g1 = intr_vec_table
	sll	%g5, CPTRSHIFT, %g7		! %g7 = offset to inum entry
						!       in the intr_vec_table
	add	%g1, %g7, %g7			! %g7 = &intr_vec_table[inum]
	ldn	[%g7], %g1			! %g1 = ptr to intr_vec_t (iv)

	!
	! Verify the pointer to the first intr_vec_t for the given inum;
	! it should not be NULL. If this pointer is NULL, this is a
	! spurious interrupt. In that case, just call setvecint_tl1 and
	! it will handle the spurious interrupt.
	!
	brz,a,pn	%g1, 1f			! if %g1 is NULL
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	ldx	[%g1 + IV_PAYLOAD_BUF], %g1	! %g1 = iv->iv_payload_buf
	brz,a,pt	%g1, 1f			! if it is NULL
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Now move 64 byte payload from mondo queue to buf
	!
	mov	%g6, %g7			! %g7 = head ptr
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 0]			! byte 0 - 7
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 8]			! byte 8 - 15
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 16]			! byte 16 - 23
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 24]			! byte 24 - 31
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 32]			! byte 32 - 39
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 40]			! byte 40 - 47
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 48]			! byte 48 - 55
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 56]			! byte 56 - 63
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size

1:	sub	%g4, 1, %g4		! %g4 = queue size mask
	add	%g6, INTR_REPORT_SIZE, %g6 ! inc head to next record
	and	%g6, %g4, %g6		! and mask for wrap around
	mov	DEV_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! increment head offset
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	! move head to sp
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g6
	stna	%g6, [%g4 + TRAP_ENT_SP]%asi	! Device Queue Base PA
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! Inum
	mov	DEV_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! New head offset
	stna	%g6, [%g4 + TRAP_ENT_F1]%asi
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g6
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi	! Q Size
	stna	%g7, [%g4 + TRAP_ENT_F3]%asi	! tail offset
	stna	%g0, [%g4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	!
	! setvecint_tl1 will do all the work, and finish with a retry
	!
	ba,pt	%xcc, setvecint_tl1
	mov	%g5, %g1		! setvecint_tl1 expects inum in %g1

0:	retry

	/* Never Reached */
	SET_SIZE(dev_mondo)
#endif /* lint */

#if defined(lint)
uint64_t cpu_mondo_inval;
#else /* lint */
	.seg	".data"
	.global	cpu_mondo_inval
	.align	8
cpu_mondo_inval:
	.skip	8

	.seg	".text"
#endif	/* lint */


#if defined(lint)

void
resumable_error(void)
{}

#else	/* lint */

/*
 * (TT 0x7e, TL>0) Resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the resumable
 *	queue in memory, and call a C routine to process them.
 */
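/*
 * Illustrative only, not part of the build: the drain loop below, sketched
 * in C.  queue_reg_*(), pa_read() and pa_copy64() are hypothetical stand-ins
 * for the ASI_QUEUE/ASI_MEM accesses, rq_base_pa is the queue base PA read
 * from the cpu structure (CPU_RQ_BASE_OFF), and sys_trap(handler, arg2,
 * arg3, pil) mirrors the %g1..%g4 calling convention used below.
 *
 *	uint64_t head = queue_reg_read(CPU_RQ_HD);
 *	uint64_t tail = queue_reg_read(CPU_RQ_TL);
 *	uint64_t first_head = head;
 *
 *	while (head != tail) {
 *		uint64_t q_er   = rq_base_pa + head;	// report in the queue
 *		uint64_t buf_er = q_er + CPU_RQ_SIZE;	// shadow copy slot
 *		if (pa_read(buf_er) != 0) {
 *			// C code has not consumed the previous copy yet
 *			queue_reg_write(CPU_RQ_HD, tail);
 *			sys_trap(rq_overflow, head, tail, MAX(pil, PIL_14));
 *			return;
 *		}
 *		pa_copy64(buf_er, q_er);		// copy the 64-byte report
 *		head = (head + Q_ENTRY_SIZE) & (CPU_RQ_SIZE - 1);
 *	}
 *	queue_reg_write(CPU_RQ_HD, head);
 *	sys_trap(process_resumable_error, first_head, tail, MAX(pil, PIL_14));
 */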
	ENTRY_NP(resumable_error)
	mov	CPU_RQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_RQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! %g6 = working head; %g2 keeps the original

	cmp	%g6, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	CPU_ADDR(%g1, %g4)			! %g1 = cpu struct addr

2:	set	CPU_RQ_BASE_OFF, %g4
	ldx	[%g1 + %g4], %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_RQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_RQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now, so we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_RQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset

	/*
	 * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2.l is
	 * the head offset (arg2) and %g3 is the tail offset (arg3).
	 */
	set	process_resumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4
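	/*
	 * A sketch of what the cmp/movl pair above computes (illustrative
	 * only): sys_trap's PIL argument in %g4 is raised to PIL_14 unless
	 * we are already running at a higher PIL.
	 *
	 *	uint64_t pil = rdpr_pil();	// hypothetical accessor
	 *	if (pil < PIL_14)
	 *		pil = PIL_14;		// movl taken when %pil < PIL_14
	 *	sys_trap(process_resumable_error, head, tail, pil);
	 */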

	/*
	 * We are here because the C routine is not able to process
	 * errors in time, so the first 8 bytes of the ER in the buf have
	 * not been cleared. We update head to tail and call sys_trap to
	 * print out an error message.
	 */

1:	mov	CPU_RQ_HD, %g4
	stxa	%g3, [%g4]ASI_QUEUE		! set head equal to tail

	/*
	 * Set %g2 to %g6, which is the current head offset. %g2
	 * is arg2 of the C routine. %g3 is the tail offset,
	 * which is arg3 of the C routine.
	 * Call rq_overflow at PIL 14 unless we're already at PIL 15.
	 */
	mov	%g6, %g2
	set	rq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(resumable_error)
#endif /* lint */

#if defined(lint)

void
nonresumable_error(void)
{}

#else	/* lint */

/*
 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the non-resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the non-resumable
 *	queue in memory, and call a C routine to process them.
 */
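/*
 * Illustrative only, not part of the build: an outline in C of the handler
 * below.  The drain loop is the same as in the resumable_error sketch above;
 * the differences are that %gl is forced to 1 first and that the cpu
 * structure is read via its physical address.
 *
 *	if (queue_reg_read(CPU_NRQ_HD) == queue_reg_read(CPU_NRQ_TL))
 *		return;				// nothing pending, retry
 *	force_gl(1);				// sys_trap requires %gl == 1
 *	head = queue_reg_read(CPU_NRQ_HD);	// re-read with the new globals
 *	tail = queue_reg_read(CPU_NRQ_TL);
 *	drain_queue_to_shadow_buf();		// as in resumable_error; takes
 *						// the nrq_overflow path if the
 *						// shadow buffer was not cleared
 *	queue_reg_write(CPU_NRQ_HD, head);
 *	arg2 = current_tl;			// may get the user spill/fill
 *						// flag OR'ed in, see below
 *	arg3 = (tail << 32) | head;		// packed queue offsets
 *	sys_trap(process_nonresumable_error, arg2 - 1, arg3, MAX(pil, PIL_14));
 */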
	ENTRY_NP(nonresumable_error)
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset

	cmp	%g2, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	/* force %gl to 1 as sys_trap requires */
	wrpr	%g0, 1, %gl
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! %g6 = working head; %g2 keeps the original

	CPU_PADDR(%g1, %g4)			! %g1 = cpu struct paddr

2:	set	CPU_NRQ_BASE_OFF, %g4
	ldxa	[%g1 + %g4]ASI_MEM, %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_NRQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_NRQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now, so we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_NRQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset

	/*
	 * Call sys_trap. %g2 is TL (arg2), %g3 is the packed head and tail
	 * offsets (arg3).
	 * %g3 looks like the following:
	 *	+--------------------+--------------------+
	 *	|   tail offset      |    head offset     |
	 *	+--------------------+--------------------+
	 *	63                 32 31                 0
	 *
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
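	/*
	 * Equivalently, in C (illustrative only):
	 *
	 *	arg3 = (tail << 32) | head;	// %g3: tail in bits 63..32,
	 *					//      head in bits 31..0
	 */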
	sllx	%g3, 32, %g3			! %g3.h = tail offset
	or	%g3, %g2, %g3			! %g3.l = head offset
	rdpr	%tl, %g2			! %g2 = current tl

	/*
	 * Now check whether the first error that sent us here was caused
	 * in the user's SPILL/FILL trap. If it was, we call sys_trap to
	 * kill the user process. Several considerations:
	 * - If multiple nonresumable errors happen, we only check the
	 *   first one. Nonresumable errors cause the system either to
	 *   panic or to kill the user process, so the system has already
	 *   panicked or killed the user process after processing the first
	 *   error. Therefore, there is no need to check the other error
	 *   packets for this type of error.
	 * - Errors that happen in the user's SPILL/FILL trap will bring us
	 *   to TL = 2.
	 * - We need to lower TL to 1 to get the trap type and tstate.
	 *   We don't go back to TL = 2, so there is no need to save state.
	 */
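	/*
	 * Illustrative only: the check below, sketched in C.  The rdpr_*()
	 * and wrpr_tl() names are hypothetical accessors for the privileged
	 * registers read/written below.
	 *
	 *	if (rdpr_tl() == 2 &&
	 *	    rdpr_tpc() >= (uintptr_t)trap_table &&
	 *	    rdpr_tpc() < (uintptr_t)etrap_table) {
	 *		wrpr_tl(1);			// examine tt[1]/tstate[1]
	 *		if ((rdpr_tt() & WTRAP_TTMASK) == WTRAP_TYPE &&
	 *		    (rdpr_tstate() & TSTATE_PRIV) == 0)
	 *			arg2 |= 1ULL << ERRH_U_SPILL_FILL_SHIFT;
	 *	}
	 */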
	cmp	%g2, 2
	bne,pt	%xcc, 3f			! if tl != 2
	nop
	/* Check to see if the trap pc is in a window spill/fill handler */
	rdpr	%tpc, %g4
	/* tpc should be in the trap table */
	set	trap_table, %g5
	cmp	%g4, %g5
	blu,pt	%xcc, 3f
	nop
	set	etrap_table, %g5
	cmp	%g4, %g5
	bgeu,pt	%xcc, 3f
	nop
	/* Set tl to 1 in order to read tt[1] and tstate[1] */
	wrpr	%g0, 1, %tl
	rdpr	%tt, %g4			! %g4 = tt[1]
	/* Check if tt[1] is a window trap */
	and	%g4, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pt	%xcc, 3f
	nop
	rdpr	%tstate, %g5			! %g5 = tstate[1]
	btst	TSTATE_PRIV, %g5
	bnz	%xcc, 3f			! branch if not from user code
	nop
	/*
	 * Now we know the error happened in the user's SPILL/FILL trap.
	 * Turn on the user spill/fill flag in %g2.
	 */
	mov	1, %g4
	sllx	%g4, ERRH_U_SPILL_FILL_SHIFT, %g4
	or	%g2, %g4, %g2			! turn on flag in %g2

3:	sub	%g2, 1, %g2			! %g2.l = previous tl

	set	process_nonresumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

	/*
	 * We are here because the C routine is not able to process
	 * errors in time, so the first 8 bytes of the ER in the buf have
	 * not been cleared. We call sys_trap to panic.
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
1:	set	nrq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(nonresumable_error)
#endif /* lint */
