/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/machasi.h>
#include <sys/scb.h>
#include <sys/error.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#define	INTR_REPORT_SIZE	64

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


/*
 * (TT 0x7c, TL>0) CPU Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 */
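/*
 * Note on the queue format, as consumed by the code below (derived from
 * the code itself): each CPU mondo queue entry is a 64-byte
 * (INTR_REPORT_SIZE) interrupt report whose first three 8-byte words are
 * taken as the PC of the TL>0 fast trap handler and its two arguments;
 * the rest of the report is skipped.  Head and tail are byte offsets
 * accessed through ASI_QUEUE, and the queue size is evidently a power of
 * two, since the head is advanced as:
 *
 *	head = (head + INTR_REPORT_SIZE) & (queue_size - 1);
 *
 * As with dev_mondo below, only one entry is consumed per trap.
 */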
	ENTRY_NP(cpu_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base PA
	!	%g4 	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	CPU_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	CPU_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 3f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_CPU_Q_BASE], %g3	! %g3 = queue base PA
	ldx	[%g2 + MCPU_CPU_Q_SIZE], %g4	! queue size
	sub	%g4, 1, %g4		! %g4 = queue size mask

	! Load interrupt receive data registers 1 and 2 to fetch
	! the arguments for the fast trap handler.
	!
	! XXX - Since the data words in the interrupt report are not defined yet
	! we assume that the consecutive words contain valid data and preserve
	! sun4u's xcall mondo arguments.
	! Register usage:
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2

	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get PC from q base + head
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g1 ! read data word 1
	add	%g6, 0x8, %g6		! inc head
	ldxa	[%g3 + %g6]ASI_MEM, %g2	! read data word 2
	add	%g6, (INTR_REPORT_SIZE - 16), %g6 ! inc head to next record
	and	%g6, %g4, %g6		! and size mask for wrap around
	mov	CPU_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! store head pointer
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g4 + TRAP_ENT_SP]%asi
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! pc of the TL>0 handler
	stna	%g1, [%g4 + TRAP_ENT_F1]%asi	! arg1
	stna	%g2, [%g4 + TRAP_ENT_F3]%asi	! arg2
	mov	CPU_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! new head offset
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi
	stna	%g7, [%g4 + TRAP_ENT_F4]%asi	! tail offset
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	/*
	 * For now catch invalid PC being passed via cpu_mondo queue
	 */
	set	KERNELBASE, %g4
	cmp	%g5, %g4
	bl,pn	%xcc, 2f		! branch if bad %pc
	  nop


	/*
	 * If this platform supports shared contexts and we are jumping
	 * to OBP code, then we need to invalidate both contexts to prevent OBP
	 * from corrupting the shared context registers.
	 *
	 * If shared contexts are not supported then the next two instructions
	 * will be patched with:
	 *
	 * jmp       %g5
	 * nop
	 *
	 */
	.global sfmmu_shctx_cpu_mondo_patch
sfmmu_shctx_cpu_mondo_patch:
	set	OFW_START_ADDR, %g4	! Check if this is a call into OBP
	cmp	%g5, %g4
	bl,pt %xcc, 1f
	  nop
	set	OFW_END_ADDR, %g4
	cmp	%g5, %g4
	bg,pn %xcc, 1f
	  nop
	mov	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g4
	cmp	%g4, INVALID_CONTEXT	! Check if we are in kernel mode
	ble,pn %xcc, 1f			! or the primary context is invalid
	  nop
	set	INVALID_CONTEXT, %g4	! Invalidate contexts - compatibility
	stxa    %g4, [%g3]ASI_MMU_CTX	! mode ensures shared contexts are also
	mov     MMU_SCONTEXT, %g3	! invalidated.
	stxa    %g4, [%g3]ASI_MMU_CTX
	membar  #Sync
	mov	%o0, %g3		! save output regs
	mov	%o1, %g4
	mov	%o5, %g6
	clr	%o0			! Invalidate tsbs, set ntsb = 0
	clr	%o1			! and HV_TSB_INFO_PA = 0
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP		! set TSB info for user process
	brnz,a,pn %o0, ptl1_panic
	  mov	PTL1_BAD_HCALL, %g1
	mov	%g3, %o0		! restore output regs
	mov	%g4, %o1
	mov	%g6, %o5
1:
	jmp	%g5			! jump to traphandler
	nop
2:
	! invalid trap handler, discard it for now
	set	cpu_mondo_inval, %g4
	ldx	[%g4], %g5
	inc	%g5
	stx	%g5, [%g4]
3:
	retry
	/* Never Reached */
	SET_SIZE(cpu_mondo)


/*
 * (TT 0x7d, TL>0) Dev Mondo Queue Handler
 *	Globals are the Interrupt Globals.
 * We only process one interrupt at a time, so we keep taking this
 * trap until the queue is empty.
 * We really should drain the whole queue for better performance,
 * but this will do for now.
 */
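/*
 * Note on the entry format handled below (derived from the code itself):
 * each device mondo queue entry is a 64-byte interrupt report whose first
 * 8-byte word is the inum.  If the inum is valid and its intr_vec_t has a
 * payload buffer, the whole 64-byte report is copied to that buffer
 * before the head is advanced by INTR_REPORT_SIZE and masked with
 * (queue size - 1).  Dispatch itself is left to setvecint_tl1, which
 * expects the inum in %g1.
 */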
	ENTRY_NP(dev_mondo)
	!
	!	Register Usage:-
	!	%g5	PC for fasttrap TL>0 handler
	!	%g1	arg 1
	!	%g2	arg 2
	!	%g3	queue base PA
	!	%g4 	queue size mask
	!	%g6	head ptr
	!	%g7	tail ptr
	mov	DEV_MONDO_Q_HD, %g3
	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
	mov	DEV_MONDO_Q_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
	cmp	%g6, %g7
	be,pn	%xcc, 0f		! head == tail
	nop

	CPU_ADDR(%g1,%g2)
	add	%g1, CPU_MCPU, %g2
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g3	! %g3 = queue base PA

	! Register usage:
	!	%g5 - inum
	!	%g1 - cpu struct pointer used below in TRAPTRACE
	!
	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get inum from q base + head

	!
	! We verify that the inum is valid (< MAXIVNUM). If it is not,
	! we let setvecint_tl1 take care of it.
	!
	set	MAXIVNUM, %g4
	cmp	%g5, %g4
	bgeu,a,pn	%xcc, 1f
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Copy 64-byte payload to the *iv_payload if it is not NULL
	!
	set	intr_vec_table, %g1		! %g1 = intr_vec_table
	sll	%g5, CPTRSHIFT, %g7		! %g7 = offset to inum entry
						!       in the intr_vec_table
	add	%g1, %g7, %g7			! %g7 = &intr_vec_table[inum]
	ldn	[%g7], %g1			! %g1 = ptr to intr_vec_t (iv)

	!
	! Verify that the pointer to the first intr_vec_t for this inum is
	! not NULL. If it is NULL, this is a spurious interrupt; in that
	! case, just call setvecint_tl1 and it will handle the spurious
	! interrupt.
	!
	brz,a,pn	%g1, 1f			! if %g1 is NULL
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	ldx	[%g1 + IV_PAYLOAD_BUF], %g1	! %g1 = iv->iv_payload_buf
	brz,a,pt	%g1, 1f			! if it is NULL
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot

	!
	!	Now move 64 byte payload from mondo queue to buf
	!
	mov	%g6, %g7			! %g7 = head ptr
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 0]			! byte 0 - 7
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 8]			! byte 8 - 15
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 16]			! byte 16 - 23
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 24]			! byte 24 - 31
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 32]			! byte 32 - 39
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 40]			! byte 40 - 47
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 48]			! byte 48 - 55
	add	%g7, 8, %g7
	ldxa	[%g3 + %g7]ASI_MEM, %g4
	stx	%g4, [%g1 + 56]			! byte 56 - 63
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size

1:	sub	%g4, 1, %g4		! %g4 = queue size mask
	add	%g6, INTR_REPORT_SIZE, %g6	! inc head to next record
	and	%g6, %g4, %g6		! and mask for wrap around
	mov	DEV_MONDO_Q_HD, %g3
	stxa	%g6, [%g3]ASI_QUEUE	! increment head offset
	membar	#Sync

#ifdef TRAPTRACE
	TRACE_PTR(%g4, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
	! use the sp slot of the trace entry for the queue base
	ldx	[%g2 + MCPU_DEV_Q_BASE], %g6
	stna	%g6, [%g4 + TRAP_ENT_SP]%asi	! Device Queue Base PA
	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! Inum
	mov	DEV_MONDO_Q_HD, %g6
	ldxa	[%g6]ASI_QUEUE, %g6		! New head offset
	stna	%g6, [%g4 + TRAP_ENT_F1]%asi
	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g6
	stna	%g6, [%g4 + TRAP_ENT_F2]%asi	! Q Size
	stna	%g7, [%g4 + TRAP_ENT_F3]%asi	! tail offset
	stna	%g0, [%g4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g4, %g6, %g3)
#endif /* TRAPTRACE */

	!
	! setvecint_tl1 will do all the work, and finish with a retry
	!
	ba,pt	%xcc, setvecint_tl1
	mov	%g5, %g1		! setvecint_tl1 expects inum in %g1

0:	retry

	/* Never Reached */
	SET_SIZE(dev_mondo)

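/*
 * Note (derived from the code above): cpu_mondo_inval counts CPU mondos
 * that were discarded because the handler PC in the queue entry was
 * below KERNELBASE (see the bad-%pc check in cpu_mondo).
 */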
	.seg	".data"
	.global	cpu_mondo_inval
	.align	8
cpu_mondo_inval:
	.skip	8

	.seg	".text"


/*
 * (TT 0x7e, TL>0) Resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the resumable
 *	queue in memory, then call a C routine to process them.
 */
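/*
 * Sketch of the drain loop below (illustrative pseudocode only, derived
 * from the code; the shadow buffer lives at queue PA + CPU_RQ_SIZE):
 *
 *	while (head != tail) {
 *		if (first 8 bytes of shadow[head] != 0)
 *			goto overflow;	// C code has not consumed it yet
 *		copy 64 bytes from queue[head] to shadow[head];
 *		head = (head + Q_ENTRY_SIZE) & (CPU_RQ_SIZE - 1);
 *	}
 *	update head via ASI_QUEUE, then sys_trap(process_resumable_error)
 *	at PIL 14 unless we are already at PIL 15.
 */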
	ENTRY_NP(resumable_error)
	mov	CPU_RQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_RQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! save head in %g2

	cmp	%g6, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	CPU_ADDR(%g1, %g4)			! %g1 = cpu struct addr

2:	set	CPU_RQ_BASE_OFF, %g4
	ldx	[%g1 + %g4], %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_RQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_RQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now, so we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_RQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset
	membar	#Sync

	/*
	 * Call sys_trap at PIL 14 unless we're already at PIL 15.
	 * %g2.l is the head offset (arg2) and %g3 is the tail offset (arg3).
	 */
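	/*
	 * Note on the cmp/movl pair below: it implements the "PIL 14
	 * unless already at PIL 15" rule.  The movl in the branch delay
	 * slot moves PIL_14 into %g4 only when the current PIL compares
	 * less than PIL_14, so sys_trap is entered with
	 * %g4 = max(current PIL, PIL_14).  The same idiom is used at the
	 * other sys_trap call sites in this file.
	 */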
	set	process_resumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

	/*
	 * We are here because the C routine was not able to process
	 * errors in time, so the first 8 bytes of the ER in the buffer
	 * have not been cleared. We set head equal to tail and call
	 * sys_trap to print out an error message.
	 */

1:	mov	CPU_RQ_HD, %g4
	stxa	%g3, [%g4]ASI_QUEUE		! set head equal to tail
	membar	#Sync

	/*
	 * Set %g2 to %g6, which is the current head offset. %g2
	 * is arg2 of the C routine. %g3 is the tail offset,
	 * which is arg3 of the C routine.
	 * Call rq_overflow at PIL 14 unless we're already at PIL 15.
	 */
	mov	%g6, %g2
	set	rq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(resumable_error)

/*
 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
 *	We keep a shadow copy of the queue in a kernel buffer.
 *	Read the non-resumable queue head and tail offsets.
 *	If there are entries on the queue, move them to
 *	the kernel buffer, which sits next to the non-resumable
 *	queue in memory, then call a C routine to process them.
 */
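/*
 * Note (summarizing the code below): this handler differs from
 * resumable_error in a few ways.  It forces %gl to 1 up front, as
 * sys_trap requires, and therefore re-reads the head and tail offsets
 * after switching global register sets; it locates the cpu struct by
 * physical address (CPU_PADDR / ASI_MEM); it packs the tail and head
 * offsets into %g3 as (tail << 32) | head; and it passes the previous TL
 * in %g2, with an extra flag bit set when the first error was taken in a
 * user window spill/fill trap.
 */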
	ENTRY_NP(nonresumable_error)
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset

	cmp	%g2, %g3
	be,pn	%xcc, 0f			! head == tail
	nop

	/* force %gl to 1 as sys_trap requires */
	wrpr	%g0, 1, %gl
	mov	CPU_NRQ_HD, %g4
	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
	mov	CPU_NRQ_TL, %g4
	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
	mov	%g2, %g6			! save head in %g2

	CPU_PADDR(%g1, %g4)			! %g1 = cpu struct paddr

2:	set	CPU_NRQ_BASE_OFF, %g4
	ldxa	[%g1 + %g4]ASI_MEM, %g4		! %g4 = queue base PA
	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
	set	CPU_NRQ_SIZE, %g7
	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf

	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 bytes of ER buf
	cmp	0, %g5
	bne,pn	%xcc, 1f			! first 8 bytes are not 0
	nop

	/* Now we can move 64 bytes from queue to buf */
	set	0, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
	add	%g5, 8, %g5
	ldxa	[%g4 + %g5]ASI_MEM, %g1
	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63

	set	CPU_NRQ_SIZE, %g5		! %g5 = queue size
	sub	%g5, 1, %g5			! %g5 = queue size mask

	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
	and	%g6, %g5, %g6			! size mask for wrap around
	cmp	%g6, %g3			! head == tail ??

	bne,pn	%xcc, 2b			! still have more to process
	nop

	/*
	 * head equals tail now, so we can update the queue head
	 * and call sys_trap
	 */
	mov	CPU_NRQ_HD, %g4
	stxa	%g6, [%g4]ASI_QUEUE		! update head offset
	membar	#Sync

	/*
	 * Call sys_trap. %g2 is TL (arg2), %g3 is the head and tail
	 * offsets (arg3).
	 * %g3 looks like the following:
	 *	+--------------------+--------------------+
	 *	|   tail offset      |    head offset     |
	 *	+--------------------+--------------------+
	 *	63                 32 31                 0
	 *
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
	sllx	%g3, 32, %g3			! %g3.h = tail offset
	or	%g3, %g2, %g3			! %g3.l = head offset
	rdpr	%tl, %g2			! %g2 = current tl

	/*
	 * Now check if the first error that sent us here was caused
	 * in the user's SPILL/FILL trap. If it was, we call sys_trap to
	 * kill the user process. Several considerations:
	 * - If multiple nonresumable errors happen, we only check the
	 *   first one. A nonresumable error causes the system to either
	 *   panic or kill the user process, so after the first error is
	 *   processed the system has already panicked or killed the
	 *   process. There is therefore no need to check the other error
	 *   packets for this type of error.
	 * - Errors that happen in the user's SPILL/FILL trap bring us in
	 *   at TL = 2.
	 * - We need to lower TL to 1 to get the trap type and tstate.
	 *   We don't go back to TL = 2, so there is no need to save state.
	 */
	cmp	%g2, 2
	bne,pt	%xcc, 3f			! if tl != 2
	nop
	/* Check to see if the trap pc is in a window spill/fill handler */
	rdpr	%tpc, %g4
	/* tpc should be in the trap table */
	set	trap_table, %g5
	cmp	%g4, %g5
	blu,pt	%xcc, 3f
	nop
	set	etrap_table, %g5
	cmp	%g4, %g5
	bgeu,pt	%xcc, 3f
	nop
	/* Set tl to 1 in order to read tt[1] and tstate[1] */
	wrpr	%g0, 1, %tl
	rdpr	%tt, %g4			! %g4 = tt[1]
	/* Check if tt[1] is a window trap */
	and	%g4, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pt	%xcc, 3f
	nop
	rdpr	%tstate, %g5			! %g5 = tstate[1]
	btst	TSTATE_PRIV, %g5
	bnz	%xcc, 3f			! Is it from user code?
	nop
	/*
	 * Now we know the error happened in the user's SPILL/FILL trap.
	 * Turn on the user spill/fill flag in %g2
	 */
	mov	1, %g4
	sllx	%g4, ERRH_U_SPILL_FILL_SHIFT, %g4
	or	%g2, %g4, %g2			! turn on flag in %g2

3:	sub	%g2, 1, %g2			! %g2.l = previous tl

	set	process_nonresumable_error, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

	/*
	 * We are here because the C routine was not able to process
	 * errors in time, so the first 8 bytes of the ER in the buffer
	 * have not been cleared. We call sys_trap to panic.
	 * Run at PIL 14 unless we're already at PIL 15.
	 */
1:	set	nrq_overflow, %g1
	rdpr	%pil, %g4
	cmp	%g4, PIL_14
	ba	sys_trap
	  movl	%icc, PIL_14, %g4

0:	retry

	/*NOTREACHED*/
	SET_SIZE(nonresumable_error)