xref: /titanic_51/usr/src/uts/sun4v/ml/mach_interrupt.s (revision 7f79af0b29c00a006403444f61b261219f63cfbf)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#if defined(lint)
27#include <sys/types.h>
28#include <sys/thread.h>
29#else	/* lint */
30#include "assym.h"
31#endif	/* lint */
32
33#include <sys/asm_linkage.h>
34#include <sys/machthread.h>
35#include <sys/machcpuvar.h>
36#include <sys/intreg.h>
37#include <sys/cmn_err.h>
38#include <sys/ftrace.h>
39#include <sys/machasi.h>
40#include <sys/scb.h>
41#include <sys/error.h>
42#include <sys/mmu.h>
43#include <vm/hat_sfmmu.h>
44
45#define	INTR_REPORT_SIZE	64
46#define	ERRH_ASI_SHIFT		56		/* bits[63:56]; see errh_er_t */
47#define	NRE_ASI			0x00000001	/* ASI observed in attr field */
48#define	NRE_CTX			0x00000002	/* ASI equals ASI_MMU_CTX */
49#define	CRP_OBSERVED		(NRE_ASI | NRE_CTX)
50
/*
 * OR_MCPU_NRE_ERROR(reg1, reg2, val):
 *	OR 'val' into CPU->cpu_m.cpu_nre_error, addressed physically as
 *	reg1 + CPU_MCPU + MCPU_NRE_ERROR and accessed via ASI_MEM.
 *	reg1 (in): PA of the cpu struct.  Both reg1 and reg2 are clobbered
 *	(reg2 holds the field PA, reg1 the updated value on exit).
 */
51#define	OR_MCPU_NRE_ERROR(reg1,reg2,val)	\
52	add	reg1, CPU_MCPU, reg2;		\
53	add	reg2, MCPU_NRE_ERROR, reg2;	\
54	ldxa	[reg2]ASI_MEM, reg1;		\
55	or	reg1, val, reg1;		\
56	stxa	reg1, [reg2]ASI_MEM
57
58
59#ifdef TRAPTRACE
60#include <sys/traptrace.h>
61#endif /* TRAPTRACE */
62
63#if defined(lint)
64
65void
66cpu_mondo(void)
67{}
68
69#else	/* lint */
70
71
72/*
73 * (TT 0x7c, TL>0) CPU Mondo Queue Handler
74 *	Globals are the Interrupt Globals.
75 */
76	ENTRY_NP(cpu_mondo)
77	!
78	!	Register Usage:-
79	!	%g5	PC for fasttrap TL>0 handler
80	!	%g1	arg 1
81	!	%g2	arg 2
82	!	%g3	queue base VA
83	!	%g4 	queue size mask
84	!	%g6	head ptr
85	!	%g7	tail ptr
86	mov	CPU_MONDO_Q_HD, %g3
87	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
88	mov	CPU_MONDO_Q_TL, %g4
89	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
90	cmp	%g6, %g7
91	be,pn	%xcc, 3f		! head == tail
92	nop
93
94	CPU_ADDR(%g1,%g2)
95	add	%g1, CPU_MCPU, %g2
96	ldx	[%g2 + MCPU_CPU_Q_BASE], %g3	! %g3 = queue base PA
97	ldx	[%g2 + MCPU_CPU_Q_SIZE], %g4	! queue size
98	sub	%g4, 1, %g4		! %g4 = queue size mask
99
100	! Load interrupt receive data registers 1 and 2 to fetch
101	! the arguments for the fast trap handler.
102	!
103	! XXX - Since the data words in the interrupt report are not defined yet
104	! we assume that the consecutive words contain valid data and preserve
105	! sun4u's xcall mondo arguments.
106	! Register usage:
107	!	%g5	PC for fasttrap TL>0 handler
108	!	%g1	arg 1
109	!	%g2	arg 2
110
111	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get PC from q base + head
112	add	%g6, 0x8, %g6		! inc head
113	ldxa	[%g3 + %g6]ASI_MEM, %g1 ! read data word 1
114	add	%g6, 0x8, %g6		! inc head
115	ldxa	[%g3 + %g6]ASI_MEM, %g2	! read data word 2
116	add	%g6, (INTR_REPORT_SIZE - 16) , %g6 ! inc head to next record
117	and	%g6, %g4, %g6 		! and size mask for wrap around
118	mov	CPU_MONDO_Q_HD, %g3
119	stxa	%g6, [%g3]ASI_QUEUE	! store head pointer
120	membar	#Sync
121
122#ifdef TRAPTRACE
123	TRACE_PTR(%g4, %g6)
124	GET_TRACE_TICK(%g6)
125	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
126	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
127	rdpr	%tt, %g6
128	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
129	rdpr	%tpc, %g6
130	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
131	rdpr	%tstate, %g6
132	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
133	stna	%sp, [%g4 + TRAP_ENT_SP]%asi
134	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! pc of the TL>0 handler
135	stna	%g1, [%g4 + TRAP_ENT_F1]%asi	! arg1
136	stna	%g2, [%g4 + TRAP_ENT_F3]%asi	! arg2
137	mov	CPU_MONDO_Q_HD, %g6
138	ldxa	[%g6]ASI_QUEUE, %g6		! new head offset
139	stna	%g6, [%g4 + TRAP_ENT_F2]%asi
140	stna	%g7, [%g4 + TRAP_ENT_F4]%asi	! tail offset
141	TRACE_NEXT(%g4, %g6, %g3)
142#endif /* TRAPTRACE */
143
144	/*
145	 * For now catch invalid PC being passed via cpu_mondo queue
146	 */
147	set	KERNELBASE, %g4
148	cmp	%g5, %g4
149	bl,pn	%xcc, 2f		! branch if bad %pc
150	  nop
151
152
153	/*
154	 * If this platform supports shared contexts and we are jumping
155	 * to OBP code, then we need to invalidate both contexts to prevent OBP
156	 * from corrupting the shared context registers.
157	 *
158	 * If shared contexts are not supported then the next two instructions
159	 * will be patched with:
160	 *
161	 * jmp       %g5
162	 * nop
163	 *
164	 */
165	.global sfmmu_shctx_cpu_mondo_patch
166sfmmu_shctx_cpu_mondo_patch:
167	set	OFW_START_ADDR, %g4	! Check if this is a call into OBP?
168	cmp	%g5, %g4
169	bl,pt %xcc, 1f
170	  nop
171	set	OFW_END_ADDR, %g4
172	cmp	%g5, %g4
173	bg,pn %xcc, 1f
174	  nop
175	mov	MMU_PCONTEXT, %g3
176	ldxa	[%g3]ASI_MMU_CTX, %g4
177	cmp	%g4, INVALID_CONTEXT	! Check if we are in kernel mode
178	ble,pn %xcc, 1f			! or the primary context is invalid
179	  nop
180	set	INVALID_CONTEXT, %g4	! Invalidate contexts - compatibility
181	stxa    %g4, [%g3]ASI_MMU_CTX	! mode ensures shared contexts are also
182	mov     MMU_SCONTEXT, %g3	! invalidated.
183	stxa    %g4, [%g3]ASI_MMU_CTX
184	membar  #Sync
185	mov	%o0, %g3		! save output regs
186	mov	%o1, %g4
187	mov	%o5, %g6
188	clr	%o0			! Invalidate tsbs, set ntsb = 0
189	clr	%o1			! and HV_TSB_INFO_PA = 0
190	mov	MMU_TSB_CTXNON0, %o5
191	ta	FAST_TRAP		! set TSB info for user process
192	brnz,a,pn %o0, ptl1_panic
193	  mov	PTL1_BAD_HCALL, %g1
194	mov	%g3, %o0		! restore output regs
195	mov	%g4, %o1
196	mov	%g6, %o5
1971:
198	jmp	%g5			! jump to traphandler
199	nop
2002:
201	! invalid trap handler, discard it for now
202	set	cpu_mondo_inval, %g4
203	ldx	[%g4], %g5
204	inc	%g5
205	stx	%g5, [%g4]
2063:
207	retry				! queue empty or entry discarded
208	/* Never Reached */
209	SET_SIZE(cpu_mondo)
210
211#endif /* lint */
212
213#if defined(lint)
214
215void
216dev_mondo(void)
217{}
218
219#else	/* lint */
220
221
222/*
223 * (TT 0x7d, TL>0) Dev Mondo Queue Handler
224 *	Globals are the Interrupt Globals.
225 * We only process one interrupt at a time causing us to keep
226 * taking this trap till the queue is empty.
227 * We really should drain the whole queue for better performance
228 * but this will do for now.
229 */
230	ENTRY_NP(dev_mondo)
231	!
232	!	Register Usage:-
233	!	%g5	PC for fasttrap TL>0 handler
234	!	%g1	arg 1
235	!	%g2	arg 2
236	!	%g3	queue base PA
237	!	%g4 	queue size mask
238	!	%g6	head ptr
239	!	%g7	tail ptr
240	mov	DEV_MONDO_Q_HD, %g3
241	ldxa	[%g3]ASI_QUEUE, %g6	! %g6 = head ptr
242	mov	DEV_MONDO_Q_TL, %g4
243	ldxa	[%g4]ASI_QUEUE, %g7	! %g7 = tail ptr
244	cmp	%g6, %g7
245	be,pn	%xcc, 0f		! head == tail
246	nop
247
248	CPU_ADDR(%g1,%g2)
249	add	%g1, CPU_MCPU, %g2
250	ldx	[%g2 + MCPU_DEV_Q_BASE], %g3	! %g3 = queue base PA
251
252	! Register usage:
253	!	%g5 - inum
254	!	%g1 - cpu struct pointer used below in TRAPTRACE
255	!
256	ldxa	[%g3 + %g6]ASI_MEM, %g5	! get inum from q base + head
257
258	!
259	! We verify that inum is valid ( < MAXIVNUM). If it is greater
260	! than or equal to MAXIVNUM, we let setvecint_tl1 take care of it.
261	!
262	set	MAXIVNUM, %g4
263	cmp	%g5, %g4
264	bgeu,a,pn	%xcc, 1f
265	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot
266
267	!
268	!	Copy 64-byte payload to the *iv_payload if it is not NULL
269	!
270	set	intr_vec_table, %g1		! %g1 = intr_vec_table
271	sll	%g5, CPTRSHIFT, %g7		! %g7 = offset to inum entry
272						!       in the intr_vec_table
273	add	%g1, %g7, %g7			! %g7 = &intr_vec_table[inum]
274	ldn	[%g7], %g1			! %g1 = ptr to intr_vec_t (iv)
275
276	!
277	! Verify the pointer to first intr_vec_t for a given inum and
278	! it should not be NULL. If this pointer is NULL, then it is a
279	! spurious interrupt. In this case, just call setvecint_tl1 and
280	! it will handle this spurious interrupt.
281	!
282	brz,a,pn	%g1, 1f			! if %g1 is NULL
283	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot
284
285	ldx	[%g1 + IV_PAYLOAD_BUF], %g1	! %g1 = iv->iv_payload_buf
286	brz,a,pt	%g1, 1f			! if it is NULL
287	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size - delay slot
288
289	!
290	!	Now move 64 byte payload from mondo queue to buf
291	!
292	mov	%g6, %g7			! %g7 = head ptr
293	ldxa	[%g3 + %g7]ASI_MEM, %g4
294	stx	%g4, [%g1 + 0]			! byte 0 - 7
295	add	%g7, 8, %g7
296	ldxa	[%g3 + %g7]ASI_MEM, %g4
297	stx	%g4, [%g1 + 8]			! byte 8 - 15
298	add	%g7, 8, %g7
299	ldxa	[%g3 + %g7]ASI_MEM, %g4
300	stx	%g4, [%g1 + 16]			! byte 16 - 23
301	add	%g7, 8, %g7
302	ldxa	[%g3 + %g7]ASI_MEM, %g4
303	stx	%g4, [%g1 + 24]			! byte 24 - 31
304	add	%g7, 8, %g7
305	ldxa	[%g3 + %g7]ASI_MEM, %g4
306	stx	%g4, [%g1 + 32]			! byte 32 - 39
307	add	%g7, 8, %g7
308	ldxa	[%g3 + %g7]ASI_MEM, %g4
309	stx	%g4, [%g1 + 40]			! byte 40 - 47
310	add	%g7, 8, %g7
311	ldxa	[%g3 + %g7]ASI_MEM, %g4
312	stx	%g4, [%g1 + 48]			! byte 48 - 55
313	add	%g7, 8, %g7
314	ldxa	[%g3 + %g7]ASI_MEM, %g4
315	stx	%g4, [%g1 + 56]			! byte 56 - 63
316	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g4	! queue size
317
3181:	sub	%g4, 1, %g4		! %g4 = queue size mask
319	add	%g6, INTR_REPORT_SIZE , %g6 ! inc head to next record
320	and	%g6, %g4, %g6 		! and mask for wrap around
321	mov	DEV_MONDO_Q_HD, %g3
322	stxa	%g6, [%g3]ASI_QUEUE	! increment head offset
323	membar	#Sync
324
325#ifdef TRAPTRACE
326	TRACE_PTR(%g4, %g6)
327	GET_TRACE_TICK(%g6)
328	stxa	%g6, [%g4 + TRAP_ENT_TICK]%asi
329	TRACE_SAVE_TL_GL_REGS(%g4, %g6)
330	rdpr	%tt, %g6
331	stha	%g6, [%g4 + TRAP_ENT_TT]%asi
332	rdpr	%tpc, %g6
333	stna	%g6, [%g4 + TRAP_ENT_TPC]%asi
334	rdpr	%tstate, %g6
335	stxa	%g6, [%g4 + TRAP_ENT_TSTATE]%asi
336	! move head to sp
337	ldx	[%g2 + MCPU_DEV_Q_BASE], %g6
338	stna	%g6, [%g4 + TRAP_ENT_SP]%asi	! Device Queue Base PA
339	stna	%g5, [%g4 + TRAP_ENT_TR]%asi	! Inum
340	mov	DEV_MONDO_Q_HD, %g6
341	ldxa	[%g6]ASI_QUEUE, %g6		! New head offset
342	stna	%g6, [%g4 + TRAP_ENT_F1]%asi
343	ldx	[%g2 + MCPU_DEV_Q_SIZE], %g6
344	stna	%g6, [%g4 + TRAP_ENT_F2]%asi	! Q Size
345	stna	%g7, [%g4 + TRAP_ENT_F3]%asi	! tail offset
346	stna	%g0, [%g4 + TRAP_ENT_F4]%asi
347	TRACE_NEXT(%g4, %g6, %g3)
348#endif /* TRAPTRACE */
349
350	!
351	! setvecint_tl1 will do all the work, and finish with a retry
352	!
353	ba,pt	%xcc, setvecint_tl1
354	mov	%g5, %g1		! setvecint_tl1 expects inum in %g1
355
3560:	retry				! queue was empty
357
358	/* Never Reached */
359	SET_SIZE(dev_mondo)
360#endif /* lint */
361
/*
 * cpu_mondo_inval: count of cpu_mondo interrupts discarded because the
 * queued handler PC was invalid (below KERNELBASE); bumped at cpu_mondo's
 * "2:" path above.
 */
362#if defined(lint)
363uint64_t cpu_mondo_inval;
364#else /* lint */
365	.seg	".data"
366	.global	cpu_mondo_inval
367	.align	8
368cpu_mondo_inval:
369	.skip	8
370
371	.seg	".text"
372#endif	/* lint */
373
374
375#if defined(lint)
376
377void
378resumable_error(void)
379{}
380
381#else	/* lint */
382
383/*
384 * (TT 0x7e, TL>0) Resumable Error Queue Handler
385 *	We keep a shadow copy of the queue in kernel buf.
386 *	Read the resumable queue head and tail offset
387 *	If there are entries on the queue, move them to
388 *	the kernel buf, which is next to the resumable
389 *	queue in the memory. Call C routine to process.
390 */
391	ENTRY_NP(resumable_error)
	!
	!	Register Usage:-
	!	%g2	saved head offset (becomes sys_trap arg2)
	!	%g3	tail offset (becomes sys_trap arg3)
	!	%g6	working head offset
	!
392	mov	CPU_RQ_HD, %g4
393	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
394	mov	CPU_RQ_TL, %g4
395	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
396	mov	%g2, %g6			! save head in %g2
397
398	cmp	%g6, %g3
399	be,pn	%xcc, 0f			! head == tail
400	nop
401
402	CPU_ADDR(%g1, %g4)			! %g1 = cpu struct addr
403
4042:	set	CPU_RQ_BASE_OFF, %g4
405	ldx	[%g1 + %g4], %g4		! %g4 = queue base PA
406	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
407	set	CPU_RQ_SIZE, %g7
408	add	%g4, %g7, %g7			! %g7=PA of ER in kernel buf
409
410	ldxa	[%g7]ASI_MEM, %g5		! %g5=first 8 byte of ER buf
411	cmp	0, %g5
412	bne,pn	%xcc, 1f			! first 8 byte is not 0
413	nop
414
415	/* Now we can move 64 bytes from queue to buf */
416	set	0, %g5
417	ldxa	[%g4 + %g5]ASI_MEM, %g1
418	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
419	add	%g5, 8, %g5
420	ldxa	[%g4 + %g5]ASI_MEM, %g1
421	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
422	add	%g5, 8, %g5
423	ldxa	[%g4 + %g5]ASI_MEM, %g1
424	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
425	add	%g5, 8, %g5
426	ldxa	[%g4 + %g5]ASI_MEM, %g1
427	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
428	add	%g5, 8, %g5
429	ldxa	[%g4 + %g5]ASI_MEM, %g1
430	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
431	add	%g5, 8, %g5
432	ldxa	[%g4 + %g5]ASI_MEM, %g1
433	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
434	add	%g5, 8, %g5
435	ldxa	[%g4 + %g5]ASI_MEM, %g1
436	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
437	add	%g5, 8, %g5
438	ldxa	[%g4 + %g5]ASI_MEM, %g1
439	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63
440
441	set	CPU_RQ_SIZE, %g5		! %g5 = queue size
442	sub	%g5, 1, %g5			! %g5 = queue size mask
443
444	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
445	and	%g6, %g5, %g6			! size mask for wrap around
446	cmp	%g6, %g3			! head == tail ??
447
448	bne,pn	%xcc, 2b			! still have more to process
449	nop
450
451	/*
452	 * head equals to tail now, we can update the queue head
453	 * and call sys_trap
454	 */
455	mov	CPU_RQ_HD, %g4
456	stxa	%g6, [%g4]ASI_QUEUE		! update head offset
457	membar	#Sync
458
459	/*
460	 * Call sys_trap at PIL 14 unless we're already at PIL 15. %g2.l is
461	 * head offset(arg2) and %g3 is tail
462	 * offset(arg3).
463	 */
464	set	process_resumable_error, %g1
465	rdpr	%pil, %g4
466	cmp	%g4, PIL_14
467	ba	sys_trap
468	  movl	%icc, PIL_14, %g4
469
470	/*
471	 * We are here because the C routine is not able to process
472	 * errors in time. So the first 8 bytes of ER in buf have not
473	 * been cleared. We update head to tail and call sys_trap to
474	 * print out an error message
475	 */
476
4771:	mov	CPU_RQ_HD, %g4
478	stxa	%g3, [%g4]ASI_QUEUE		! set head equal to tail
479	membar	#Sync
480
481	/*
482	 * Set %g2 to %g6, which is current head offset. %g2
483	 * is arg2 of the C routine. %g3 is the tail offset,
484	 * which is arg3 of the C routine.
485	 * Call rq_overflow at PIL 14 unless we're already at PIL 15.
486	 */
487	mov	%g6, %g2
488	set	rq_overflow, %g1
489	rdpr	%pil, %g4
490	cmp	%g4, PIL_14
491	ba	sys_trap
492	  movl	%icc, PIL_14, %g4
493
4940:	retry				! queue was empty
495
496	/*NOTREACHED*/
497	SET_SIZE(resumable_error)
498#endif /* lint */
499
500#if defined(lint)
501
502void
503nonresumable_error(void)
504{}
505
506#else	/* lint */
507
508/*
509 * (TT 0x7f, TL>0) Non-resumable Error Queue Handler
510 *	We keep a shadow copy of the queue in kernel buf.
511 *	Read non-resumable queue head and tail offset
512 *	If there are entries on the queue, move them to
513 *	the kernel buf, which is next to the non-resumable
514 *	queue in the memory. Call C routine to process.
515 */
516	ENTRY_NP(nonresumable_error)
517	mov	CPU_NRQ_HD, %g4
518	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
519	mov	CPU_NRQ_TL, %g4
520	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
521
522	cmp	%g2, %g3
523	be,pn	%xcc, 0f			! head == tail
524	nop
525
526	/* force %gl to 1 as sys_trap requires */
527	wrpr	%g0, 1, %gl
	/* re-read head/tail at %gl == 1; the prior reads were at trap %gl */
528	mov	CPU_NRQ_HD, %g4
529	ldxa	[%g4]ASI_QUEUE, %g2		! %g2 = Q head offset
530	mov	CPU_NRQ_TL, %g4
531	ldxa	[%g4]ASI_QUEUE, %g3		! %g3 = Q tail offset
532	mov	%g2, %g6			! save head in %g2
533
534	CPU_PADDR(%g1, %g4)			! %g1 = cpu struct paddr
535
536	add	%g1, CPU_MCPU, %g4
537	add	%g4, MCPU_NRE_ERROR, %g4	! &CPU->cpu_m.cpu_nre_error
538	stxa	%g0, [%g4]ASI_MEM		! clear cpu_nre_error
539
5402:	set	CPU_NRQ_BASE_OFF, %g4
541	ldxa	[%g1 + %g4]ASI_MEM, %g4		! %g4 = queue base PA
542	add	%g6, %g4, %g4			! %g4 = PA of ER in Q
543	set	CPU_NRQ_SIZE, %g7
544	add	%g4, %g7, %g7			! %g7 = PA of ER in kernel buf
545
546	ldxa	[%g7]ASI_MEM, %g5		! %g5 = first 8 byte of ER buf
547	cmp	0, %g5
548	bne,pn	%xcc, 1f			! first 8 byte is not 0
549	nop
550
551	/* BEGIN: move 64 bytes from queue to buf */
552	set	0, %g5
553	ldxa	[%g4 + %g5]ASI_MEM, %g1
554	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 0 - 7
555	add	%g5, 8, %g5
556	ldxa	[%g4 + %g5]ASI_MEM, %g1
557	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 8 - 15
558	add	%g5, 8, %g5
559	ldxa	[%g4 + %g5]ASI_MEM, %g1
560	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 16 - 23
561	/* Check for sun4v ASI */
562	and	%g1, ERRH_ATTR_ASI, %g1		! isolate ASI bit
563	cmp	%g1, ERRH_ATTR_ASI
564	bne,pt	%xcc, 3f
565	  nop
566	CPU_PADDR(%g1, %g5)
567	OR_MCPU_NRE_ERROR(%g1, %g5, NRE_ASI)	! cpu_nre_error |= NRE_ASI
5683:	set	24, %g5
569	ldxa	[%g4 + %g5]ASI_MEM, %g1
570	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 24 - 31
571	add	%g5, 8, %g5
572	ldxa	[%g4 + %g5]ASI_MEM, %g1
573	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 32 - 39
574	add	%g5, 8, %g5
575	ldxa	[%g4 + %g5]ASI_MEM, %g1
576	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 40 - 47
577	/* Check for ASI==ASI_MMU_CTX */
578	srlx	%g1, ERRH_ASI_SHIFT, %g1	! isolate the ASI field
579	cmp	%g1, ASI_MMU_CTX		! ASI=0x21 for CRP
580	bne,pt	%xcc, 4f
581	  nop
582	CPU_PADDR(%g1, %g5)
583	OR_MCPU_NRE_ERROR(%g1, %g5, NRE_CTX)	! cpu_nre_error |= NRE_CTX
5844:	set	48, %g5
585	ldxa	[%g4 + %g5]ASI_MEM, %g1
586	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 48 - 55
587	add	%g5, 8, %g5
588	ldxa	[%g4 + %g5]ASI_MEM, %g1
589	stxa	%g1, [%g7 + %g5]ASI_MEM		! byte 56 - 63
590	/* END: move 64 bytes from queue to buf */
591
592	set	CPU_NRQ_SIZE, %g5		! %g5 = queue size
593	sub	%g5, 1, %g5			! %g5 = queue size mask
594
595	add	%g6, Q_ENTRY_SIZE, %g6		! increment q head to next
596	and	%g6, %g5, %g6			! size mask for wrap around
597	cmp	%g6, %g3			! head == tail ??
598
599	bne,pn	%xcc, 2b			! still have more to process
600	nop
601
602	/*
603	 * head equals to tail now, we can update the queue head
604	 * and call sys_trap
605	 */
606	mov	CPU_NRQ_HD, %g4
607	stxa	%g6, [%g4]ASI_QUEUE		! update head offset
608	membar	#Sync
609
610	/*
611	 * For CRP, force a hat reload as if the context were stolen
612	 * by storing INVALID_CONTEXT in the secondary and nulling TSB.
613	 * Primary will be reset by usr_rtt for user-mode traps, or
614	 * has been reset in iae_crp or dae_crp for kernel-mode.
615	 */
616	CPU_PADDR(%g1, %g5)
617	add	%g1, CPU_MCPU, %g5
618	add	%g5, MCPU_NRE_ERROR, %g5	! &CPU->cpu_m.cpu_nre_error
619	ldxa	[%g5]ASI_MEM, %g4
620	cmp	%g4, CRP_OBSERVED		! confirm CRP
621	bne,pt	%xcc, 5f
622	  nop
623	mov	INVALID_CONTEXT, %g5		! force hat reload of context
624	mov	MMU_SCONTEXT, %g7
625	sethi	%hi(FLUSH_ADDR), %g4
626	stxa	%g5, [%g7]ASI_MMU_CTX		! set secondary context reg
627	flush	%g4
628	mov	%o0, %g4			! save output regs
629	mov	%o1, %g5
630	mov	%o5, %g7
631	mov	%g0, %o0			! ntsb = 0
632	mov	%g0, %o1			! HV_TSB_INFO_PA = 0
633	mov	MMU_TSB_CTXNON0, %o5
634	ta      FAST_TRAP			! null TSB
635	  nop
636	mov	%g4, %o0			! restore output regs
637	mov	%g5, %o1
638	mov	%g7, %o5
639
640	/*
641	 * Call sys_trap. %g2 is TL(arg2), %g3 is head and tail
642	 * offset(arg3).
643	 * %g3 looks like following:
644	 *	+--------------------+--------------------+
645	 *	|   tail offset      |    head offset     |
646	 *	+--------------------+--------------------+
647	 *	63                 32 31                 0
648	 *
649	 * Run at PIL 14 unless we're already at PIL 15.
650	 */
6515:	sllx	%g3, 32, %g3			! %g3.h = tail offset
652	or	%g3, %g2, %g3			! %g3.l = head offset
653	rdpr	%tl, %g2			! %g2 = current tl
654
655	/*
656	 * Now check if the first error that sent us here was caused
657	 * in user's SPILL/FILL trap. If it was, we call sys_trap to
658	 * kill the user process. Several considerations:
659	 * - If multiple nonresumable errors happen, we only check the
660	 *   first one. Nonresumable errors cause system either panic
661	 *   or kill the user process. So the system has already
662	 *   panic'ed or killed user process after processing the first
663	 *   error. Therefore, no need to check if other error packet
664	 *   for this type of error.
665	 * - Errors happen in user's SPILL/FILL trap will bring us at
666	 *   TL = 2.
667	 * - We need to lower TL to 1 to get the trap type and tstate.
668	 *   We don't go back to TL = 2 so no need to save states.
669	 */
670	cmp	%g2, 2
671	bne,pt	%xcc, 3f			! if tl != 2
672	nop
673	/* Check to see if the trap pc is in a window spill/fill handling */
674	rdpr	%tpc, %g4
675	/* tpc should be in the trap table */
676	set	trap_table, %g5
677	cmp	%g4, %g5
678	blu,pt	%xcc, 3f
679	nop
680	set	etrap_table, %g5
681	cmp	%g4, %g5
682	bgeu,pt	%xcc, 3f
683	nop
684	/* Set tl to 1 in order to read tt[1] and tstate[1] */
685	wrpr	%g0, 1, %tl
686	rdpr	%tt, %g4			! %g4 = tt[1]
687	/* Check if tt[1] is a window trap */
688	and	%g4, WTRAP_TTMASK, %g4
689	cmp	%g4, WTRAP_TYPE
690	bne,pt	%xcc, 3f
691	nop
692	rdpr	%tstate, %g5			! %g5 = tstate[1]
693	btst	TSTATE_PRIV, %g5
694	bnz	%xcc, 3f			! Is it from user code?
695	nop
696	/*
697	 * Now we know the error happened in user's SPILL/FILL trap.
698	 * Turn on the user spill/fill flag in %g2
699	 */
700	mov	1, %g4
701	sllx	%g4, ERRH_U_SPILL_FILL_SHIFT, %g4
702	or	%g2, %g4, %g2			! turn on flag in %g2
703
7043:	sub	%g2, 1, %g2			! %g2.l = previous tl
705
706	set	process_nonresumable_error, %g1
707	rdpr	%pil, %g4
708	cmp	%g4, PIL_14
709	ba	sys_trap
710	  movl	%icc, PIL_14, %g4
711
712	/*
713	 * We are here because the C routine is not able to process
714	 * errors in time. So the first 8 bytes of ER in buf have not
715	 * been cleared. We call sys_trap to panic.
716	 * Run at PIL 14 unless we're already at PIL 15.
717	 */
7181:	set	nrq_overflow, %g1
719	rdpr	%pil, %g4
720	cmp	%g4, PIL_14
721	ba	sys_trap
722	  movl	%icc, PIL_14, %g4
723
7240:	retry				! queue was empty
725
726	/*NOTREACHED*/
727	SET_SIZE(nonresumable_error)
728#endif /* lint */
729