/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */



#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * 	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g3 - intr_req
	!	%g4 - pil
	!	%g2, %g5, %g6 - temps
	!
	! grab the 1st intr_req off the list
	! if the list is empty, clear %clear_softint
	!
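	! Editor's sketch of the body of this handler in loose C (illustrative
	! only, not part of the build; field names follow the INTR_HEAD,
	! INTR_TAIL, INTR_NEXT, INTR_NUMBER and IV_PENDING offsets used below,
	! with intr_head[0] serving as the free list):
	!
	!	ir = cpu->intr_head[pil];		/* first pending request */
	!	cpu->intr_head[pil] = ir->intr_next;
	!	if (ir->intr_next == NULL) {		/* queue now empty */
	!		cpu->intr_tail[pil] = NULL;
	!		/* wr (1 << pil), CLEAR_SOFTINT */
	!	}
	!	inum = ir->intr_number;
	!	ir->intr_next = cpu->intr_head[0];	/* back on free list */
	!	cpu->intr_head[0] = ir;
	!	intr_vector[inum].iv_pending = 0;
	!	/* then set up registers and branch to sys_trap() */
	!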
	CPU_ADDR(%g1, %g5)
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5
	add	%g1, INTR_HEAD, %g6	! intr_head[0]
	add	%g6, %g5, %g6		! intr_head[pil]
	ldn	[%g6], %g3		! g3 = intr_req

#ifndef DEBUG
	brnz,pt	%g3, 5f
	nop
#else
	!
	! Verify the address of intr_req; it should be within the
	! address range of intr_pool and intr_head
	! or the address range of intr_add_head and intr_add_tail.
	! The range of intr_add_head and intr_add_tail is subdivided
	! by cpu, but the subdivision is not verified here.
	!
	! Registers passed to sys_trap()
	!	%g1 - no_intr_req
	!	%g2 - intr_req
	!	%g3 - %pil
	!	%g4 - current pil
	!
	add	%g1, INTR_POOL, %g2
	cmp	%g3, %g2
	blu,pn	%xcc, 8f
	nop
	add	%g1, INTR_HEAD, %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f
	nop
8:
	sethi	%hi(intr_add_head), %g2
	ldn	[%g2 + %lo(intr_add_head)], %g2
	brz,pn	%g2, 4f			! intr_add_head can be NULL
	cmp	%g3, %g2
	blu,pn	%xcc, 4f
	nop
	sethi	%hi(intr_add_tail), %g2
	ldn	[%g2 + %lo(intr_add_tail)], %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f
	nop
4:
#endif /* DEBUG */
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g2)
	GET_TRACE_TICK(%g2)
	stxa	%g2, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g2)
	mov	0xbad, %g2
	stha	%g2, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g2
	stna	%g2, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g2
	stxa	%g2, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%g0, [%g5 + TRAP_ENT_SP]%asi
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi
	rd	SOFTINT, %g2
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	stna	%g3, [%g5 + TRAP_ENT_F2]%asi
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g2, %g1)
#endif /* TRAPTRACE */
	ba	ptl1_panic
	mov	PTL1_BAD_INTR_REQ, %g1
5:
	ldn	[%g3 + INTR_NEXT], %g2	! 2nd entry
	brnz,pn	%g2, 1f			! branch if list not empty
	stn	%g2, [%g6]
	add	%g1, INTR_TAIL, %g6	! intr_tail[0]
	stn	%g0, [%g5 + %g6]	! update intr_tail[pil]
	mov	1, %g5
	sll	%g5, %g4, %g5
	wr	%g5, CLEAR_SOFTINT
1:
	!
	! put intr_req on free list
	!	%g2 - inumber
	!
	ldn	[%g1 + INTR_HEAD], %g5	! current head of free list
	lduw	[%g3 + INTR_NUMBER], %g2
	stn	%g3, [%g1 + INTR_HEAD]
	stn	%g5, [%g3 + INTR_NEXT]
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this inum
	!
	set	intr_vector, %g5;
	sll	%g2, INTR_VECTOR_SHIFT, %g6;
	add	%g5, %g6, %g5;			! &intr_vector[inum]
	sth	%g0, [%g5 + IV_PENDING]

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - inumber
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
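	! The branch below (with its annulled delay slot) implements, roughly
	! (editor's sketch in C, illustrative only):
	!
	!	if (pil > LOCK_LEVEL) {
	!		handler = current_thread;	/* runs on this thread */
	!		newpil = PIL_MAX;
	!	} else {
	!		handler = intr_thread;		/* runs on an intr thread */
	!		newpil = DISP_LEVEL;
	!	}
	!	sys_trap(handler, inum, pil, newpil);
	!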
	mov	%g4, %g3
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 4f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4
	sethi	%hi(intr_thread), %g1
	mov	DISP_LEVEL, %g4
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
4:
	sethi	%hi(current_thread), %g1
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */


#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * inum, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(inum, cpu, ls1, ls2, os1, os2, os3, regs)	\
	set	intr_vector, ls1;					\
	sll	inum, INTR_VECTOR_SHIFT, os1;				\
	add	ls1, os1, ls1;						\
	SERVE_INTR_TRACE(inum, os1, os2, os3, regs);			\
	mov	inum, ls2;

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the inum just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
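
/*
 * Editor's sketch of the caller protocol described above, as C pseudocode
 * (illustrative only; the real callers below are assembly, and the names
 * here stand in for the assembly macros of the same name):
 *
 *	SERVE_INTR_PRE(inum, ...);
 *	for (;;) {
 *		SERVE_INTR(...);		// run the handler for inum
 *		// accounting may go here; ls1 (pil) and os3 must survive
 *		if (os3 == NULL)		// no more pending intr_reqs
 *			break;
 *		SERVE_INTR_NEXT(...);		// dequeue the next intr_req
 *	}
 */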

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG], %o0;					\
	ldn	[ls1 + IV_SOFTINT_ARG2], %o1;				\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu,  INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	ldn	[os3 + INTR_NEXT], os2;					\
	brnz,pn	os2, 4f;						\
	stn	os2, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
4:	ldn	[cpu + INTR_HEAD], os1;					\
	ld	[os3 + INTR_NUMBER], os5;				\
	stn	os3, [cpu + INTR_HEAD];					\
	stn	os1, [os3 + INTR_NEXT];					\
	set	intr_vector, ls1;					\
	sll	os5, INTR_VECTOR_SHIFT, os1;				\
	add	ls1, os1, ls1;						\
	sth	%g0, [ls1 + IV_PENDING];				\
	wrpr	%g0, ls2, %pstate;					\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);			\
	mov	os5, ls2;

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2);						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2);						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
{}

#else	/* lint */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = inumber
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
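/*
 * Editor's note: a compressed C-style sketch of the flow implemented below
 * (illustrative only, not part of the build; serve_intr()/serve_intr_next()
 * stand in for the SERVE_INTR macros above, tick() for reading %tick):
 *
 *	it = cpu->cpu_intr_thread;		// take a thread from the pool
 *	cpu->cpu_intr_thread = it->t_link;
 *	cpu->cpu_intr_actv |= 1 << pil;
 *	it->t_lwp = curthread->t_lwp;		// before curthread changes
 *	it->t_intr = curthread;			// pin the interrupted thread
 *	cpu->cpu_thread = curthread = it;	// switch to the intr thread
 *	it->t_pil = pil;
 *	it->t_intr_start = tick();
 *	splx(pil);				// lower %pil to the new level
 *	do {
 *		serve_intr(inum);		// run handler, bump stats
 *		// time accounting, possible preempt()
 *	} while ((inum = serve_intr_next()) != 0);
 *	cpu->cpu_intr_actv &= ~(1 << pil);
 *	if (it->t_intr == NULL)			// handler blocked; no pinned
 *		intr_thread_exit();		//  thread left, swtch() away
 *	// otherwise unpin: put it back on the pool and return to the caller
 */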
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	rdpr	%tick, %o4			! delay
	sllx	%o4, 1, %o4			! shift off NPT bit
	srlx	%o4, 1, %o4
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
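	!
	! Editor's sketch of this load/compute/casx retry in C (illustrative
	! only; atomic_cas_64() is used here just to model the casx):
	!
	!	do {
	!		start = t->t_intr_start;
	!		if (start == 0)
	!			break;			/* nothing to account */
	!		interval = gettick() - start;	/* %tick sans NPT bit */
	!	} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
	!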
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
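	! For example (editor's note): with the CPU clocked down by a divisor
	! of 2, %tick advances at half speed, so a raw delta of 1000 ticks
	! here stands for 2000 full-speed ticks; the mulx above rescales the
	! interval so intrstat stays in full-speed tick units.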
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shift off NPT bit
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	call	ftrace_3
	mov	%i5, %o3
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call SERVE_INTR_NEXT, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %o1
	sllx	%o1, 1, %o1
	srlx	%o1, 1, %o1			! shift off NPT bit
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
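	! In C terms (editor's sketch; the movrz below selects the new value,
	! atomic_cas_64() models the casx):
	!
	!	newts = (more_pending) ? now : 0;	/* %o3 set or not */
	!	if (atomic_cas_64(&t->t_intr_start, start, newts) != start)
	!		goto retry;			/* raced; redo interval */
	!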
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
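	! Editor's sketch of the loop below in C (illustrative only;
	! atomic_cas_64() models the casx):
	!
	!	do {
	!		old = intrstat[pil][0];
	!		new = old + interval;
	!		intrstat[pil][1] = new;		/* the stx below */
	!	} while (atomic_cas_64(&intrstat[pil][0], old, new) != old);
	!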
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop
	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
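	! The policy above, roughly, as C pseudocode (editor's sketch only):
	!
	!	if (cpu->cpu_flags & CPU_QUIESCED)
	!		;				/* never preempt */
	!	else if (curthread->t_intr == NULL) {
	!		if (cpu->cpu_kprunrun)
	!			preempt();
	!	} else if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
	!		if (cpu->cpu_intrcnt == INTRCNT_LIMIT) {
	!			cpu->cpu_kprunrun = 1;
	!			CPU_STATS_ADDQ(cpu, sys, intrunpin, 1);
	!		}
	!		preempt();
	!	}
	!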
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call SERVE_INTR_NEXT and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5			! shift off NPT bit
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
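	! Editor's sketch of the exit path below, in loose C (illustrative
	! only; _intr_set_spl is the assembly helper called below):
	!
	!	CPU_STATS_ADDQ(cpu, sys, intrblk, 1);
	!	_intr_set_spl();		/* recompute CPU's base SPL */
	!	curthread->t_state = TS_FREE;	/* hide from debuggers */
	!	curthread->t_link = cpu->cpu_intr_thread;
	!	cpu->cpu_intr_thread = curthread;	/* back on the pool */
	!	swtch();			/* pick another thread; no return */
	!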
846*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_thread_exit)
847*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
848*7c478bd9Sstevel@tonic-gate	rdpr	%pstate, %l2
849*7c478bd9Sstevel@tonic-gate	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
850*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %o4, %pstate			! cpu to known state
851*7c478bd9Sstevel@tonic-gate	TRACE_PTR(%o4, %o5)
852*7c478bd9Sstevel@tonic-gate	GET_TRACE_TICK(%o5)
853*7c478bd9Sstevel@tonic-gate	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
854*7c478bd9Sstevel@tonic-gate	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
855*7c478bd9Sstevel@tonic-gate	set	TT_INTR_EXIT, %o5
856*7c478bd9Sstevel@tonic-gate	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
857*7c478bd9Sstevel@tonic-gate	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
858*7c478bd9Sstevel@tonic-gate	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
859*7c478bd9Sstevel@tonic-gate	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
860*7c478bd9Sstevel@tonic-gate	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
861*7c478bd9Sstevel@tonic-gate	ld	[%o2 + CPU_BASE_SPL], %o5
862*7c478bd9Sstevel@tonic-gate	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
863*7c478bd9Sstevel@tonic-gate	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
864*7c478bd9Sstevel@tonic-gate	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
865*7c478bd9Sstevel@tonic-gate	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
866*7c478bd9Sstevel@tonic-gate	TRACE_NEXT(%o4, %o5, %o0)
867*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %l2, %pstate
868*7c478bd9Sstevel@tonic-gate#endif /* TRAPTRACE */
869*7c478bd9Sstevel@tonic-gate	! cpu_stats.sys.intrblk++
870*7c478bd9Sstevel@tonic-gate        ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
871*7c478bd9Sstevel@tonic-gate        inc     %o4
872*7c478bd9Sstevel@tonic-gate        stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
873*7c478bd9Sstevel@tonic-gate	!
874*7c478bd9Sstevel@tonic-gate	! Put thread back on the interrupt thread list.
875*7c478bd9Sstevel@tonic-gate	!
876*7c478bd9Sstevel@tonic-gate
877*7c478bd9Sstevel@tonic-gate	!
878*7c478bd9Sstevel@tonic-gate	! Set the CPU's base SPL level.
879*7c478bd9Sstevel@tonic-gate	!
880*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
881*7c478bd9Sstevel@tonic-gate	!
882*7c478bd9Sstevel@tonic-gate	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
883*7c478bd9Sstevel@tonic-gate	!
884*7c478bd9Sstevel@tonic-gate	ld	[%o2 + CPU_INTR_ACTV], %o5
885*7c478bd9Sstevel@tonic-gate	mov	1, %o4
886*7c478bd9Sstevel@tonic-gate	sll	%o4, %l1, %o4
887*7c478bd9Sstevel@tonic-gate	and	%o5, %o4, %o4
888*7c478bd9Sstevel@tonic-gate	brz,pt	%o4, 0f
889*7c478bd9Sstevel@tonic-gate	nop
890*7c478bd9Sstevel@tonic-gate	! Do not call panic if a panic is already in progress.
891*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l2
892*7c478bd9Sstevel@tonic-gate	ld	[%l2 + %lo(panic_quiesce)], %l2
893*7c478bd9Sstevel@tonic-gate	brnz,pn	%l2, 0f
894*7c478bd9Sstevel@tonic-gate	nop
895*7c478bd9Sstevel@tonic-gate	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
896*7c478bd9Sstevel@tonic-gate	call	panic
897*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
898*7c478bd9Sstevel@tonic-gate0:
899*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
900*7c478bd9Sstevel@tonic-gate	call	_intr_set_spl			! set CPU's base SPL level
901*7c478bd9Sstevel@tonic-gate	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
902*7c478bd9Sstevel@tonic-gate	!
903*7c478bd9Sstevel@tonic-gate	! set the thread state to free so kernel debuggers don't see it
904*7c478bd9Sstevel@tonic-gate	!
905*7c478bd9Sstevel@tonic-gate	mov	TS_FREE, %o4
906*7c478bd9Sstevel@tonic-gate	st	%o4, [THREAD_REG + T_STATE]
907*7c478bd9Sstevel@tonic-gate	!
908*7c478bd9Sstevel@tonic-gate	! Put thread on either the interrupt pool or the free pool and
909*7c478bd9Sstevel@tonic-gate	! call swtch() to resume another thread.
910*7c478bd9Sstevel@tonic-gate	!
911*7c478bd9Sstevel@tonic-gate	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
912*7c478bd9Sstevel@tonic-gate	stn	%o5, [THREAD_REG + T_LINK]
913*7c478bd9Sstevel@tonic-gate	call	swtch				! switch to best thread
914*7c478bd9Sstevel@tonic-gate	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
915*7c478bd9Sstevel@tonic-gate	ba,a,pt	%xcc, .				! swtch() shouldn't return
916*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_thread_exit)
917*7c478bd9Sstevel@tonic-gate
918*7c478bd9Sstevel@tonic-gate	.global ftrace_intr_thread_format_str
919*7c478bd9Sstevel@tonic-gateftrace_intr_thread_format_str:
920*7c478bd9Sstevel@tonic-gate	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
921*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
922*7c478bd9Sstevel@tonic-gateintr_thread_actv_bit_set:
923*7c478bd9Sstevel@tonic-gate	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
924*7c478bd9Sstevel@tonic-gateintr_thread_actv_bit_not_set:
925*7c478bd9Sstevel@tonic-gate	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
926*7c478bd9Sstevel@tonic-gateintr_thread_exit_actv_bit_set:
927*7c478bd9Sstevel@tonic-gate	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
928*7c478bd9Sstevel@tonic-gateintr_thread_t_intr_start_zero:
929*7c478bd9Sstevel@tonic-gate	.asciz	"intr_thread():	t_intr_start zero upon handler return"
930*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
931*7c478bd9Sstevel@tonic-gate#endif	/* lint */
932*7c478bd9Sstevel@tonic-gate
933*7c478bd9Sstevel@tonic-gate#if defined(lint)
934*7c478bd9Sstevel@tonic-gate
935*7c478bd9Sstevel@tonic-gate/*
936*7c478bd9Sstevel@tonic-gate * Handle an interrupt in the current thread
937*7c478bd9Sstevel@tonic-gate *	Entry:
938*7c478bd9Sstevel@tonic-gate *		%o0       = pointer to regs structure
939*7c478bd9Sstevel@tonic-gate *		%o1       = inumber
940*7c478bd9Sstevel@tonic-gate *		%o2       = pil
941*7c478bd9Sstevel@tonic-gate *		%sp       = on current thread's kernel stack
942*7c478bd9Sstevel@tonic-gate *		%o7       = return linkage to trap code
943*7c478bd9Sstevel@tonic-gate *		%g7       = current thread
944*7c478bd9Sstevel@tonic-gate *		%pstate   = normal globals, interrupts enabled,
945*7c478bd9Sstevel@tonic-gate *		            privileged, fp disabled
946*7c478bd9Sstevel@tonic-gate *		%pil      = PIL_MAX
947*7c478bd9Sstevel@tonic-gate *
948*7c478bd9Sstevel@tonic-gate *	Register Usage
949*7c478bd9Sstevel@tonic-gate *		%l0       = return linkage
950*7c478bd9Sstevel@tonic-gate *		%l1       = old stack
951*7c478bd9Sstevel@tonic-gate *		%l2 - %l3 = scratch
952*7c478bd9Sstevel@tonic-gate *		%l4 - %l7 = reserved for sys_trap
953*7c478bd9Sstevel@tonic-gate *		%o3       = cpu
954*7c478bd9Sstevel@tonic-gate *		%o0       = scratch
955*7c478bd9Sstevel@tonic-gate *		%o4 - %o5 = scratch
956*7c478bd9Sstevel@tonic-gate */
957*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
958*7c478bd9Sstevel@tonic-gatevoid
959*7c478bd9Sstevel@tonic-gatecurrent_thread(struct regs *regs, uint_t inumber, uint_t pil)
960*7c478bd9Sstevel@tonic-gate{}
961*7c478bd9Sstevel@tonic-gate
962*7c478bd9Sstevel@tonic-gate#else	/* lint */
963*7c478bd9Sstevel@tonic-gate
964*7c478bd9Sstevel@tonic-gate	ENTRY_NP(current_thread)
965*7c478bd9Sstevel@tonic-gate
966*7c478bd9Sstevel@tonic-gate	mov	%o7, %l0
967*7c478bd9Sstevel@tonic-gate	ldn	[THREAD_REG + T_CPU], %o3
968*7c478bd9Sstevel@tonic-gate	!
969*7c478bd9Sstevel@tonic-gate	! Set bit for this level in CPU's active interrupt bitmask.
970*7c478bd9Sstevel@tonic-gate	!
971*7c478bd9Sstevel@tonic-gate	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
972*7c478bd9Sstevel@tonic-gate	mov	1, %o4
973*7c478bd9Sstevel@tonic-gate	sll	%o4, %o2, %o4			! construct mask for level
974*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
975*7c478bd9Sstevel@tonic-gate	!
976*7c478bd9Sstevel@tonic-gate	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
977*7c478bd9Sstevel@tonic-gate	!
978*7c478bd9Sstevel@tonic-gate	andcc	%o5, %o4, %g0
979*7c478bd9Sstevel@tonic-gate	bz,pt	%xcc, 0f
980*7c478bd9Sstevel@tonic-gate	nop
981*7c478bd9Sstevel@tonic-gate	! Do not call panic if a panic is already in progress.
982*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l2
983*7c478bd9Sstevel@tonic-gate	ld	[%l2 + %lo(panic_quiesce)], %l2
984*7c478bd9Sstevel@tonic-gate	brnz,pn	%l2, 0f
985*7c478bd9Sstevel@tonic-gate	nop
986*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_actv_bit_set), %o0
987*7c478bd9Sstevel@tonic-gate	call	panic
988*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_actv_bit_set), %o0
989*7c478bd9Sstevel@tonic-gate0:
990*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
991*7c478bd9Sstevel@tonic-gate	or	%o5, %o4, %o4
992*7c478bd9Sstevel@tonic-gate	!
993*7c478bd9Sstevel@tonic-gate	! See if we are interrupting another high-level interrupt.
994*7c478bd9Sstevel@tonic-gate	!
995*7c478bd9Sstevel@tonic-gate	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
996*7c478bd9Sstevel@tonic-gate	brz,pt	%o5, 1f
997*7c478bd9Sstevel@tonic-gate	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
998*7c478bd9Sstevel@tonic-gate	!
999*7c478bd9Sstevel@tonic-gate	! We have interrupted another high-level interrupt. Find its PIL,
1000*7c478bd9Sstevel@tonic-gate	! compute the interval it ran for, and update its cumulative counter.
1001*7c478bd9Sstevel@tonic-gate	!
1002*7c478bd9Sstevel@tonic-gate	! Register usage:
1003*7c478bd9Sstevel@tonic-gate	!
1004*7c478bd9Sstevel@tonic-gate	! o2 = PIL of this interrupt
1005*7c478bd9Sstevel@tonic-gate	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
1006*7c478bd9Sstevel@tonic-gate	! l1 = bitmask used to find other active high-level PIL
1007*7c478bd9Sstevel@tonic-gate	! o4 = index of bit set in l1
1008*7c478bd9Sstevel@tonic-gate	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
1009*7c478bd9Sstevel@tonic-gate	! interrupted high-level interrupt.
1010*7c478bd9Sstevel@tonic-gate	! Create mask for cpu_intr_actv. Begin by looking for bits set
1011*7c478bd9Sstevel@tonic-gate	! at one level below the current PIL. Since %o5 contains the active
1012*7c478bd9Sstevel@tonic-gate	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
1013*7c478bd9Sstevel@tonic-gate	! at bit (current_pil - (LOCK_LEVEL + 2)).
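	!
	! An editorial C sketch of the search below (illustrative only; the names
	! mirror the assym offsets this code uses rather than real struct fields):
	!
	!	bit = pil - (LOCK_LEVEL + 2);		! scan downward from PIL pil - 1
	!	while ((high_actv & (1 << bit)) == 0)
	!		bit--;
	!	start = pil_high_start[bit];		! bit == intr'd PIL - (LOCK_LEVEL + 1)
	!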
1014*7c478bd9Sstevel@tonic-gate	sub	%o2, LOCK_LEVEL + 2, %o4
1015*7c478bd9Sstevel@tonic-gate	mov	1, %l1
1016*7c478bd9Sstevel@tonic-gate	sll	%l1, %o4, %l1
1017*7c478bd9Sstevel@tonic-gate2:
1018*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1019*7c478bd9Sstevel@tonic-gate	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
1020*7c478bd9Sstevel@tonic-gate	brnz,pt	%l1, 9f
1021*7c478bd9Sstevel@tonic-gate	nop
1022*7c478bd9Sstevel@tonic-gate
1023*7c478bd9Sstevel@tonic-gate	! Don't panic if a panic is already in progress.
1024*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l3
1025*7c478bd9Sstevel@tonic-gate	ld	[%l3 + %lo(panic_quiesce)], %l3
1026*7c478bd9Sstevel@tonic-gate	brnz,pn	%l3, 9f
1027*7c478bd9Sstevel@tonic-gate	nop
1028*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_nested_PIL_not_found), %o0
1029*7c478bd9Sstevel@tonic-gate	call	panic
1030*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
1031*7c478bd9Sstevel@tonic-gate9:
1032*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1033*7c478bd9Sstevel@tonic-gate	andcc	%l1, %o5, %g0		! test mask against high-level bits of
1034*7c478bd9Sstevel@tonic-gate	bnz	%xcc, 3f		! cpu_intr_actv
1035*7c478bd9Sstevel@tonic-gate	nop
1036*7c478bd9Sstevel@tonic-gate	srl	%l1, 1, %l1		! No match. Try next lower PIL.
1037*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 2b
1038*7c478bd9Sstevel@tonic-gate	sub	%o4, 1, %o4		! delay - decrement PIL
1039*7c478bd9Sstevel@tonic-gate3:
1040*7c478bd9Sstevel@tonic-gate	sll	%o4, 3, %o4			! index to byte offset
1041*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
1042*7c478bd9Sstevel@tonic-gate	add	%l1, MCPU_PIL_HIGH_START, %l1
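	! (The full CPU_PIL_HIGH_START offset is too big for the signed 13-bit
	! immediate field of a single add, so the CPU_MCPU and MCPU_* pieces are
	! added separately; the same idiom recurs throughout this file.)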
1043*7c478bd9Sstevel@tonic-gate	ldx	[%o3 + %l1], %l3		! load starting timestamp
1044*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1045*7c478bd9Sstevel@tonic-gate	brnz,pt	%l3, 9f
1046*7c478bd9Sstevel@tonic-gate	nop
1047*7c478bd9Sstevel@tonic-gate	! Don't panic if a panic is already in progress.
1048*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l1
1049*7c478bd9Sstevel@tonic-gate	ld	[%l1 + %lo(panic_quiesce)], %l1
1050*7c478bd9Sstevel@tonic-gate	brnz,pn	%l1, 9f
1051*7c478bd9Sstevel@tonic-gate	nop
1052*7c478bd9Sstevel@tonic-gate	srl	%o4, 3, %o1			! Find interrupted PIL for panic
1053*7c478bd9Sstevel@tonic-gate	add	%o1, LOCK_LEVEL + 1, %o1
1054*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_nested_pil_zero), %o0
1055*7c478bd9Sstevel@tonic-gate	call	panic
1056*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_nested_pil_zero), %o0
1057*7c478bd9Sstevel@tonic-gate9:
1058*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1059*7c478bd9Sstevel@tonic-gate	rdpr	%tick, %l1
1060*7c478bd9Sstevel@tonic-gate	sllx	%l1, 1, %l1
1061*7c478bd9Sstevel@tonic-gate	srlx	%l1, 1, %l1			! shake off NPT bit
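	! (%tick bit 63 is the nonprivileged-trap (NPT) bit; the sllx/srlx pair
	! clears it so the interval arithmetic below sees only the counter bits.)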
1062*7c478bd9Sstevel@tonic-gate	sub	%l1, %l3, %l3			! interval in %l3
1063*7c478bd9Sstevel@tonic-gate	!
1064*7c478bd9Sstevel@tonic-gate	! Check for Energy Star mode
1065*7c478bd9Sstevel@tonic-gate	!
1066*7c478bd9Sstevel@tonic-gate	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
1067*7c478bd9Sstevel@tonic-gate	cmp	%l1, 1
1068*7c478bd9Sstevel@tonic-gate	bg,a,pn	%xcc, 2f
1069*7c478bd9Sstevel@tonic-gate	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
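	! (The annul bit on bg,a cancels the mulx above when the branch falls
	! through, so the interval is scaled only when the divisor exceeds one.)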
1070*7c478bd9Sstevel@tonic-gate2:
1071*7c478bd9Sstevel@tonic-gate	!
1072*7c478bd9Sstevel@tonic-gate	! We need to find the CPU offset of the cumulative counter. We start
1073*7c478bd9Sstevel@tonic-gate	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
1074*7c478bd9Sstevel@tonic-gate	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
1075*7c478bd9Sstevel@tonic-gate	! CPU_INTRSTAT_LOW_PIL_OFFSET.
1076*7c478bd9Sstevel@tonic-gate	!
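	! For example, with LOCK_LEVEL at 10, a PIL 13 interrupt arrives here with
	! %o4 = (13 - 11) * 8 = 16; shifting left one gives 32, and adding
	! 11 * 16 = 176 yields 208 = 13 * 16, the byte offset of intrstat[13].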
1077*7c478bd9Sstevel@tonic-gate	sll	%o4, 1, %o4
1078*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
1079*7c478bd9Sstevel@tonic-gate	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1080*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
1081*7c478bd9Sstevel@tonic-gate	ldx	[%o3 + %o4], %l1		! old counter in l1
1082*7c478bd9Sstevel@tonic-gate	add	%l1, %l3, %l1			! new counter in l1
1083*7c478bd9Sstevel@tonic-gate	! Another high-level interrupt is active below this one, so
1084*7c478bd9Sstevel@tonic-gate	! there is no need to check for an interrupt thread. That will be
1085*7c478bd9Sstevel@tonic-gate	! done by the lowest priority high-level interrupt active.
1086*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 5f
1087*7c478bd9Sstevel@tonic-gate	stx	%l1, [%o3 + %o4]		! delay - store new counter
1088*7c478bd9Sstevel@tonic-gate1:
1089*7c478bd9Sstevel@tonic-gate	! If we haven't interrupted another high-level interrupt, we may be
1090*7c478bd9Sstevel@tonic-gate	! interrupting a low level interrupt thread. If so, compute its interval
1091*7c478bd9Sstevel@tonic-gate	! and update its cumulative counter.
1092*7c478bd9Sstevel@tonic-gate	lduh	[THREAD_REG + T_FLAGS], %o4
1093*7c478bd9Sstevel@tonic-gate	andcc	%o4, T_INTR_THREAD, %g0
1094*7c478bd9Sstevel@tonic-gate	bz,pt	%xcc, 4f
1095*7c478bd9Sstevel@tonic-gate	nop
1096*7c478bd9Sstevel@tonic-gate
1097*7c478bd9Sstevel@tonic-gate	! We have interrupted an interrupt thread. Take timestamp, compute
1098*7c478bd9Sstevel@tonic-gate	! interval, update cumulative counter.
1099*7c478bd9Sstevel@tonic-gate
1100*7c478bd9Sstevel@tonic-gate	! Check t_intr_start. If it is zero, either intr_thread() or
1101*7c478bd9Sstevel@tonic-gate	! current_thread() (at a lower PIL, of course) already did
1102*7c478bd9Sstevel@tonic-gate	! the accounting for the underlying interrupt thread.
1103*7c478bd9Sstevel@tonic-gate	ldx	[THREAD_REG + T_INTR_START], %o5
1104*7c478bd9Sstevel@tonic-gate	brz,pn	%o5, 4f
1105*7c478bd9Sstevel@tonic-gate	nop
1106*7c478bd9Sstevel@tonic-gate
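	! Zero t_intr_start so this interval is not accounted twice; per the check
	! above, a zero value means the accounting has already been (or is being)
	! done.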
1107*7c478bd9Sstevel@tonic-gate	stx	%g0, [THREAD_REG + T_INTR_START]
1108*7c478bd9Sstevel@tonic-gate	rdpr	%tick, %o4
1109*7c478bd9Sstevel@tonic-gate	sllx	%o4, 1, %o4
1110*7c478bd9Sstevel@tonic-gate	srlx	%o4, 1, %o4			! shake off NPT bit
1111*7c478bd9Sstevel@tonic-gate	sub	%o4, %o5, %o5			! o5 has the interval
1112*7c478bd9Sstevel@tonic-gate
1113*7c478bd9Sstevel@tonic-gate	! Check for Energy Star mode
1114*7c478bd9Sstevel@tonic-gate	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
1115*7c478bd9Sstevel@tonic-gate	cmp	%o4, 1
1116*7c478bd9Sstevel@tonic-gate	bg,a,pn	%xcc, 2f
1117*7c478bd9Sstevel@tonic-gate	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
1118*7c478bd9Sstevel@tonic-gate2:
1119*7c478bd9Sstevel@tonic-gate	ldub	[THREAD_REG + T_PIL], %o4
1120*7c478bd9Sstevel@tonic-gate	sllx	%o4, 4, %o4			! PIL index to byte offset
1121*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
1122*7c478bd9Sstevel@tonic-gate	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1123*7c478bd9Sstevel@tonic-gate	ldx	[%o3 + %o4], %l2		! old counter in l2
1124*7c478bd9Sstevel@tonic-gate	add	%l2, %o5, %l2			! new counter in l2
1125*7c478bd9Sstevel@tonic-gate	stx	%l2, [%o3 + %o4]		! store new counter
1126*7c478bd9Sstevel@tonic-gate
1127*7c478bd9Sstevel@tonic-gate4:
1128*7c478bd9Sstevel@tonic-gate	!
1129*7c478bd9Sstevel@tonic-gate	! Handle high-level interrupts on separate interrupt stack.
1130*7c478bd9Sstevel@tonic-gate	! No other high-level interrupts are active, so switch to int stack.
1131*7c478bd9Sstevel@tonic-gate	!
1132*7c478bd9Sstevel@tonic-gate	mov	%sp, %l1
1133*7c478bd9Sstevel@tonic-gate	ldn	[%o3 + CPU_INTR_STACK], %l3
1134*7c478bd9Sstevel@tonic-gate	sub	%l3, STACK_BIAS, %sp
1135*7c478bd9Sstevel@tonic-gate
1136*7c478bd9Sstevel@tonic-gate5:
1137*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1138*7c478bd9Sstevel@tonic-gate	!
1139*7c478bd9Sstevel@tonic-gate	! ASSERT(%o2 > LOCK_LEVEL)
1140*7c478bd9Sstevel@tonic-gate	!
1141*7c478bd9Sstevel@tonic-gate	cmp	%o2, LOCK_LEVEL
1142*7c478bd9Sstevel@tonic-gate	bg,pt	%xcc, 3f
1143*7c478bd9Sstevel@tonic-gate	nop
1144*7c478bd9Sstevel@tonic-gate	mov	CE_PANIC, %o0
1145*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_wrong_pil), %o1
1146*7c478bd9Sstevel@tonic-gate	call	cmn_err				! %o2 has the %pil already
1147*7c478bd9Sstevel@tonic-gate	or	%o1, %lo(current_thread_wrong_pil), %o1
1148*7c478bd9Sstevel@tonic-gate#endif
1149*7c478bd9Sstevel@tonic-gate3:
1150*7c478bd9Sstevel@tonic-gate	! Store starting timestamp for this PIL in CPU structure at
1151*7c478bd9Sstevel@tonic-gate	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
1152*7c478bd9Sstevel@tonic-gate        sub     %o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
1153*7c478bd9Sstevel@tonic-gate	sllx    %o4, 3, %o4			! index to byte offset
1154*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
1155*7c478bd9Sstevel@tonic-gate	add	%o4, MCPU_PIL_HIGH_START, %o4
1156*7c478bd9Sstevel@tonic-gate        rdpr    %tick, %o5
1157*7c478bd9Sstevel@tonic-gate	sllx	%o5, 1, %o5
1158*7c478bd9Sstevel@tonic-gate	srlx	%o5, 1, %o5
1159*7c478bd9Sstevel@tonic-gate        stx     %o5, [%o3 + %o4]
1160*7c478bd9Sstevel@tonic-gate
1161*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %o2, %pil			! enable interrupts
1162*7c478bd9Sstevel@tonic-gate
1163*7c478bd9Sstevel@tonic-gate	!
1164*7c478bd9Sstevel@tonic-gate	! call the handler
1165*7c478bd9Sstevel@tonic-gate	!
1166*7c478bd9Sstevel@tonic-gate	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1167*7c478bd9Sstevel@tonic-gate1:
1168*7c478bd9Sstevel@tonic-gate	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1169*7c478bd9Sstevel@tonic-gate
1170*7c478bd9Sstevel@tonic-gate	brz,a,pt %o2, 0f			! branch if no more intrs pending
1171*7c478bd9Sstevel@tonic-gate	rdpr	%pil, %o2			! delay annulled
1172*7c478bd9Sstevel@tonic-gate	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1173*7c478bd9Sstevel@tonic-gate	ba	1b
1174*7c478bd9Sstevel@tonic-gate	nop
1175*7c478bd9Sstevel@tonic-gate0:
1176*7c478bd9Sstevel@tonic-gate	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
1177*7c478bd9Sstevel@tonic-gate
1178*7c478bd9Sstevel@tonic-gate	cmp	%o2, PIL_15
1179*7c478bd9Sstevel@tonic-gate	bne,pt	%xcc, 3f
1180*7c478bd9Sstevel@tonic-gate	nop
1181*7c478bd9Sstevel@tonic-gate
1182*7c478bd9Sstevel@tonic-gate	sethi	%hi(cpc_level15_inum), %o1
1183*7c478bd9Sstevel@tonic-gate	ld	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
1184*7c478bd9Sstevel@tonic-gate	brz	%o1, 3f
1185*7c478bd9Sstevel@tonic-gate	nop
1186*7c478bd9Sstevel@tonic-gate
1187*7c478bd9Sstevel@tonic-gate	rdpr 	%pstate, %g5
1188*7c478bd9Sstevel@tonic-gate	andn	%g5, PSTATE_IE, %g1
1189*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %g1, %pstate		! Disable vec interrupts
1190*7c478bd9Sstevel@tonic-gate
1191*7c478bd9Sstevel@tonic-gate	call	intr_enqueue_req		! preserves %g5
1192*7c478bd9Sstevel@tonic-gate	mov	PIL_15, %o0
1193*7c478bd9Sstevel@tonic-gate
1194*7c478bd9Sstevel@tonic-gate	! clear perfcntr overflow
1195*7c478bd9Sstevel@tonic-gate	mov	1, %o0
1196*7c478bd9Sstevel@tonic-gate	sllx	%o0, PIL_15, %o0
1197*7c478bd9Sstevel@tonic-gate	wr	%o0, CLEAR_SOFTINT
1198*7c478bd9Sstevel@tonic-gate
1199*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %g5, %pstate		! Enable vec interrupts
1200*7c478bd9Sstevel@tonic-gate
1201*7c478bd9Sstevel@tonic-gate3:
1202*7c478bd9Sstevel@tonic-gate	cmp	%o2, PIL_14
1203*7c478bd9Sstevel@tonic-gate	be	tick_rtt			!  cpu-specific tick processing
1204*7c478bd9Sstevel@tonic-gate	nop
1205*7c478bd9Sstevel@tonic-gate	.global	current_thread_complete
1206*7c478bd9Sstevel@tonic-gatecurrent_thread_complete:
1207*7c478bd9Sstevel@tonic-gate	!
1208*7c478bd9Sstevel@tonic-gate	! Register usage:
1209*7c478bd9Sstevel@tonic-gate	!
1210*7c478bd9Sstevel@tonic-gate	! %l1 = stack pointer
1211*7c478bd9Sstevel@tonic-gate	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
1212*7c478bd9Sstevel@tonic-gate	! %o2 = PIL
1213*7c478bd9Sstevel@tonic-gate	! %o3 = CPU pointer
1214*7c478bd9Sstevel@tonic-gate	! %o4, %o5, %l3, %l4, %l5 = scratch
1215*7c478bd9Sstevel@tonic-gate	!
1216*7c478bd9Sstevel@tonic-gate	ldn	[THREAD_REG + T_CPU], %o3
1217*7c478bd9Sstevel@tonic-gate	!
1218*7c478bd9Sstevel@tonic-gate	! Clear bit for this level in CPU's interrupt active bitmask.
1219*7c478bd9Sstevel@tonic-gate	!
1220*7c478bd9Sstevel@tonic-gate	ld	[%o3 + CPU_INTR_ACTV], %l2
1221*7c478bd9Sstevel@tonic-gate	mov	1, %o5
1222*7c478bd9Sstevel@tonic-gate	sll	%o5, %o2, %o5
1223*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1224*7c478bd9Sstevel@tonic-gate	!
1225*7c478bd9Sstevel@tonic-gate	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
1226*7c478bd9Sstevel@tonic-gate	!
1227*7c478bd9Sstevel@tonic-gate	andcc	%l2, %o5, %g0
1228*7c478bd9Sstevel@tonic-gate	bnz,pt	%xcc, 0f
1229*7c478bd9Sstevel@tonic-gate	nop
1230*7c478bd9Sstevel@tonic-gate	! Do not call panic if a panic is already in progress.
1231*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l2
1232*7c478bd9Sstevel@tonic-gate	ld	[%l2 + %lo(panic_quiesce)], %l2
1233*7c478bd9Sstevel@tonic-gate	brnz,pn	%l2, 0f
1234*7c478bd9Sstevel@tonic-gate	nop
1235*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_actv_bit_not_set), %o0
1236*7c478bd9Sstevel@tonic-gate	call	panic
1237*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
1238*7c478bd9Sstevel@tonic-gate0:
1239*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1240*7c478bd9Sstevel@tonic-gate	andn	%l2, %o5, %l2
1241*7c478bd9Sstevel@tonic-gate	st	%l2, [%o3 + CPU_INTR_ACTV]
1242*7c478bd9Sstevel@tonic-gate
1243*7c478bd9Sstevel@tonic-gate	! Take timestamp, compute interval, update cumulative counter.
1244*7c478bd9Sstevel@tonic-gate        sub     %o2, LOCK_LEVEL + 1, %o4	! PIL to array index
1245*7c478bd9Sstevel@tonic-gate	sllx    %o4, 3, %o4			! index to byte offset
1246*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
1247*7c478bd9Sstevel@tonic-gate	add	%o4, MCPU_PIL_HIGH_START, %o4
1248*7c478bd9Sstevel@tonic-gate        rdpr    %tick, %o5
1249*7c478bd9Sstevel@tonic-gate	sllx	%o5, 1, %o5
1250*7c478bd9Sstevel@tonic-gate	srlx	%o5, 1, %o5
1251*7c478bd9Sstevel@tonic-gate	ldx     [%o3 + %o4], %o0
1252*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1253*7c478bd9Sstevel@tonic-gate	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
1254*7c478bd9Sstevel@tonic-gate	brnz,pt	%o0, 9f
1255*7c478bd9Sstevel@tonic-gate	nop
1256*7c478bd9Sstevel@tonic-gate	! Don't panic if a panic is already in progress.
1257*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %l2
1258*7c478bd9Sstevel@tonic-gate	ld	[%l2 + %lo(panic_quiesce)], %l2
1259*7c478bd9Sstevel@tonic-gate	brnz,pn	%l2, 9f
1260*7c478bd9Sstevel@tonic-gate	nop
1261*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_timestamp_zero), %o0
1262*7c478bd9Sstevel@tonic-gate	call	panic
1263*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_timestamp_zero), %o0
1264*7c478bd9Sstevel@tonic-gate9:
1265*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1266*7c478bd9Sstevel@tonic-gate	stx	%g0, [%o3 + %o4]
1267*7c478bd9Sstevel@tonic-gate	sub	%o5, %o0, %o5			! interval in o5
1268*7c478bd9Sstevel@tonic-gate
1269*7c478bd9Sstevel@tonic-gate	! Check for Energy Star mode
1270*7c478bd9Sstevel@tonic-gate	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
1271*7c478bd9Sstevel@tonic-gate	cmp	%o4, 1
1272*7c478bd9Sstevel@tonic-gate	bg,a,pn	%xcc, 2f
1273*7c478bd9Sstevel@tonic-gate	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
1274*7c478bd9Sstevel@tonic-gate2:
1275*7c478bd9Sstevel@tonic-gate	sllx	%o2, 4, %o4			! PIL index to byte offset
1276*7c478bd9Sstevel@tonic-gate	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
1277*7c478bd9Sstevel@tonic-gate	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
1278*7c478bd9Sstevel@tonic-gate	ldx	[%o3 + %o4], %o0		! old counter in o0
1279*7c478bd9Sstevel@tonic-gate	add	%o0, %o5, %o0			! new counter in o0
1280*7c478bd9Sstevel@tonic-gate	stx	%o0, [%o3 + %o4]		! store new counter
1281*7c478bd9Sstevel@tonic-gate
1282*7c478bd9Sstevel@tonic-gate	!
1283*7c478bd9Sstevel@tonic-gate	! get back on current thread's stack
1284*7c478bd9Sstevel@tonic-gate	!
1285*7c478bd9Sstevel@tonic-gate	srl	%l2, LOCK_LEVEL + 1, %l2
1286*7c478bd9Sstevel@tonic-gate	tst	%l2				! any more high-level ints?
1287*7c478bd9Sstevel@tonic-gate	movz	%xcc, %l1, %sp
1288*7c478bd9Sstevel@tonic-gate	!
1289*7c478bd9Sstevel@tonic-gate	! Current register usage:
1290*7c478bd9Sstevel@tonic-gate	! o2 = PIL
1291*7c478bd9Sstevel@tonic-gate	! o3 = CPU pointer
1292*7c478bd9Sstevel@tonic-gate	! l0 = return address
1293*7c478bd9Sstevel@tonic-gate	! l2 = intr_actv shifted right
1294*7c478bd9Sstevel@tonic-gate	!
1295*7c478bd9Sstevel@tonic-gate	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
1296*7c478bd9Sstevel@tonic-gate	nop
1297*7c478bd9Sstevel@tonic-gate	!
1298*7c478bd9Sstevel@tonic-gate	! We found another high-level interrupt active below the one that just
1299*7c478bd9Sstevel@tonic-gate	! returned. Store a starting timestamp for it in the CPU structure.
1300*7c478bd9Sstevel@tonic-gate	!
1301*7c478bd9Sstevel@tonic-gate	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
1302*7c478bd9Sstevel@tonic-gate	! interrupted high-level interrupt.
1303*7c478bd9Sstevel@tonic-gate	! Create mask for cpu_intr_actv. Begin by looking for bits set
1304*7c478bd9Sstevel@tonic-gate	! at one level below the current PIL. Since %l2 contains the active
1305*7c478bd9Sstevel@tonic-gate	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
1306*7c478bd9Sstevel@tonic-gate	! at bit (current_pil - (LOCK_LEVEL + 2)).
1307*7c478bd9Sstevel@tonic-gate	! %l1 = mask, %o5 = index of bit set in mask
1308*7c478bd9Sstevel@tonic-gate	!
1309*7c478bd9Sstevel@tonic-gate	mov	1, %l1
1310*7c478bd9Sstevel@tonic-gate	sub	%o2, LOCK_LEVEL + 2, %o5
1311*7c478bd9Sstevel@tonic-gate	sll	%l1, %o5, %l1			! l1 = mask for level
1312*7c478bd9Sstevel@tonic-gate1:
1313*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1314*7c478bd9Sstevel@tonic-gate	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
1315*7c478bd9Sstevel@tonic-gate	brnz,pt	%l1, 9f
1316*7c478bd9Sstevel@tonic-gate	nop
1317*7c478bd9Sstevel@tonic-gate	sethi	%hi(current_thread_nested_PIL_not_found), %o0
1318*7c478bd9Sstevel@tonic-gate	call	panic
1319*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
1320*7c478bd9Sstevel@tonic-gate9:
1321*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1322*7c478bd9Sstevel@tonic-gate	andcc	%l1, %l2, %g0		! test mask against high-level bits of
1323*7c478bd9Sstevel@tonic-gate	bnz	%xcc, 2f		! cpu_intr_actv
1324*7c478bd9Sstevel@tonic-gate	nop
1325*7c478bd9Sstevel@tonic-gate	srl	%l1, 1, %l1		! No match. Try next lower PIL.
1326*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 1b
1327*7c478bd9Sstevel@tonic-gate	sub	%o5, 1, %o5		! delay - decrement PIL
1328*7c478bd9Sstevel@tonic-gate2:
1329*7c478bd9Sstevel@tonic-gate	sll	%o5, 3, %o5		! convert array index to byte offset
1330*7c478bd9Sstevel@tonic-gate	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
1331*7c478bd9Sstevel@tonic-gate	add	%o5, MCPU_PIL_HIGH_START, %o5
1332*7c478bd9Sstevel@tonic-gate	rdpr	%tick, %o4
1333*7c478bd9Sstevel@tonic-gate	sllx	%o4, 1, %o4
1334*7c478bd9Sstevel@tonic-gate	srlx	%o4, 1, %o4
1335*7c478bd9Sstevel@tonic-gate	! Another high-level interrupt is active below this one, so
1336*7c478bd9Sstevel@tonic-gate	! there is no need to check for an interrupt thread. That will be
1337*7c478bd9Sstevel@tonic-gate	! done by the lowest priority high-level interrupt active.
1338*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 1f
1339*7c478bd9Sstevel@tonic-gate	stx	%o4, [%o3 + %o5]	! delay - store timestamp
1340*7c478bd9Sstevel@tonic-gate3:
1341*7c478bd9Sstevel@tonic-gate	! If we haven't interrupted another high-level interrupt, we may have
1342*7c478bd9Sstevel@tonic-gate	! interrupted a low level interrupt thread. If so, store a starting
1343*7c478bd9Sstevel@tonic-gate	! timestamp in its thread structure.
1344*7c478bd9Sstevel@tonic-gate	lduh	[THREAD_REG + T_FLAGS], %o4
1345*7c478bd9Sstevel@tonic-gate	andcc	%o4, T_INTR_THREAD, %g0
1346*7c478bd9Sstevel@tonic-gate	bz,pt	%xcc, 1f
1347*7c478bd9Sstevel@tonic-gate	nop
1348*7c478bd9Sstevel@tonic-gate
1349*7c478bd9Sstevel@tonic-gate	rdpr	%tick, %o4
1350*7c478bd9Sstevel@tonic-gate	sllx	%o4, 1, %o4
1351*7c478bd9Sstevel@tonic-gate	srlx	%o4, 1, %o4			! Shake off NPT bit
1352*7c478bd9Sstevel@tonic-gate	stx	%o4, [THREAD_REG + T_INTR_START]
1353*7c478bd9Sstevel@tonic-gate1:
1354*7c478bd9Sstevel@tonic-gate	! Enable interrupts and return
1355*7c478bd9Sstevel@tonic-gate	jmp	%l0 + 8
1356*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %o2, %pil			! enable interrupts
1357*7c478bd9Sstevel@tonic-gate	SET_SIZE(current_thread)
1358*7c478bd9Sstevel@tonic-gate
1359*7c478bd9Sstevel@tonic-gate
1360*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1361*7c478bd9Sstevel@tonic-gatecurrent_thread_wrong_pil:
1362*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread: unexpected pil level: %d"
1363*7c478bd9Sstevel@tonic-gatecurrent_thread_actv_bit_set:
1364*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
1365*7c478bd9Sstevel@tonic-gatecurrent_thread_actv_bit_not_set:
1366*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
1367*7c478bd9Sstevel@tonic-gatecurrent_thread_nested_pil_zero:
1368*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread(): timestamp zero for nested PIL %d"
1369*7c478bd9Sstevel@tonic-gatecurrent_thread_timestamp_zero:
1370*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread(): timestamp zero upon handler return"
1371*7c478bd9Sstevel@tonic-gatecurrent_thread_nested_PIL_not_found:
1372*7c478bd9Sstevel@tonic-gate	.asciz	"current_thread: couldn't find nested high-level PIL"
1373*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1374*7c478bd9Sstevel@tonic-gate#endif /* lint */
1375*7c478bd9Sstevel@tonic-gate
1376*7c478bd9Sstevel@tonic-gate/*
1377*7c478bd9Sstevel@tonic-gate * Return a thread's interrupt level.
1378*7c478bd9Sstevel@tonic-gate * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1379*7c478bd9Sstevel@tonic-gate * must dig it out of the save area.
1380*7c478bd9Sstevel@tonic-gate *
1381*7c478bd9Sstevel@tonic-gate * Caller 'swears' that this really is an interrupt thread.
1382*7c478bd9Sstevel@tonic-gate *
1383*7c478bd9Sstevel@tonic-gate * int
1384*7c478bd9Sstevel@tonic-gate * intr_level(t)
1385*7c478bd9Sstevel@tonic-gate *	kthread_id_t	t;
1386*7c478bd9Sstevel@tonic-gate */
1387*7c478bd9Sstevel@tonic-gate
1388*7c478bd9Sstevel@tonic-gate#if defined(lint)
1389*7c478bd9Sstevel@tonic-gate
1390*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1391*7c478bd9Sstevel@tonic-gateint
1392*7c478bd9Sstevel@tonic-gateintr_level(kthread_id_t t)
1393*7c478bd9Sstevel@tonic-gate{ return (0); }
1394*7c478bd9Sstevel@tonic-gate
1395*7c478bd9Sstevel@tonic-gate#else	/* lint */
1396*7c478bd9Sstevel@tonic-gate
1397*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_level)
1398*7c478bd9Sstevel@tonic-gate	retl
1399*7c478bd9Sstevel@tonic-gate	ldub	[%o0 + T_PIL], %o0		! return saved pil
1400*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_level)
1401*7c478bd9Sstevel@tonic-gate
1402*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1403*7c478bd9Sstevel@tonic-gate
1404*7c478bd9Sstevel@tonic-gate#if defined(lint)
1405*7c478bd9Sstevel@tonic-gate
1406*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1407*7c478bd9Sstevel@tonic-gateint
1408*7c478bd9Sstevel@tonic-gatedisable_pil_intr()
1409*7c478bd9Sstevel@tonic-gate{ return (0); }
1410*7c478bd9Sstevel@tonic-gate
1411*7c478bd9Sstevel@tonic-gate#else	/* lint */
1412*7c478bd9Sstevel@tonic-gate
1413*7c478bd9Sstevel@tonic-gate	ENTRY_NP(disable_pil_intr)
1414*7c478bd9Sstevel@tonic-gate	rdpr	%pil, %o0
1415*7c478bd9Sstevel@tonic-gate	retl
1416*7c478bd9Sstevel@tonic-gate	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
1417*7c478bd9Sstevel@tonic-gate	SET_SIZE(disable_pil_intr)
1418*7c478bd9Sstevel@tonic-gate
1419*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1420*7c478bd9Sstevel@tonic-gate
1421*7c478bd9Sstevel@tonic-gate#if defined(lint)
1422*7c478bd9Sstevel@tonic-gate
1423*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1424*7c478bd9Sstevel@tonic-gatevoid
1425*7c478bd9Sstevel@tonic-gateenable_pil_intr(int pil_save)
1426*7c478bd9Sstevel@tonic-gate{}
1427*7c478bd9Sstevel@tonic-gate
1428*7c478bd9Sstevel@tonic-gate#else	/* lint */
1429*7c478bd9Sstevel@tonic-gate
1430*7c478bd9Sstevel@tonic-gate	ENTRY_NP(enable_pil_intr)
1431*7c478bd9Sstevel@tonic-gate	retl
1432*7c478bd9Sstevel@tonic-gate	wrpr	%o0, %pil
1433*7c478bd9Sstevel@tonic-gate	SET_SIZE(enable_pil_intr)
1434*7c478bd9Sstevel@tonic-gate
1435*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1436*7c478bd9Sstevel@tonic-gate
1437*7c478bd9Sstevel@tonic-gate#if defined(lint)
1438*7c478bd9Sstevel@tonic-gate
1439*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1440*7c478bd9Sstevel@tonic-gateuint_t
1441*7c478bd9Sstevel@tonic-gatedisable_vec_intr(void)
1442*7c478bd9Sstevel@tonic-gate{ return (0); }
1443*7c478bd9Sstevel@tonic-gate
1444*7c478bd9Sstevel@tonic-gate#else	/* lint */
1445*7c478bd9Sstevel@tonic-gate
1446*7c478bd9Sstevel@tonic-gate	ENTRY_NP(disable_vec_intr)
1447*7c478bd9Sstevel@tonic-gate	rdpr	%pstate, %o0
1448*7c478bd9Sstevel@tonic-gate	andn	%o0, PSTATE_IE, %g1
1449*7c478bd9Sstevel@tonic-gate	retl
1450*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %g1, %pstate		! disable interrupt
1451*7c478bd9Sstevel@tonic-gate	SET_SIZE(disable_vec_intr)
1452*7c478bd9Sstevel@tonic-gate
1453*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1454*7c478bd9Sstevel@tonic-gate
1455*7c478bd9Sstevel@tonic-gate#if defined(lint)
1456*7c478bd9Sstevel@tonic-gate
1457*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1458*7c478bd9Sstevel@tonic-gatevoid
1459*7c478bd9Sstevel@tonic-gateenable_vec_intr(uint_t pstate_save)
1460*7c478bd9Sstevel@tonic-gate{}
1461*7c478bd9Sstevel@tonic-gate
1462*7c478bd9Sstevel@tonic-gate#else	/* lint */
1463*7c478bd9Sstevel@tonic-gate
1464*7c478bd9Sstevel@tonic-gate	ENTRY_NP(enable_vec_intr)
1465*7c478bd9Sstevel@tonic-gate	retl
1466*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %o0, %pstate
1467*7c478bd9Sstevel@tonic-gate	SET_SIZE(enable_vec_intr)
1468*7c478bd9Sstevel@tonic-gate
1469*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1470*7c478bd9Sstevel@tonic-gate
1471*7c478bd9Sstevel@tonic-gate#if defined(lint)
1472*7c478bd9Sstevel@tonic-gate
1473*7c478bd9Sstevel@tonic-gatevoid
1474*7c478bd9Sstevel@tonic-gatecbe_level14(void)
1475*7c478bd9Sstevel@tonic-gate{}
1476*7c478bd9Sstevel@tonic-gate
1477*7c478bd9Sstevel@tonic-gate#else   /* lint */
1478*7c478bd9Sstevel@tonic-gate
1479*7c478bd9Sstevel@tonic-gate	ENTRY_NP(cbe_level14)
1480*7c478bd9Sstevel@tonic-gate	save    %sp, -SA(MINFRAME), %sp ! get a new window
1481*7c478bd9Sstevel@tonic-gate	!
1482*7c478bd9Sstevel@tonic-gate	! Make sure that this is from TICK_COMPARE; if not just return
1483*7c478bd9Sstevel@tonic-gate	!
1484*7c478bd9Sstevel@tonic-gate	rd	SOFTINT, %l1
1485*7c478bd9Sstevel@tonic-gate	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
1486*7c478bd9Sstevel@tonic-gate	andcc	%l1, %o2, %g0
1487*7c478bd9Sstevel@tonic-gate	bz,pn	%icc, 2f
1488*7c478bd9Sstevel@tonic-gate	nop
1489*7c478bd9Sstevel@tonic-gate
1490*7c478bd9Sstevel@tonic-gate	CPU_ADDR(%o1, %o2)
1491*7c478bd9Sstevel@tonic-gate	call	cyclic_fire
1492*7c478bd9Sstevel@tonic-gate	mov	%o1, %o0
1493*7c478bd9Sstevel@tonic-gate2:
1494*7c478bd9Sstevel@tonic-gate	ret
1495*7c478bd9Sstevel@tonic-gate	restore	%g0, 1, %o0
1496*7c478bd9Sstevel@tonic-gate	SET_SIZE(cbe_level14)
1497*7c478bd9Sstevel@tonic-gate
1498*7c478bd9Sstevel@tonic-gate#endif  /* lint */
1499*7c478bd9Sstevel@tonic-gate
1500*7c478bd9Sstevel@tonic-gate
1501*7c478bd9Sstevel@tonic-gate#if defined(lint)
1502*7c478bd9Sstevel@tonic-gate
1503*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1504*7c478bd9Sstevel@tonic-gatevoid
1505*7c478bd9Sstevel@tonic-gatesetsoftint(uint_t inum)
1506*7c478bd9Sstevel@tonic-gate{}
1507*7c478bd9Sstevel@tonic-gate
1508*7c478bd9Sstevel@tonic-gate#else	/* lint */
1509*7c478bd9Sstevel@tonic-gate
1510*7c478bd9Sstevel@tonic-gate	ENTRY_NP(setsoftint)
1511*7c478bd9Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp	! get a new window
1512*7c478bd9Sstevel@tonic-gate	rdpr	%pstate, %l5
1513*7c478bd9Sstevel@tonic-gate	andn	%l5, PSTATE_IE, %l1
1514*7c478bd9Sstevel@tonic-gate	wrpr	%l1, %pstate		! disable interrupt
1515*7c478bd9Sstevel@tonic-gate	!
1516*7c478bd9Sstevel@tonic-gate	! Fetch data from intr_vector[] table according to the inum.
1517*7c478bd9Sstevel@tonic-gate	!
1518*7c478bd9Sstevel@tonic-gate	! We have an interrupt number.
1519*7c478bd9Sstevel@tonic-gate	! Put the request on the cpu's softint list,
1520*7c478bd9Sstevel@tonic-gate	! and set %set_softint.
1521*7c478bd9Sstevel@tonic-gate	!
1522*7c478bd9Sstevel@tonic-gate	! Register usage
1523*7c478bd9Sstevel@tonic-gate	!	%i0 - inumber
1524*7c478bd9Sstevel@tonic-gate	!	%l2 - requested pil
1525*7c478bd9Sstevel@tonic-gate	!	%l3 - intr_req
1526*7c478bd9Sstevel@tonic-gate	!	%l4 - *cpu
1527*7c478bd9Sstevel@tonic-gate	!	%l1, %l6 - temps
1528*7c478bd9Sstevel@tonic-gate	!
1529*7c478bd9Sstevel@tonic-gate	! check if a softint is pending for this inum already
1530*7c478bd9Sstevel@tonic-gate	! if one is pending, don't bother queuing another
1531*7c478bd9Sstevel@tonic-gate	!
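	!
	! An editorial C sketch of this routine (illustrative only; the field
	! names are stand-ins keyed to the IV_* and INTR_* assym offsets used
	! below, not necessarily the real struct member spellings):
	!
	!	iv = &intr_vector[inum];
	!	if (iv->iv_pending == 0) {
	!		iv->iv_pending = 1;
	!		ir = <unlink an intr_req from the per-CPU free list>;
	!		ir->intr_number = inum;
	!		ir->intr_next = NULL;
	!		old_tail = cpu->intr_tail[iv->iv_pil];
	!		cpu->intr_tail[iv->iv_pil] = ir;
	!		if (old_tail == NULL)
	!			cpu->intr_head[iv->iv_pil] = ir;
	!		else
	!			old_tail->intr_next = ir;
	!		<write (1 << iv->iv_pil) to SET_SOFTINT>;
	!	}
	!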
1532*7c478bd9Sstevel@tonic-gate	set	intr_vector, %l1
1533*7c478bd9Sstevel@tonic-gate	sll	%i0, INTR_VECTOR_SHIFT, %l6
1534*7c478bd9Sstevel@tonic-gate	add	%l1, %l6, %l1			! %l1 = &intr_vector[inum]
1535*7c478bd9Sstevel@tonic-gate	lduh	[%l1 + IV_PENDING], %l6
1536*7c478bd9Sstevel@tonic-gate	brnz,pn	%l6, 4f				! branch, if pending
1537*7c478bd9Sstevel@tonic-gate	or	%g0, 1, %l2
1538*7c478bd9Sstevel@tonic-gate	sth	%l2, [%l1 + IV_PENDING]		! intr_vector[inum].pend = 1
1539*7c478bd9Sstevel@tonic-gate	!
1540*7c478bd9Sstevel@tonic-gate	! allocate an intr_req from the free list
1541*7c478bd9Sstevel@tonic-gate	!
1542*7c478bd9Sstevel@tonic-gate	CPU_ADDR(%l4, %l2)
1543*7c478bd9Sstevel@tonic-gate	ldn	[%l4 + INTR_HEAD], %l3
1544*7c478bd9Sstevel@tonic-gate	lduh	[%l1 + IV_PIL], %l2
1545*7c478bd9Sstevel@tonic-gate	!
1546*7c478bd9Sstevel@tonic-gate	! fixup free list
1547*7c478bd9Sstevel@tonic-gate	!
1548*7c478bd9Sstevel@tonic-gate	ldn	[%l3 + INTR_NEXT], %l6
1549*7c478bd9Sstevel@tonic-gate	stn	%l6, [%l4 + INTR_HEAD]
1550*7c478bd9Sstevel@tonic-gate	!
1551*7c478bd9Sstevel@tonic-gate	! fill up intr_req
1552*7c478bd9Sstevel@tonic-gate	!
1553*7c478bd9Sstevel@tonic-gate	st	%i0, [%l3 + INTR_NUMBER]
1554*7c478bd9Sstevel@tonic-gate	stn	%g0, [%l3 + INTR_NEXT]
1555*7c478bd9Sstevel@tonic-gate	!
1556*7c478bd9Sstevel@tonic-gate	! move intr_req to appropriate list
1557*7c478bd9Sstevel@tonic-gate	!
1558*7c478bd9Sstevel@tonic-gate	sll	%l2, CPTRSHIFT, %l0
1559*7c478bd9Sstevel@tonic-gate	add	%l4, INTR_TAIL, %l6
1560*7c478bd9Sstevel@tonic-gate	ldn	[%l6 + %l0], %l1	! current tail
1561*7c478bd9Sstevel@tonic-gate	brz,pt	%l1, 2f			! branch if list empty
1562*7c478bd9Sstevel@tonic-gate	stn	%l3, [%l6 + %l0]	! make intr_req new tail
1563*7c478bd9Sstevel@tonic-gate	!
1564*7c478bd9Sstevel@tonic-gate	! there's pending intr_req already
1565*7c478bd9Sstevel@tonic-gate	!
1566*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 3f
1567*7c478bd9Sstevel@tonic-gate	stn	%l3, [%l1 + INTR_NEXT]	! update old tail
1568*7c478bd9Sstevel@tonic-gate2:
1569*7c478bd9Sstevel@tonic-gate	!
1570*7c478bd9Sstevel@tonic-gate	! no pending intr_req; make intr_req new head
1571*7c478bd9Sstevel@tonic-gate	!
1572*7c478bd9Sstevel@tonic-gate	add	%l4, INTR_HEAD, %l6
1573*7c478bd9Sstevel@tonic-gate	stn	%l3, [%l6 + %l0]
1574*7c478bd9Sstevel@tonic-gate3:
1575*7c478bd9Sstevel@tonic-gate	!
1576*7c478bd9Sstevel@tonic-gate	! Write %set_softint with (1<<pil) to cause a "pil" level trap
1577*7c478bd9Sstevel@tonic-gate	!
1578*7c478bd9Sstevel@tonic-gate	mov	1, %l1
1579*7c478bd9Sstevel@tonic-gate	sll	%l1, %l2, %l1
1580*7c478bd9Sstevel@tonic-gate	wr	%l1, SET_SOFTINT
1581*7c478bd9Sstevel@tonic-gate4:
1582*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %l5, %pstate
1583*7c478bd9Sstevel@tonic-gate	ret
1584*7c478bd9Sstevel@tonic-gate	restore
1585*7c478bd9Sstevel@tonic-gate	SET_SIZE(setsoftint)
1586*7c478bd9Sstevel@tonic-gate
1587*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1588*7c478bd9Sstevel@tonic-gate
1589*7c478bd9Sstevel@tonic-gate#if defined(lint)
1590*7c478bd9Sstevel@tonic-gate
1591*7c478bd9Sstevel@tonic-gate/*ARGSUSED*/
1592*7c478bd9Sstevel@tonic-gatevoid
1593*7c478bd9Sstevel@tonic-gatesetsoftint_tl1(uint64_t inum, uint64_t dummy)
1594*7c478bd9Sstevel@tonic-gate{}
1595*7c478bd9Sstevel@tonic-gate
1596*7c478bd9Sstevel@tonic-gate#else	/* lint */
1597*7c478bd9Sstevel@tonic-gate
1598*7c478bd9Sstevel@tonic-gate	!
1599*7c478bd9Sstevel@tonic-gate	! Register usage
1600*7c478bd9Sstevel@tonic-gate	!
1601*7c478bd9Sstevel@tonic-gate	! Arguments:
1602*7c478bd9Sstevel@tonic-gate	! %g1 - inumber
1603*7c478bd9Sstevel@tonic-gate	!
1604*7c478bd9Sstevel@tonic-gate	! Internal:
1605*7c478bd9Sstevel@tonic-gate	! %g2 - requested pil
1606*7c478bd9Sstevel@tonic-gate	! %g3 - intr_req
1607*7c478bd9Sstevel@tonic-gate	! %g4 - cpu pointer
1608*7c478bd9Sstevel@tonic-gate	! %g5,%g6,%g7 - temps
1609*7c478bd9Sstevel@tonic-gate	!
1610*7c478bd9Sstevel@tonic-gate	ENTRY_NP(setsoftint_tl1)
1611*7c478bd9Sstevel@tonic-gate	!
1612*7c478bd9Sstevel@tonic-gate	! Verify the inumber received (should be inum < MAXIVNUM).
1613*7c478bd9Sstevel@tonic-gate	!
1614*7c478bd9Sstevel@tonic-gate	set	MAXIVNUM, %g2
1615*7c478bd9Sstevel@tonic-gate	cmp	%g1, %g2
1616*7c478bd9Sstevel@tonic-gate	bgeu,pn	%xcc, .no_ivintr
1617*7c478bd9Sstevel@tonic-gate	clr	%g2			! expected in .no_ivintr
1618*7c478bd9Sstevel@tonic-gate	!
1619*7c478bd9Sstevel@tonic-gate	! Fetch data from intr_vector[] table according to the inum.
1620*7c478bd9Sstevel@tonic-gate	!
1621*7c478bd9Sstevel@tonic-gate	! We have an interrupt number. Put the request on the cpu's softint
1622*7c478bd9Sstevel@tonic-gate	! list, and set %set_softint.
1623*7c478bd9Sstevel@tonic-gate	!
1624*7c478bd9Sstevel@tonic-gate	set	intr_vector, %g5
1625*7c478bd9Sstevel@tonic-gate	sll	%g1, INTR_VECTOR_SHIFT, %g6
1626*7c478bd9Sstevel@tonic-gate	add	%g5, %g6, %g5			! %g5 = &intr_vector[inum]
1627*7c478bd9Sstevel@tonic-gate
1628*7c478bd9Sstevel@tonic-gate	!
1629*7c478bd9Sstevel@tonic-gate	! allocate an intr_req from the free list
1630*7c478bd9Sstevel@tonic-gate	!
1631*7c478bd9Sstevel@tonic-gate	CPU_ADDR(%g4, %g2)
1632*7c478bd9Sstevel@tonic-gate	ldn	[%g4 + INTR_HEAD], %g3
1633*7c478bd9Sstevel@tonic-gate
1634*7c478bd9Sstevel@tonic-gate	! load the pil so it can be used by .no_intr_pool/.no_ivintr
1635*7c478bd9Sstevel@tonic-gate	lduh	[%g5 + IV_PIL], %g2
1636*7c478bd9Sstevel@tonic-gate
1637*7c478bd9Sstevel@tonic-gate	! Verify that the free list is not exhausted.
1638*7c478bd9Sstevel@tonic-gate	brz,pn	%g3, .no_intr_pool
1639*7c478bd9Sstevel@tonic-gate	nop
1640*7c478bd9Sstevel@tonic-gate
1641*7c478bd9Sstevel@tonic-gate	! Verify the intr_vector[] entry according to the inumber.
1642*7c478bd9Sstevel@tonic-gate	! The iv_pil field should not be zero.  This used to be
1643*7c478bd9Sstevel@tonic-gate	! guarded by DEBUG but broken drivers can cause spurious
1644*7c478bd9Sstevel@tonic-gate	! tick interrupts when the softint register is programmed
1645*7c478bd9Sstevel@tonic-gate	! with 1 << 0 at the end of this routine.  Now we always
1646*7c478bd9Sstevel@tonic-gate	! check for an invalid pil.
1647*7c478bd9Sstevel@tonic-gate	brz,pn	%g2, .no_ivintr
1648*7c478bd9Sstevel@tonic-gate	nop
1649*7c478bd9Sstevel@tonic-gate
1650*7c478bd9Sstevel@tonic-gate	!
1651*7c478bd9Sstevel@tonic-gate	! fixup free list
1652*7c478bd9Sstevel@tonic-gate	!
1653*7c478bd9Sstevel@tonic-gate	ldn	[%g3 + INTR_NEXT], %g6
1654*7c478bd9Sstevel@tonic-gate	stn	%g6, [%g4 + INTR_HEAD]
1655*7c478bd9Sstevel@tonic-gate
1656*7c478bd9Sstevel@tonic-gate	!
1657*7c478bd9Sstevel@tonic-gate	! fill in intr_req
1658*7c478bd9Sstevel@tonic-gate	!
1659*7c478bd9Sstevel@tonic-gate	st	%g1, [%g3 + INTR_NUMBER]
1660*7c478bd9Sstevel@tonic-gate	stn	%g0, [%g3 + INTR_NEXT]
1661*7c478bd9Sstevel@tonic-gate	!
1662*7c478bd9Sstevel@tonic-gate	! move intr_req to appropriate list
1663*7c478bd9Sstevel@tonic-gate	!
1664*7c478bd9Sstevel@tonic-gate	sll	%g2, CPTRSHIFT, %g7
1665*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_TAIL, %g6
1666*7c478bd9Sstevel@tonic-gate	ldn	[%g6 + %g7], %g5	! current tail
1667*7c478bd9Sstevel@tonic-gate	brz,pt	%g5, 2f			! branch if list empty
1668*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g6 + %g7]	! make intr_req new tail
1669*7c478bd9Sstevel@tonic-gate	!
1670*7c478bd9Sstevel@tonic-gate	! there's pending intr_req already
1671*7c478bd9Sstevel@tonic-gate	!
1672*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 3f
1673*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g5 + INTR_NEXT]	! update old tail
1674*7c478bd9Sstevel@tonic-gate2:
1675*7c478bd9Sstevel@tonic-gate	!
1676*7c478bd9Sstevel@tonic-gate	! no pending intr_req; make intr_req new head
1677*7c478bd9Sstevel@tonic-gate	!
1678*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_HEAD, %g6
1679*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g6 + %g7]
1680*7c478bd9Sstevel@tonic-gate3:
1681*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
1682*7c478bd9Sstevel@tonic-gate	TRACE_PTR(%g1, %g6)
1683*7c478bd9Sstevel@tonic-gate	GET_TRACE_TICK(%g6)
1684*7c478bd9Sstevel@tonic-gate	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
1685*7c478bd9Sstevel@tonic-gate	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
1686*7c478bd9Sstevel@tonic-gate	rdpr	%tt, %g6
1687*7c478bd9Sstevel@tonic-gate	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
1688*7c478bd9Sstevel@tonic-gate	rdpr	%tpc, %g6
1689*7c478bd9Sstevel@tonic-gate	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
1690*7c478bd9Sstevel@tonic-gate	rdpr	%tstate, %g6
1691*7c478bd9Sstevel@tonic-gate	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
1692*7c478bd9Sstevel@tonic-gate	stna	%sp, [%g1 + TRAP_ENT_SP]%asi
1693*7c478bd9Sstevel@tonic-gate	ld	[%g3 + INTR_NUMBER], %g6
1694*7c478bd9Sstevel@tonic-gate	stna	%g6, [%g1 + TRAP_ENT_TR]%asi
1695*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_HEAD, %g6
1696*7c478bd9Sstevel@tonic-gate	ldn	[%g6 + %g7], %g6		! intr_head[pil]
1697*7c478bd9Sstevel@tonic-gate	stna	%g6, [%g1 + TRAP_ENT_F1]%asi
1698*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_TAIL, %g6
1699*7c478bd9Sstevel@tonic-gate	ldn	[%g6 + %g7], %g6		! intr_tail[pil]
1700*7c478bd9Sstevel@tonic-gate	stna	%g6, [%g1 + TRAP_ENT_F2]%asi
1701*7c478bd9Sstevel@tonic-gate	stna	%g2, [%g1 + TRAP_ENT_F3]%asi	! pil
1702*7c478bd9Sstevel@tonic-gate	stna	%g3, [%g1 + TRAP_ENT_F4]%asi	! intr_req
1703*7c478bd9Sstevel@tonic-gate	TRACE_NEXT(%g1, %g6, %g5)
1704*7c478bd9Sstevel@tonic-gate#endif /* TRAPTRACE */
1705*7c478bd9Sstevel@tonic-gate	!
1706*7c478bd9Sstevel@tonic-gate	! Write %set_softint with (1<<pil) to cause a "pil" level trap
1707*7c478bd9Sstevel@tonic-gate	!
1708*7c478bd9Sstevel@tonic-gate	mov	1, %g5
1709*7c478bd9Sstevel@tonic-gate	sll	%g5, %g2, %g5
1710*7c478bd9Sstevel@tonic-gate	wr	%g5, SET_SOFTINT
1711*7c478bd9Sstevel@tonic-gate4:
1712*7c478bd9Sstevel@tonic-gate	retry
1713*7c478bd9Sstevel@tonic-gate
1714*7c478bd9Sstevel@tonic-gate.no_intr_pool:
1715*7c478bd9Sstevel@tonic-gate	! no_intr_pool: rp, inum (%g1), pil (%g2)
1716*7c478bd9Sstevel@tonic-gate	mov	%g2, %g3
1717*7c478bd9Sstevel@tonic-gate	mov	%g1, %g2
1718*7c478bd9Sstevel@tonic-gate	set	no_intr_pool, %g1
1719*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
1720*7c478bd9Sstevel@tonic-gate	mov	PIL_15, %g4
1721*7c478bd9Sstevel@tonic-gate
1722*7c478bd9Sstevel@tonic-gate.no_ivintr:
1723*7c478bd9Sstevel@tonic-gate	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1724*7c478bd9Sstevel@tonic-gate	mov	%g2, %g3
1725*7c478bd9Sstevel@tonic-gate	mov	%g1, %g2
1726*7c478bd9Sstevel@tonic-gate	set	no_ivintr, %g1
1727*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
1728*7c478bd9Sstevel@tonic-gate	mov	PIL_15, %g4
1729*7c478bd9Sstevel@tonic-gate	SET_SIZE(setsoftint_tl1)
1730*7c478bd9Sstevel@tonic-gate
1731*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1732*7c478bd9Sstevel@tonic-gate
1733*7c478bd9Sstevel@tonic-gate#if defined(lint)
1734*7c478bd9Sstevel@tonic-gate
1735*7c478bd9Sstevel@tonic-gate/*ARGSUSED*/
1736*7c478bd9Sstevel@tonic-gatevoid
1737*7c478bd9Sstevel@tonic-gatewr_clr_softint(uint_t value)
1738*7c478bd9Sstevel@tonic-gate{}
1739*7c478bd9Sstevel@tonic-gate
1740*7c478bd9Sstevel@tonic-gate#else
1741*7c478bd9Sstevel@tonic-gate
1742*7c478bd9Sstevel@tonic-gate	ENTRY_NP(wr_clr_softint)
1743*7c478bd9Sstevel@tonic-gate	retl
1744*7c478bd9Sstevel@tonic-gate	wr	%o0, CLEAR_SOFTINT
1745*7c478bd9Sstevel@tonic-gate	SET_SIZE(wr_clr_softint)
1746*7c478bd9Sstevel@tonic-gate
1747*7c478bd9Sstevel@tonic-gate#endif /* lint */
1748*7c478bd9Sstevel@tonic-gate
1749*7c478bd9Sstevel@tonic-gate#if defined(lint)
1750*7c478bd9Sstevel@tonic-gate
1751*7c478bd9Sstevel@tonic-gate/*ARGSUSED*/
1752*7c478bd9Sstevel@tonic-gatevoid
1753*7c478bd9Sstevel@tonic-gateintr_enqueue_req(uint_t pil, uint32_t inum)
1754*7c478bd9Sstevel@tonic-gate{}
1755*7c478bd9Sstevel@tonic-gate
1756*7c478bd9Sstevel@tonic-gate#else   /* lint */
1757*7c478bd9Sstevel@tonic-gate
1758*7c478bd9Sstevel@tonic-gate/*
1759*7c478bd9Sstevel@tonic-gate * intr_enqueue_req
1760*7c478bd9Sstevel@tonic-gate *
1761*7c478bd9Sstevel@tonic-gate * %o0 - pil
1762*7c478bd9Sstevel@tonic-gate * %o1 - inum
1763*7c478bd9Sstevel@tonic-gate * %o5 - preserved
1764*7c478bd9Sstevel@tonic-gate * %g5 - preserved
1765*7c478bd9Sstevel@tonic-gate */
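/*
 * (This is the same list manipulation as the body of setsoftint() above,
 * without the iv_pending check or the SET_SOFTINT write; see the C sketch
 * there.)
 */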
1766*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_enqueue_req)
1767*7c478bd9Sstevel@tonic-gate	! get intr_req free list
1768*7c478bd9Sstevel@tonic-gate	CPU_ADDR(%g4, %g1)
1769*7c478bd9Sstevel@tonic-gate	ldn	[%g4 + INTR_HEAD], %g3
1770*7c478bd9Sstevel@tonic-gate
1771*7c478bd9Sstevel@tonic-gate	! take intr_req from free list
1772*7c478bd9Sstevel@tonic-gate	ldn	[%g3 + INTR_NEXT], %g6
1773*7c478bd9Sstevel@tonic-gate	stn	%g6, [%g4 + INTR_HEAD]
1774*7c478bd9Sstevel@tonic-gate
1775*7c478bd9Sstevel@tonic-gate	! fill up intr_req
1776*7c478bd9Sstevel@tonic-gate	st	%o1, [%g3 + INTR_NUMBER]
1777*7c478bd9Sstevel@tonic-gate	stn	%g0, [%g3 + INTR_NEXT]
1778*7c478bd9Sstevel@tonic-gate
1779*7c478bd9Sstevel@tonic-gate	! add intr_req to proper pil list
1780*7c478bd9Sstevel@tonic-gate	sll	%o0, CPTRSHIFT, %o0
1781*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_TAIL, %g6
1782*7c478bd9Sstevel@tonic-gate	ldn	[%o0 + %g6], %g1	! current tail
1783*7c478bd9Sstevel@tonic-gate	brz,pt	%g1, 2f			! branch if list is empty
1784*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g6 + %o0]	! make intr_req the new tail
1785*7c478bd9Sstevel@tonic-gate
1786*7c478bd9Sstevel@tonic-gate	! an intr_req was already queued so update old tail
1787*7c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 3f
1788*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g1 + INTR_NEXT]
1789*7c478bd9Sstevel@tonic-gate2:
1790*7c478bd9Sstevel@tonic-gate	! no intr_req's queued so make intr_req the new head
1791*7c478bd9Sstevel@tonic-gate	add	%g4, INTR_HEAD, %g6
1792*7c478bd9Sstevel@tonic-gate	stn	%g3, [%g6 + %o0]
1793*7c478bd9Sstevel@tonic-gate3:
1794*7c478bd9Sstevel@tonic-gate	retl
1795*7c478bd9Sstevel@tonic-gate	nop
1796*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_enqueue_req)
1797*7c478bd9Sstevel@tonic-gate
1798*7c478bd9Sstevel@tonic-gate#endif  /* lint */
1799*7c478bd9Sstevel@tonic-gate
1800*7c478bd9Sstevel@tonic-gate/*
1801*7c478bd9Sstevel@tonic-gate * Set CPU's base SPL level, based on which interrupt levels are active.
1802*7c478bd9Sstevel@tonic-gate * 	Called at spl7 or above.
1803*7c478bd9Sstevel@tonic-gate */
1804*7c478bd9Sstevel@tonic-gate
1805*7c478bd9Sstevel@tonic-gate#if defined(lint)
1806*7c478bd9Sstevel@tonic-gate
1807*7c478bd9Sstevel@tonic-gatevoid
1808*7c478bd9Sstevel@tonic-gateset_base_spl(void)
1809*7c478bd9Sstevel@tonic-gate{}
1810*7c478bd9Sstevel@tonic-gate
1811*7c478bd9Sstevel@tonic-gate#else	/* lint */
1812*7c478bd9Sstevel@tonic-gate
1813*7c478bd9Sstevel@tonic-gate	ENTRY_NP(set_base_spl)
1814*7c478bd9Sstevel@tonic-gate	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1815*7c478bd9Sstevel@tonic-gate	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1816*7c478bd9Sstevel@tonic-gate
1817*7c478bd9Sstevel@tonic-gate/*
1818*7c478bd9Sstevel@tonic-gate * WARNING: non-standard calling sequence; do not call from C
1819*7c478bd9Sstevel@tonic-gate *	%o2 = pointer to CPU
1820*7c478bd9Sstevel@tonic-gate *	%o5 = updated CPU_INTR_ACTV
1821*7c478bd9Sstevel@tonic-gate */
1822*7c478bd9Sstevel@tonic-gate_intr_set_spl:					! intr_thread_exit enters here
1823*7c478bd9Sstevel@tonic-gate	!
1824*7c478bd9Sstevel@tonic-gate	! Determine highest interrupt level active.  Several could be blocked
1825*7c478bd9Sstevel@tonic-gate	! at higher levels than this one, so we must convert the flags to a PIL.
1826*7c478bd9Sstevel@tonic-gate	! Normally nothing will be blocked, so test this first.
1827*7c478bd9Sstevel@tonic-gate	!
1828*7c478bd9Sstevel@tonic-gate	brz,pt	%o5, 1f				! nothing active
1829*7c478bd9Sstevel@tonic-gate	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1830*7c478bd9Sstevel@tonic-gate	set	_intr_flag_table, %o1
1831*7c478bd9Sstevel@tonic-gate	tst	%o3				! see if any of the bits set
1832*7c478bd9Sstevel@tonic-gate	ldub	[%o1 + %o3], %o3		! load bit number
1833*7c478bd9Sstevel@tonic-gate	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1834*7c478bd9Sstevel@tonic-gate	add	%o3, 11-1, %o3			! delay - add bit number - 1
1835*7c478bd9Sstevel@tonic-gate
1836*7c478bd9Sstevel@tonic-gate	sra	%o5, 6, %o3			! test bits 10-6
1837*7c478bd9Sstevel@tonic-gate	tst	%o3
1838*7c478bd9Sstevel@tonic-gate	ldub	[%o1 + %o3], %o3
1839*7c478bd9Sstevel@tonic-gate	bnz,a,pn %xcc, 1f
1840*7c478bd9Sstevel@tonic-gate	add	%o3, 6-1, %o3
1841*7c478bd9Sstevel@tonic-gate
1842*7c478bd9Sstevel@tonic-gate	sra	%o5, 1, %o3			! test bits 5-1
1843*7c478bd9Sstevel@tonic-gate	ldub	[%o1 + %o3], %o3
1844*7c478bd9Sstevel@tonic-gate
1845*7c478bd9Sstevel@tonic-gate	!
1846*7c478bd9Sstevel@tonic-gate	! highest interrupt level number active is in %o3
1847*7c478bd9Sstevel@tonic-gate	!
1848*7c478bd9Sstevel@tonic-gate1:
1849*7c478bd9Sstevel@tonic-gate	retl
1850*7c478bd9Sstevel@tonic-gate	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
1851*7c478bd9Sstevel@tonic-gate	SET_SIZE(set_base_spl)
1852*7c478bd9Sstevel@tonic-gate
1853*7c478bd9Sstevel@tonic-gate/*
1854*7c478bd9Sstevel@tonic-gate * Table that finds the most significant bit set in a five bit field.
1855*7c478bd9Sstevel@tonic-gate * Each entry is the high-order bit number + 1 of its index in the table.
1856*7c478bd9Sstevel@tonic-gate * This read-only data is in the text segment.
1857*7c478bd9Sstevel@tonic-gate */
1858*7c478bd9Sstevel@tonic-gate_intr_flag_table:
1859*7c478bd9Sstevel@tonic-gate	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1860*7c478bd9Sstevel@tonic-gate	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1861*7c478bd9Sstevel@tonic-gate	.align	4
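/*
 * An editorial C rendering of the _intr_set_spl search above (illustrative
 * only; it is not compiled as part of this file).  The three probes cover
 * bits 15-11, 10-6 and 5-1 of the active mask, and the table supplies
 * (highest bit set in the five-bit index) + 1:
 *
 *	base = 0;
 *	if (actv != 0) {
 *		if (actv >> 11)
 *			base = _intr_flag_table[actv >> 11] + 11 - 1;
 *		else if (actv >> 6)
 *			base = _intr_flag_table[actv >> 6] + 6 - 1;
 *		else
 *			base = _intr_flag_table[actv >> 1];
 *	}
 *	cpu->cpu_base_spl = base;
 */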
1862*7c478bd9Sstevel@tonic-gate
1863*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1864*7c478bd9Sstevel@tonic-gate
1865*7c478bd9Sstevel@tonic-gate/*
1866*7c478bd9Sstevel@tonic-gate * int
1867*7c478bd9Sstevel@tonic-gate * intr_passivate(from, to)
1868*7c478bd9Sstevel@tonic-gate *	kthread_id_t	from;		interrupt thread
1869*7c478bd9Sstevel@tonic-gate *	kthread_id_t	to;		interrupted thread
1870*7c478bd9Sstevel@tonic-gate */
1871*7c478bd9Sstevel@tonic-gate
1872*7c478bd9Sstevel@tonic-gate#if defined(lint)
1873*7c478bd9Sstevel@tonic-gate
1874*7c478bd9Sstevel@tonic-gate/* ARGSUSED */
1875*7c478bd9Sstevel@tonic-gateint
1876*7c478bd9Sstevel@tonic-gateintr_passivate(kthread_id_t from, kthread_id_t to)
1877*7c478bd9Sstevel@tonic-gate{ return (0); }
1878*7c478bd9Sstevel@tonic-gate
1879*7c478bd9Sstevel@tonic-gate#else	/* lint */
1880*7c478bd9Sstevel@tonic-gate
1881*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_passivate)
1882*7c478bd9Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp	! get a new window
1883*7c478bd9Sstevel@tonic-gate
1884*7c478bd9Sstevel@tonic-gate	flushw				! force register windows to stack
1885*7c478bd9Sstevel@tonic-gate	!
1886*7c478bd9Sstevel@tonic-gate	! restore registers from the base of the stack of the interrupt thread.
1887*7c478bd9Sstevel@tonic-gate	!
1888*7c478bd9Sstevel@tonic-gate	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
1889*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
1890*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (1*GREGSIZE)], %l1
1891*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (2*GREGSIZE)], %l2
1892*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (3*GREGSIZE)], %l3
1893*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (4*GREGSIZE)], %l4
1894*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (5*GREGSIZE)], %l5
1895*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (6*GREGSIZE)], %l6
1896*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (7*GREGSIZE)], %l7
1897*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
1898*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (9*GREGSIZE)], %o1
1899*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (10*GREGSIZE)], %o2
1900*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (11*GREGSIZE)], %o3
1901*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (12*GREGSIZE)], %o4
1902*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (13*GREGSIZE)], %o5
1903*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (14*GREGSIZE)], %i4
1904*7c478bd9Sstevel@tonic-gate					! copy stack pointer without using %sp
1905*7c478bd9Sstevel@tonic-gate	ldn	[%i2 + (15*GREGSIZE)], %i5
1906*7c478bd9Sstevel@tonic-gate	!
1907*7c478bd9Sstevel@tonic-gate	! put registers into the save area at the top of the interrupted
1908*7c478bd9Sstevel@tonic-gate	! thread's stack, pointed to by %l7 in the save area just loaded.
1909*7c478bd9Sstevel@tonic-gate	!
1910*7c478bd9Sstevel@tonic-gate	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
1911*7c478bd9Sstevel@tonic-gate	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
1912*7c478bd9Sstevel@tonic-gate	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
1913*7c478bd9Sstevel@tonic-gate	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
1914*7c478bd9Sstevel@tonic-gate	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
1915*7c478bd9Sstevel@tonic-gate	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
1916*7c478bd9Sstevel@tonic-gate	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
1917*7c478bd9Sstevel@tonic-gate	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
1918*7c478bd9Sstevel@tonic-gate	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
1919*7c478bd9Sstevel@tonic-gate	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
1920*7c478bd9Sstevel@tonic-gate	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
1921*7c478bd9Sstevel@tonic-gate	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
1922*7c478bd9Sstevel@tonic-gate	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
1923*7c478bd9Sstevel@tonic-gate	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
1924*7c478bd9Sstevel@tonic-gate	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
1925*7c478bd9Sstevel@tonic-gate	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
1926*7c478bd9Sstevel@tonic-gate						! fp, %i7 copied using %i4
1927*7c478bd9Sstevel@tonic-gate	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
1928*7c478bd9Sstevel@tonic-gate	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
1929*7c478bd9Sstevel@tonic-gate						! clear fp in save area
1930*7c478bd9Sstevel@tonic-gate
1931*7c478bd9Sstevel@tonic-gate	! load saved pil for return
1932*7c478bd9Sstevel@tonic-gate	ldub	[%i0 + T_PIL], %i0
1933*7c478bd9Sstevel@tonic-gate	ret
1934*7c478bd9Sstevel@tonic-gate	restore
1935*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_passivate)
1936*7c478bd9Sstevel@tonic-gate
1937*7c478bd9Sstevel@tonic-gate#endif	/* lint */
1938*7c478bd9Sstevel@tonic-gate
1939*7c478bd9Sstevel@tonic-gate#if defined(lint)
1940*7c478bd9Sstevel@tonic-gate
1941*7c478bd9Sstevel@tonic-gate/*
1942*7c478bd9Sstevel@tonic-gate * intr_get_time() is a resource for interrupt handlers to determine how
1943*7c478bd9Sstevel@tonic-gate * much time has been spent handling the current interrupt. Such a function
1944*7c478bd9Sstevel@tonic-gate * is needed because higher level interrupts can arrive during the
1945*7c478bd9Sstevel@tonic-gate * processing of an interrupt, thus making direct comparisons of %tick by
1946*7c478bd9Sstevel@tonic-gate * the handler inaccurate. intr_get_time() only returns time spent in the
1947*7c478bd9Sstevel@tonic-gate * current interrupt handler.
1948*7c478bd9Sstevel@tonic-gate *
1949*7c478bd9Sstevel@tonic-gate * The caller must be calling from an interrupt handler running at a pil
1950*7c478bd9Sstevel@tonic-gate * below or at lock level. Timings are not provided for high-level
1951*7c478bd9Sstevel@tonic-gate * interrupts.
1952*7c478bd9Sstevel@tonic-gate *
1953*7c478bd9Sstevel@tonic-gate * The first time intr_get_time() is called while handling an interrupt,
1954*7c478bd9Sstevel@tonic-gate * it returns the time since the interrupt handler was invoked. Subsequent
1955*7c478bd9Sstevel@tonic-gate * calls will return the time since the prior call to intr_get_time(). Time
1956*7c478bd9Sstevel@tonic-gate * is returned as ticks, adjusted for any clock divisor due to power
1957*7c478bd9Sstevel@tonic-gate * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
1958*7c478bd9Sstevel@tonic-gate * not be the same across CPUs.
1959*7c478bd9Sstevel@tonic-gate *
1960*7c478bd9Sstevel@tonic-gate * Theory Of Intrstat[][]:
1961*7c478bd9Sstevel@tonic-gate *
1962*7c478bd9Sstevel@tonic-gate * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1963*7c478bd9Sstevel@tonic-gate * uint64_ts per pil.
1964*7c478bd9Sstevel@tonic-gate *
1965*7c478bd9Sstevel@tonic-gate * intrstat[pil][0] is a cumulative count of the number of ticks spent
1966*7c478bd9Sstevel@tonic-gate * handling all interrupts at the specified pil on this CPU. It is
1967*7c478bd9Sstevel@tonic-gate * exported via kstats to the user.
1968*7c478bd9Sstevel@tonic-gate *
1969*7c478bd9Sstevel@tonic-gate * intrstat[pil][1] is always a count of ticks less than or equal to the
1970*7c478bd9Sstevel@tonic-gate * value in [0]. The difference between [1] and [0] is the value returned
1971*7c478bd9Sstevel@tonic-gate * by a call to intr_get_time(). At the start of interrupt processing,
1972*7c478bd9Sstevel@tonic-gate * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1973*7c478bd9Sstevel@tonic-gate * time, [0] will increase, but [1] will remain the same. A call to
1974*7c478bd9Sstevel@tonic-gate * intr_get_time() will return the difference, then update [1] to be the
1975*7c478bd9Sstevel@tonic-gate * same as [0]. Future calls will return the time since the last call.
1976*7c478bd9Sstevel@tonic-gate * Finally, when the interrupt completes, [1] is updated to the same as [0].
1977*7c478bd9Sstevel@tonic-gate *
1978*7c478bd9Sstevel@tonic-gate * Implementation:
1979*7c478bd9Sstevel@tonic-gate *
1980*7c478bd9Sstevel@tonic-gate * intr_get_time() works much like a higher level interrupt arriving. It
1981*7c478bd9Sstevel@tonic-gate * "checkpoints" the timing information by incrementing intrstat[pil][0]
1982*7c478bd9Sstevel@tonic-gate * to include elapsed running time, and by setting t_intr_start to %tick.
1983*7c478bd9Sstevel@tonic-gate * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1984*7c478bd9Sstevel@tonic-gate * and updates intrstat[pil][1] to be the same as the new value of
1985*7c478bd9Sstevel@tonic-gate * intrstat[pil][0].
1986*7c478bd9Sstevel@tonic-gate *
1987*7c478bd9Sstevel@tonic-gate * In the normal handling of interrupts, after an interrupt handler returns
1988*7c478bd9Sstevel@tonic-gate * and the code in intr_thread() updates intrstat[pil][0], it then sets
1989*7c478bd9Sstevel@tonic-gate * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1990*7c478bd9Sstevel@tonic-gate * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1991*7c478bd9Sstevel@tonic-gate * is 0.
1992*7c478bd9Sstevel@tonic-gate *
1993*7c478bd9Sstevel@tonic-gate * Whenever interrupts arrive on a CPU which is handling a lower pil
1994*7c478bd9Sstevel@tonic-gate * interrupt, they update the lower pil's [0] to show time spent in the
1995*7c478bd9Sstevel@tonic-gate * handler that they've interrupted. This results in a growing discrepancy
1996*7c478bd9Sstevel@tonic-gate * between [0] and [1], which is returned the next time intr_get_time() is
1997*7c478bd9Sstevel@tonic-gate * called. Time spent in the higher-pil interrupt will not be returned in
1998*7c478bd9Sstevel@tonic-gate * the next intr_get_time() call from the original interrupt, because
1999*7c478bd9Sstevel@tonic-gate * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2000*7c478bd9Sstevel@tonic-gate */
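
/*
 * A minimal usage sketch, assuming a hypothetical in-kernel interrupt
 * handler running at or below LOCK_LEVEL. The xx_* names and the
 * two-phase split are invented for illustration; intr_get_time() and
 * DDI_INTR_CLAIMED are the only real interfaces used.
 *
 *	static uint_t
 *	xx_intr(caddr_t arg)
 *	{
 *		xx_softc_t *sc = (xx_softc_t *)arg;
 *		uint64_t phase1_ticks, phase2_ticks;
 *
 *		xx_handle_rx(sc);			// phase 1 of the handler
 *		phase1_ticks = intr_get_time();		// ticks since handler entry
 *		xx_handle_tx(sc);			// phase 2 of the handler
 *		phase2_ticks = intr_get_time();		// ticks since the first call
 *		xx_record_times(sc, phase1_ticks, phase2_ticks);
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 * The returned values are raw (divisor-adjusted) ticks; convert with
 * tick2ns() if nanoseconds are needed, as noted above.
 */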
2001*7c478bd9Sstevel@tonic-gate
2002*7c478bd9Sstevel@tonic-gate/*ARGSUSED*/
2003*7c478bd9Sstevel@tonic-gateuint64_t
2004*7c478bd9Sstevel@tonic-gateintr_get_time(void)
2005*7c478bd9Sstevel@tonic-gate{ return 0; }
2006*7c478bd9Sstevel@tonic-gate#else	/* lint */
2007*7c478bd9Sstevel@tonic-gate
2008*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_get_time)
2009*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
2010*7c478bd9Sstevel@tonic-gate	!
2011*7c478bd9Sstevel@tonic-gate	! Lots of asserts, but check panic_quiesce first; once a panic is
2012*7c478bd9Sstevel@tonic-gate	! in progress, skip the checks below (branch to 2f).
2013*7c478bd9Sstevel@tonic-gate	!
2014*7c478bd9Sstevel@tonic-gate	sethi	%hi(panic_quiesce), %o0
2015*7c478bd9Sstevel@tonic-gate	ld	[%o0 + %lo(panic_quiesce)], %o0
2016*7c478bd9Sstevel@tonic-gate	brnz,pn	%o0, 2f
2017*7c478bd9Sstevel@tonic-gate	nop
2018*7c478bd9Sstevel@tonic-gate	!
2019*7c478bd9Sstevel@tonic-gate	! ASSERT(%pil <= LOCK_LEVEL)
2020*7c478bd9Sstevel@tonic-gate	!
2021*7c478bd9Sstevel@tonic-gate	rdpr	%pil, %o1
2022*7c478bd9Sstevel@tonic-gate	cmp	%o1, LOCK_LEVEL
2023*7c478bd9Sstevel@tonic-gate	ble,pt	%xcc, 0f
2024*7c478bd9Sstevel@tonic-gate	sethi	%hi(intr_get_time_high_pil), %o0	! delay
2025*7c478bd9Sstevel@tonic-gate	call	panic
2026*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(intr_get_time_high_pil), %o0
2027*7c478bd9Sstevel@tonic-gate0:
2028*7c478bd9Sstevel@tonic-gate	!
2029*7c478bd9Sstevel@tonic-gate	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
2030*7c478bd9Sstevel@tonic-gate	!
2031*7c478bd9Sstevel@tonic-gate	lduh	[THREAD_REG + T_FLAGS], %o2
2032*7c478bd9Sstevel@tonic-gate	andcc	%o2, T_INTR_THREAD, %g0
2033*7c478bd9Sstevel@tonic-gate	bz,pn	%xcc, 1f
2034*7c478bd9Sstevel@tonic-gate	ldub	[THREAD_REG + T_PIL], %o1		! delay
2035*7c478bd9Sstevel@tonic-gate	brnz,pt	%o1, 0f
2036*7c478bd9Sstevel@tonic-gate1:
2037*7c478bd9Sstevel@tonic-gate	sethi	%hi(intr_get_time_not_intr), %o0
2038*7c478bd9Sstevel@tonic-gate	call	panic
2039*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(intr_get_time_not_intr), %o0
2040*7c478bd9Sstevel@tonic-gate0:
2041*7c478bd9Sstevel@tonic-gate	!
2042*7c478bd9Sstevel@tonic-gate	! ASSERT(t_intr_start != 0)
2043*7c478bd9Sstevel@tonic-gate	!
2044*7c478bd9Sstevel@tonic-gate	ldx	[THREAD_REG + T_INTR_START], %o1
2045*7c478bd9Sstevel@tonic-gate	brnz,pt	%o1, 2f
2046*7c478bd9Sstevel@tonic-gate	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
2047*7c478bd9Sstevel@tonic-gate	call	panic
2048*7c478bd9Sstevel@tonic-gate	or	%o0, %lo(intr_get_time_no_start_time), %o0
2049*7c478bd9Sstevel@tonic-gate2:
2050*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
2051*7c478bd9Sstevel@tonic-gate	!
2052*7c478bd9Sstevel@tonic-gate	! %o0 = elapsed time and return value
2053*7c478bd9Sstevel@tonic-gate	! %o1 = pil
2054*7c478bd9Sstevel@tonic-gate	! %o2 = scratch
2055*7c478bd9Sstevel@tonic-gate	! %o3 = scratch
2056*7c478bd9Sstevel@tonic-gate	! %o4 = scratch
2057*7c478bd9Sstevel@tonic-gate	! %o5 = cpu
2058*7c478bd9Sstevel@tonic-gate	!
2059*7c478bd9Sstevel@tonic-gate	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
2060*7c478bd9Sstevel@tonic-gate	ldn	[THREAD_REG + T_CPU], %o5
2061*7c478bd9Sstevel@tonic-gate	ldub	[THREAD_REG + T_PIL], %o1
2062*7c478bd9Sstevel@tonic-gate	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2063*7c478bd9Sstevel@tonic-gate	!
2064*7c478bd9Sstevel@tonic-gate	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2065*7c478bd9Sstevel@tonic-gate	! get delta, and multiply by cpu_divisor if necessary.
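	! In C terms (illustrative only):
	!	now = %tick with the NPT bit (bit 63) cleared;
	!	delta = now - t_intr_start;  t_intr_start = now;
	!	if (cpu_divisor > 1) delta *= cpu_divisor;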
2066*7c478bd9Sstevel@tonic-gate	!
2067*7c478bd9Sstevel@tonic-gate	rdpr	%tick, %o2
2068*7c478bd9Sstevel@tonic-gate	sllx	%o2, 1, %o2		! shift off NPT bit
2069*7c478bd9Sstevel@tonic-gate	srlx	%o2, 1, %o2		! (bit 63 of %tick)
2070*7c478bd9Sstevel@tonic-gate	stx	%o2, [THREAD_REG + T_INTR_START]
2071*7c478bd9Sstevel@tonic-gate	sub	%o2, %o3, %o0
2072*7c478bd9Sstevel@tonic-gate
2073*7c478bd9Sstevel@tonic-gate	lduh	[%o5 + CPU_DIVISOR], %o4
2074*7c478bd9Sstevel@tonic-gate	cmp	%o4, 1
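	! Note: with the annul (,a) bit set, the mulx in the delay slot
	! below executes only if the branch is taken, i.e. only when the
	! divisor is greater than one.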
2075*7c478bd9Sstevel@tonic-gate	bg,a,pn	%xcc, 1f
2076*7c478bd9Sstevel@tonic-gate	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
2077*7c478bd9Sstevel@tonic-gate1:
2078*7c478bd9Sstevel@tonic-gate	!
2079*7c478bd9Sstevel@tonic-gate	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2080*7c478bd9Sstevel@tonic-gate	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2081*7c478bd9Sstevel@tonic-gate	! first entered, or the last time intr_get_time() was invoked. Then
2082*7c478bd9Sstevel@tonic-gate	! update cpu_m.intrstat[pil][1] to match [0].
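	! In C terms (illustrative only):
	!	intrstat[pil][0] += delta;
	!	return value = intrstat[pil][0] - intrstat[pil][1];
	!	intrstat[pil][1] = intrstat[pil][0];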
2083*7c478bd9Sstevel@tonic-gate	!
2084*7c478bd9Sstevel@tonic-gate	sllx	%o1, 4, %o3		! %o3 = pil * 16 (2 counters per pil)
2085*7c478bd9Sstevel@tonic-gate	add	%o3, CPU_MCPU, %o3
2086*7c478bd9Sstevel@tonic-gate	add	%o3, MCPU_INTRSTAT, %o3
2087*7c478bd9Sstevel@tonic-gate	add	%o3, %o5, %o3		! %o3 = &cpu_m.intrstat[pil][0]
2088*7c478bd9Sstevel@tonic-gate	ldx	[%o3], %o2
2089*7c478bd9Sstevel@tonic-gate	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2090*7c478bd9Sstevel@tonic-gate	stx	%o2, [%o3]
2091*7c478bd9Sstevel@tonic-gate	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2092*7c478bd9Sstevel@tonic-gate	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2093*7c478bd9Sstevel@tonic-gate	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2094*7c478bd9Sstevel@tonic-gate
2095*7c478bd9Sstevel@tonic-gate	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2096*7c478bd9Sstevel@tonic-gate	cmp	%o2, %o1			! of either our pil %o1 or
2097*7c478bd9Sstevel@tonic-gate	movl	%xcc, %o1, %o2			! cpu_base_spl.
2098*7c478bd9Sstevel@tonic-gate	retl
2099*7c478bd9Sstevel@tonic-gate	wrpr	%g0, %o2, %pil
2100*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_get_time)
2101*7c478bd9Sstevel@tonic-gate
2102*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
2103*7c478bd9Sstevel@tonic-gateintr_get_time_high_pil:
2104*7c478bd9Sstevel@tonic-gate	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2105*7c478bd9Sstevel@tonic-gateintr_get_time_not_intr:
2106*7c478bd9Sstevel@tonic-gate	.asciz	"intr_get_time(): not called from an interrupt thread"
2107*7c478bd9Sstevel@tonic-gateintr_get_time_no_start_time:
2108*7c478bd9Sstevel@tonic-gate	.asciz	"intr_get_time(): t_intr_start == 0"
2109*7c478bd9Sstevel@tonic-gate#endif /* DEBUG */
2110*7c478bd9Sstevel@tonic-gate#endif  /* lint */
2111*7c478bd9Sstevel@tonic-gate
2112*7c478bd9Sstevel@tonic-gate
2113*7c478bd9Sstevel@tonic-gate#if !defined(lint)
2114*7c478bd9Sstevel@tonic-gate
2115*7c478bd9Sstevel@tonic-gate/*
2116*7c478bd9Sstevel@tonic-gate * Check that the shift used to compute vector table offsets matches the entry size
2117*7c478bd9Sstevel@tonic-gate */
2118*7c478bd9Sstevel@tonic-gate#if INTR_VECTOR_SIZE != (1 << INTR_VECTOR_SHIFT)
2119*7c478bd9Sstevel@tonic-gate#error "INTR_VECTOR_SIZE has changed"
2120*7c478bd9Sstevel@tonic-gate#endif
2121*7c478bd9Sstevel@tonic-gate
2122*7c478bd9Sstevel@tonic-gate#endif  /* lint */