xref: /titanic_51/usr/src/uts/sun4/ml/interrupt.s (revision f4b3ec61df05330d25f55a36b975b4d7519fdeb1)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if defined(lint)
29#include <sys/types.h>
30#include <sys/thread.h>
31#else	/* lint */
32#include "assym.h"
33#endif	/* lint */
34
35#include <sys/cmn_err.h>
36#include <sys/ftrace.h>
37#include <sys/asm_linkage.h>
38#include <sys/machthread.h>
39#include <sys/machcpuvar.h>
40#include <sys/intreg.h>
41#include <sys/ivintr.h>
42
43#ifdef TRAPTRACE
44#include <sys/traptrace.h>
45#endif /* TRAPTRACE */
46
47#if defined(lint)
48
/*
 * Lint prototype for the assembly routine pil_interrupt below; the
 * empty body exists only so lint can type-check callers.
 */
49/* ARGSUSED */
50void
51pil_interrupt(int level)
52{}
53
54#else	/* lint */
55
56
57/*
58 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
59 * 	Register passed from LEVEL_INTERRUPT(level)
60 *	%g4 - interrupt request level
 *
 *	Pops the first intr_vec_t off cpu->m_cpu.intr_head[pil] and hands
 *	off to sys_trap() with either intr_thread (pil <= LOCK_LEVEL) or
 *	current_thread (pil > LOCK_LEVEL) as the TL==0 handler.
61 */
62	ENTRY_NP(pil_interrupt)
63	!
64	! Register usage
65	!	%g1 - cpu
66	!	%g2 - pointer to intr_vec_t (iv)
67	!	%g4 - pil
68	!	%g3, %g5, %g6, %g7 - temps
69	!
70	! Grab the first or list head intr_vec_t off the intr_head[pil]
71	! and panic immediately if list head is NULL. Otherwise, update
72	! intr_head[pil] to next intr_vec_t on the list and clear softint
73	! %clear_softint, if next intr_vec_t is NULL.
74	!
75	CPU_ADDR(%g1, %g5)		! %g1 = cpu
76	!
77	ALTENTRY(pil_interrupt_common)
78	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
79	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
80	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
81	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
82	brnz,pt	%g2, 0f			! branch if list head (iv) is non-NULL
83	nop
84	ba	ptl1_panic		! panic, list head (iv) is NULL
85	mov	PTL1_BAD_INTR_VEC, %g1
860:
87	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
88	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
89	brz,pt	%g3, 1f			! check for multi target softint
90	add	%g2, IV_PIL_NEXT, %g7	! delay - %g7 = &iv->iv_pil_next
91	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
92	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
93	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
941:
95	ldn	[%g7], %g3		! %g3 = next intr_vec_t
96	brnz,pn	%g3, 2f			! branch if next intr_vec_t non NULL
97	stn	%g3, [%g6]		! delay - update cpu->m_cpu.intr_head[pil]
98	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
99	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
100	mov	1, %g5			! %g5 = 1
101	sll	%g5, %g4, %g5		! %g5 = 1 << pil
102	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
1032:
104#ifdef TRAPTRACE
105	TRACE_PTR(%g5, %g6)
106	GET_TRACE_TICK(%g6)
107	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
108	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
109	rdpr	%tt, %g6
110	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
111	rdpr	%tpc, %g6
112	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
113	rdpr	%tstate, %g6
114	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
115	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
116	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
117	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
118	sll	%g4, CPTRSHIFT, %g3
119	add	%g1, INTR_HEAD, %g6
120	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_head[pil]
121	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
122	add	%g1, INTR_TAIL, %g6
123	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
124	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
125	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
126	TRACE_NEXT(%g5, %g6, %g3)
127#endif /* TRAPTRACE */
128	!
129	! clear the iv_pending flag for this interrupt request
130	!
131	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
132	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = !(iv->iv_flags & PEND)
133	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
134	stn	%g0, [%g7]			! clear iv->iv_pil_next or
135						!       iv->iv_pil_xnext
136
137	!
138	! Prepare for sys_trap()
139	!
140	! Registers passed to sys_trap()
141	!	%g1 - interrupt handler at TL==0
142	!	%g2 - pointer to current intr_vec_t (iv),
143	!	      job queue for intr_thread or current_thread
144	!	%g3 - pil
145	!	%g4 - initial pil for handler
146	!
147	! figure which handler to run and which %pil it starts at
148	! intr_thread starts at DISP_LEVEL to prevent preemption
149	! current_thread starts at PIL_MAX to protect cpu_intr_actv
150	!
151	mov	%g4, %g3		! %g3 = %g4, pil
152	cmp	%g4, LOCK_LEVEL
153	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
154	mov	PIL_MAX, %g4		! delay annul - %g4 = PIL_MAX (15)
155	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
156	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
157	ba,pt	%xcc, sys_trap
158	or	%g1, %lo(intr_thread), %g1
1593:
160	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
161	ba,pt	%xcc, sys_trap
162	or	%g1, %lo(current_thread), %g1
163	SET_SIZE(pil_interrupt_common)
164	SET_SIZE(pil_interrupt)
165
166#endif	/* lint */
167
168
169#ifndef	lint
/*
 * cmn_err() format string used by SERVE_INTR when a handler returns 0
 * (interrupt not claimed).  The leading '!' directs cmn_err() to log
 * the message to the system buffer only (see cmn_err(9F)).
 */
170_spurious:
171	.asciz	"!interrupt 0x%x at level %d not serviced"
172
173/*
174 * SERVE_INTR_PRE is called once, just before the first invocation
175 * of SERVE_INTR.
176 *
177 * Registers on entry:
178 *
179 * iv_p, cpu, regs: may be out-registers
180 * ls1, ls2: local scratch registers
181 * os1, os2, os3: scratch registers, may be out
 *
 * Copies the intr_vec_t pointer (iv_p) into both ls1 and ls2 — the
 * register state SERVE_INTR expects on entry — and optionally records
 * a trap-trace entry via SERVE_INTR_TRACE (no-op unless TRAPTRACE).
182 */
183
184#define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
185	mov	iv_p, ls1;						\
186	mov	iv_p, ls2;						\
187	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
188
189/*
190 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
191 * SERVE_INTR_NEXT, without intervening code. No register values
192 * may be modified.
193 *
194 * After calling SERVE_INTR, the caller must check if os3 is set. If
195 * so, there is another interrupt to process. The caller must call
196 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
197 *
198 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
199 * and other actions which need to occur after invocation of an interrupt
200 * handler. However, the values of ls1 and os3 *must* be preserved and
201 * passed unmodified into SERVE_INTR_NEXT.
202 *
 * Operation: loads iv->iv_handler, iv->iv_arg1 and iv->iv_arg2 and
 * calls the handler, loading iv->iv_pil into ls1 in the call's delay
 * slot.  If the handler returns 0 the _spurious message is logged via
 * cmn_err(CE_WARN, ...).  Then cpu_stats.sys.intr[pil - 1] is bumped
 * (the "- 8" below accounts for the 1-based pil index) and
 * cpu->m_cpu.intr_head[pil] is reloaded into os3 so the caller can
 * tell whether more requests are queued at this pil.
 *
203 * Registers on return from SERVE_INTR:
204 *
205 * ls1 - the pil just processed
206 * ls2 - the pointer to intr_vec_t (iv) just processed
207 * os3 - if set, another interrupt needs to be processed
208 * cpu, ls1, os3 - must be preserved if os3 is set
209 */
210
211#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
212	ldn	[ls1 + IV_HANDLER], os2;				\
213	ldn	[ls1 + IV_ARG1], %o0;					\
214	ldn	[ls1 + IV_ARG2], %o1;					\
215	call	os2;							\
216	lduh	[ls1 + IV_PIL], ls1;					\
217	brnz,pt	%o0, 2f;						\
218	mov	CE_WARN, %o0;						\
219	set	_spurious, %o1;						\
220	mov	ls2, %o2;						\
221	call	cmn_err;						\
222	rdpr	%pil, %o3;						\
2232:	ldn	[THREAD_REG + T_CPU], cpu;				\
224	sll	ls1, 3, os1;						\
225	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
226	ldx	[cpu + os2], os3;					\
227	inc	os3;							\
228	stx	os3, [cpu + os2];					\
229	sll	ls1, CPTRSHIFT, os2;					\
230	add	cpu,  INTR_HEAD, os1;					\
231	add	os1, os2, os1;						\
232	ldn	[os1], os3;
233
234/*
 * SERVE_INTR_NEXT: dequeue the next intr_vec_t for this pil.
 *
 * With interrupts disabled (the two-operand wrpr xors PSTATE_IE off),
 * unlinks os3 (the iv loaded by the preceding SERVE_INTR) from
 * cpu->m_cpu.intr_head[pil]; for an IV_SOFTINT_MT (multi-target) iv the
 * per-cpu iv_xpil_next[cpuid] link is used instead of iv_pil_next.  If
 * the list becomes empty, intr_tail[pil] and this pil's softint bit are
 * cleared.  The iv's IV_SOFTINT_PEND flag and next pointer are cleared,
 * %pstate is restored (re-enabling interrupts), and the iv is left in
 * ls1/ls2 ready for the following SERVE_INTR.
 *
235 * Registers on entry:
236 *
237 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
238 * ls1, os3		- preserved from prior call to SERVE_INTR
239 * ls2			- local scratch reg (not preserved)
240 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
241 */
242#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
243	sll	ls1, CPTRSHIFT, os4;					\
244	add	cpu, INTR_HEAD, os1;					\
245	rdpr	%pstate, ls2;						\
246	wrpr	ls2, PSTATE_IE, %pstate;				\
247	lduh	[os3 + IV_FLAGS], os2;					\
248	and	os2, IV_SOFTINT_MT, os2;				\
249	brz,pt	os2, 4f;						\
250	add	os3, IV_PIL_NEXT, os2;					\
251	ld	[cpu + CPU_ID], os5;					\
252	sll	os5, CPTRSHIFT, os5;					\
253	add	os2, os5, os2;						\
2544:	ldn	[os2], os5;						\
255	brnz,pn	os5, 5f;						\
256	stn	os5, [os1 + os4];					\
257	add	cpu, INTR_TAIL, os1;					\
258	stn	%g0, [os1 + os4];					\
259	mov	1, os1;							\
260	sll	os1, ls1, os1;						\
261	wr	os1, CLEAR_SOFTINT;					\
2625:	lduh	[os3 + IV_FLAGS], ls1;                                  \
263	andn	ls1, IV_SOFTINT_PEND, ls1;				\
264	sth	ls1, [os3 + IV_FLAGS];				        \
265	stn	%g0, [os2];						\
266	wrpr	%g0, ls2, %pstate;					\
267	mov	os3, ls1;						\
268	mov	os3, ls2;						\
269	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);
270
271#ifdef TRAPTRACE
272/*
273 * inum - not modified, _spurious depends on it.
 *
 * Records a TT_SERVE_INTR trap-trace entry with interrupts disabled:
 * pc/tstate are taken from the regs pointer in os4, the current %pil
 * is or'd into the TT code, and inum (the iv pointer) is stored as
 * trap_tr.  %pstate is restored on the way out.
274 */
275#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
276	rdpr	%pstate, os3;						\
277	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
278	wrpr	%g0, os2, %pstate;					\
279	TRACE_PTR(os1, os2);						\
280	ldn	[os4 + PC_OFF], os2;					\
281	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
282	ldx	[os4 + TSTATE_OFF], os2;				\
283	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
284	mov	os3, os4;						\
285	GET_TRACE_TICK(os2); 						\
286	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
287	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
288	set	TT_SERVE_INTR, os2;					\
289	rdpr	%pil, os3;						\
290	or	os2, os3, os2;						\
291	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
292	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
293	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
294	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
295	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
296	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
297	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
298	TRACE_NEXT(os1, os2, os3);					\
299	wrpr	%g0, os4, %pstate
300#else	/* TRAPTRACE */
301#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
302#endif	/* TRAPTRACE */
303
304#ifdef TRAPTRACE
305/*
306 * inum - not modified, _spurious depends on it.
 *
 * Like SERVE_INTR_TRACE, but used from SERVE_INTR_NEXT where no regs
 * pointer is available, so trap_pc and trap_tstate are recorded as 0.
307 */
308#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
309	rdpr	%pstate, os3;						\
310	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
311	wrpr	%g0, os2, %pstate;					\
312	TRACE_PTR(os1, os2);						\
313	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
314	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
315	mov	os3, os4;						\
316	GET_TRACE_TICK(os2); 						\
317	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
318	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
319	set	TT_SERVE_INTR, os2;					\
320	rdpr	%pil, os3;						\
321	or	os2, os3, os2;						\
322	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
323	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
324	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
325	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
326	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
327	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
328	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
329	TRACE_NEXT(os1, os2, os3);					\
330	wrpr	%g0, os4, %pstate
331#else	/* TRAPTRACE */
332#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
333#endif	/* TRAPTRACE */
334
335#endif	/* lint */
336
337#if defined(lint)
338
/*
 * Lint prototype for the assembly routine intr_thread below; the empty
 * body exists only so lint can type-check callers.
 */
339/*ARGSUSED*/
340void
341intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
342{}
343
344#else	/* lint */
345
/*
 * Number of handler invocations after which intr_thread forces a
 * preempt() to unpin pinned threads (see the cpu_intrcnt logic below).
 */
346#define	INTRCNT_LIMIT 16
347
348/*
349 * Handle an interrupt in a new thread.
350 *	Entry:
351 *		%o0       = pointer to regs structure
352 *		%o1       = pointer to current intr_vec_t (iv) to be processed
353 *		%o2       = pil
354 *		%sp       = on current thread's kernel stack
355 *		%o7       = return linkage to trap code
356 *		%g7       = current thread
357 *		%pstate   = normal globals, interrupts enabled,
358 *		            privileged, fp disabled
359 *		%pil      = DISP_LEVEL
360 *
361 *	Register Usage
362 *		%l0       = return linkage
363 *		%l1       = pil
364 *		%l2 - %l3 = scratch
365 *		%l4 - %l7 = reserved for sys_trap
366 *		%o2       = cpu
367 *		%o3       = intr thread
368 *		%o0       = scratch
369 *		%o4 - %o5 = scratch
370 */
371	ENTRY_NP(intr_thread)
372	mov	%o7, %l0
373	mov	%o2, %l1
374	!
375	! See if we are interrupting another interrupt thread.
376	!
377	lduh	[THREAD_REG + T_FLAGS], %o3
378	andcc	%o3, T_INTR_THREAD, %g0
379	bz,pt	%xcc, 1f
380	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer
381
382	! We have interrupted an interrupt thread. Take a timestamp,
383	! compute its interval, and update its cumulative counter.
384	add	THREAD_REG, T_INTR_START, %o5
3850:
386	ldx	[%o5], %o3
387	brz,pn	%o3, 1f
388	! We came in on top of an interrupt thread that had no timestamp.
389	! This could happen if, for instance, an interrupt thread which had
390	! previously blocked is being set up to run again in resume(), but
391	! resume() hasn't yet stored a timestamp for it. Or, it could be in
392	! swtch() after its slice has been accounted for.
393	! Only account for the time slice if the starting timestamp is non-zero.
394	rdpr	%tick, %o4			! delay
395	sllx	%o4, 1, %o4			! shift off NPT bit
396	srlx	%o4, 1, %o4
397	sub	%o4, %o3, %o4			! o4 has interval
398
399	! A high-level interrupt in current_thread() interrupting here
400	! will account for the interrupted thread's time slice, but
401	! only if t_intr_start is non-zero. Since this code is going to account
402	! for the time slice, we want to "atomically" load the thread's
403	! starting timestamp, calculate the interval with %tick, and zero
404	! its starting timestamp.
405	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
406	! If it has changed since we loaded it above, we need to re-compute the
407	! interval, since a changed t_intr_start implies current_thread placed
408	! a new, later timestamp there after running a high-level interrupt,
409	! and the %tick val in %o4 had become stale.
410	mov	%g0, %l2
411	casx	[%o5], %o3, %l2
412
413	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
414	! changed between loading it (after label 0b) and computing the
415	! interval above.
416	cmp	%l2, %o3
417	bne,pn	%xcc, 0b
418
419	! Check for Energy Star mode
420	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
421	cmp	%l2, 1
422	bg,a,pn	%xcc, 2f
423	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
4242:
425	! We now know that a valid interval for the interrupted interrupt
426	! thread is in %o4. Update its cumulative counter.
427	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
428	sllx	%l3, 4, %l3		! convert PIL index to byte offset
429	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
430	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
431	ldx	[%o2 + %l3], %o5	! old counter in o5
432	add	%o5, %o4, %o5		! new counter in o5
433	stx	%o5, [%o2 + %l3]	! store new counter
434
435	! Also update intracct[]
436	lduh	[%o2 + CPU_MSTATE], %l3
437	sllx	%l3, 3, %l3
438	add	%l3, CPU_INTRACCT, %l3
439	add	%l3, %o2, %l3
4400:
441	ldx	[%l3], %o5
442	add	%o5, %o4, %o3
443	casx	[%l3], %o5, %o3
444	cmp	%o5, %o3
445	bne,pn	%xcc, 0b
446	nop
447
4481:
449	!
450	! Get set to run interrupt thread.
451	! There should always be an interrupt thread since we allocate one
452	! for each level on the CPU.
453	!
454	! Note that the code in kcpc_overflow_intr -relies- on the ordering
455	! of events here -- in particular that t->t_lwp of the interrupt thread
456	! is set to the pinned thread *before* curthread is changed.
457	!
458	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
459	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
460	stn	%o4, [%o2 + CPU_INTR_THREAD]
461	!
462	! Set bit for this level in CPU's active interrupt bitmask.
463	!
464	ld	[%o2 + CPU_INTR_ACTV], %o5
465	mov	1, %o4
466	sll	%o4, %l1, %o4
467#ifdef DEBUG
468	!
469	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
470	!
471	andcc	%o5, %o4, %g0
472	bz,pt	%xcc, 0f
473	nop
474	! Do not call panic if a panic is already in progress.
475	sethi	%hi(panic_quiesce), %l2
476	ld	[%l2 + %lo(panic_quiesce)], %l2
477	brnz,pn	%l2, 0f
478	nop
479	sethi	%hi(intr_thread_actv_bit_set), %o0
480	call	panic
481	or	%o0, %lo(intr_thread_actv_bit_set), %o0
4820:
483#endif /* DEBUG */
484	or	%o5, %o4, %o5
485	st	%o5, [%o2 + CPU_INTR_ACTV]
486	!
487	! Consider the new thread part of the same LWP so that
488	! window overflow code can find the PCB.
489	!
490	ldn	[THREAD_REG + T_LWP], %o4
491	stn	%o4, [%o3 + T_LWP]
492	!
493	! Threads on the interrupt thread free list could have state already
494	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
495	! Could eliminate the next two instructions with a little work.
496	!
497	mov	TS_ONPROC, %o4
498	st	%o4, [%o3 + T_STATE]
499	!
500	! Push interrupted thread onto list from new thread.
501	! Set the new thread as the current one.
502	! Set interrupted thread's T_SP because if it is the idle thread,
503	! resume may use that stack between threads.
504	!
505	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
506	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
507	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
508	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
509	mov	%o3, THREAD_REG			! set global curthread register
510	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
511	sub	%o4, STACK_BIAS, %sp
512	!
513	! Initialize thread priority level from intr_pri
514	!
515	sethi	%hi(intr_pri), %o4
516	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
517	add	%l1, %o4, %o4		! convert level to dispatch priority
518	sth	%o4, [THREAD_REG + T_PRI]
519	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate
520
521	! Store starting timestamp in thread structure.
522	add	THREAD_REG, T_INTR_START, %o3
5231:
524	ldx	[%o3], %o5
525	rdpr	%tick, %o4
526	sllx	%o4, 1, %o4
527	srlx	%o4, 1, %o4			! shift off NPT bit
528	casx	[%o3], %o5, %o4
529	cmp	%o4, %o5
530	! If a high-level interrupt occurred while we were attempting to store
531	! the timestamp, try again.
532	bne,pn	%xcc, 1b
533	nop
534
535	wrpr	%g0, %l1, %pil			! lower %pil to new level
536	!
537	! Fast event tracing.
538	!
539	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
540	btst	FTRACE_ENABLED, %o4
541	be,pt	%icc, 1f			! skip if ftrace disabled
542	  mov	%l1, %o5
543	!
544	! Tracing is enabled - write the trace entry.
545	!
546	save	%sp, -SA(MINFRAME), %sp
547	set	ftrace_intr_thread_format_str, %o0
548	mov	%i0, %o1
549	mov	%i1, %o2
550	call	ftrace_3
551	mov	%i5, %o3
552	restore
5531:
554	!
555	! call the handler
556	!
557	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
558	!
559	! %o0 and %o1 are now available as scratch registers.
560	!
5610:
562	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
563	!
564	! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
565	! must be preserved. %l1 holds our pil, %l3 holds our intr_vec_t (iv).
566	!
567	! Note: %l1 is the pil level we're processing, but we may have a
568	! higher effective pil because a higher-level interrupt may have
569	! blocked.
570	!
571	wrpr	%g0, DISP_LEVEL, %pil
572	!
573	! Take timestamp, compute interval, update cumulative counter.
574	!
575	add	THREAD_REG, T_INTR_START, %o5
5761:
577	ldx	[%o5], %o0
578#ifdef DEBUG
579	brnz	%o0, 9f
580	nop
581	! Do not call panic if a panic is already in progress.
582	sethi	%hi(panic_quiesce), %o1
583	ld	[%o1 + %lo(panic_quiesce)], %o1
584	brnz,pn	%o1, 9f
585	nop
586	sethi	%hi(intr_thread_t_intr_start_zero), %o0
587	call	panic
588	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
5899:
590#endif /* DEBUG */
591	rdpr	%tick, %o1
592	sllx	%o1, 1, %o1
593	srlx	%o1, 1, %o1			! shift off NPT bit
594	sub	%o1, %o0, %l2			! l2 has interval
595	!
596	! The general outline of what the code here does is:
597	! 1. load t_intr_start, %tick, and calculate the delta
598	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
599	!
600	! The problem is that a high-level interrupt could arrive at any time.
601	! It will account for (%tick - t_intr_start) for us when it starts,
602	! unless we have set t_intr_start to zero, and then set t_intr_start
603	! to a new %tick when it finishes. To account for this, our first step
604	! is to load t_intr_start and the last is to use casx to store the new
605	! t_intr_start. This guarantees atomicity in reading t_intr_start,
606	! reading %tick, and updating t_intr_start.
607	!
608	movrz	%o3, %g0, %o1
609	casx	[%o5], %o0, %o1
610	cmp	%o0, %o1
611	bne,pn	%xcc, 1b
612	!
613	! Check for Energy Star mode
614	!
615	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
616	cmp	%o0, 1
617	bg,a,pn	%xcc, 2f
618	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
6192:
620	!
621	! Update cpu_intrstat. If o3 is set then we will be processing another
622	! interrupt. Above we have set t_intr_start to %tick, not 0. This
623	! means a high-level interrupt can arrive and update the same stats
624	! we're updating. Need to use casx.
625	!
626	sllx	%l1, 4, %o1			! delay - PIL as byte offset
627	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
628	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
629	add	%o1, %o2, %o1
6301:
631	ldx	[%o1], %o5			! old counter in o5
632	add	%o5, %l2, %o0			! new counter in o0
633 	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
634	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
635	cmp	%o5, %o0
636	bne,pn	%xcc, 1b
637	nop
638
639	! Also update intracct[]
640	lduh	[%o2 + CPU_MSTATE], %o1
641	sllx	%o1, 3, %o1
642	add	%o1, CPU_INTRACCT, %o1
643	add	%o1, %o2, %o1
6441:
645	ldx	[%o1], %o5
646	add	%o5, %l2, %o0
647	casx	[%o1], %o5, %o0
648	cmp	%o5, %o0
649	bne,pn	%xcc, 1b
650	nop
651
652	!
653	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
654	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
655	! we've crossed the threshold and we should unpin the pinned threads
656	! by preempt()ing ourselves, which will bubble up the t_intr chain
657	! until hitting the non-interrupt thread, which will then in turn
658	! preempt itself allowing the interrupt processing to resume. Finally,
659	! the scheduler takes over and picks the next thread to run.
660	!
661	! If our CPU is quiesced, we cannot preempt because the idle thread
662	! won't ever re-enter the scheduler, and the interrupt will be forever
663	! blocked.
664	!
665	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
666	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
667	! This insures we enter the scheduler if a higher-priority thread
668	! has become runnable.
669	!
670	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
671	andcc	%o5, CPU_QUIESCED, %g0
672	bnz,pn	%xcc, 1f
673
674	ldn     [THREAD_REG + T_INTR], %o5      ! delay - pinning anything?
675	brz,pn  %o5, 3f				! if not, don't inc intrcnt
676
677	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
678	inc	%o5
679	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
680	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
681	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
682	bg,pn	%xcc, 2f			! don't inc stats again
683	!
684	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
685	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
686	!
687	mov	1, %o4				! delay
688	stub	%o4, [%o2 + CPU_KPRUNRUN]
689	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
690	inc	%o4
691	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
692	ba	2f
693	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
6943:
695	! Code for t_intr == NULL
696	ldub	[%o2 + CPU_KPRUNRUN], %o5
697	brz,pt	%o5, 1f				! don't preempt unless kprunrun
6982:
699	! Time to call preempt
700	mov	%o2, %l3			! delay - save %o2
701	call	preempt
702	mov	%o3, %l2			! delay - save %o3.
703	mov	%l3, %o2			! restore %o2
704	mov	%l2, %o3			! restore %o3
705	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
7061:
707	!
708	! Do we need to call serve_intr_next and do this again?
709	!
710	brz,a,pt %o3, 0f
711	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
712	!
713	! Restore %pil before calling serve_intr() again. We must check
714	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
715	!
716	ld	[%o2 + CPU_BASE_SPL], %o4
717	cmp	%o4, %l1
718	movl	%xcc, %l1, %o4
719	wrpr	%g0, %o4, %pil
720	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
721	ba	0b				! compute new stats
722	nop
7230:
724	!
725	! Clear bit for this level in CPU's interrupt active bitmask.
726	!
727	mov	1, %o4
728	sll	%o4, %l1, %o4
729#ifdef DEBUG
730	!
731	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
732	!
733	andcc	%o4, %o5, %g0
734	bnz,pt	%xcc, 0f
735	nop
736	! Do not call panic if a panic is already in progress.
737	sethi	%hi(panic_quiesce), %l2
738	ld	[%l2 + %lo(panic_quiesce)], %l2
739	brnz,pn	%l2, 0f
740	nop
741	sethi	%hi(intr_thread_actv_bit_not_set), %o0
742	call	panic
743	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
7440:
745#endif /* DEBUG */
746	andn	%o5, %o4, %o5
747	st	%o5, [%o2 + CPU_INTR_ACTV]
748	!
749	! If there is still an interrupted thread underneath this one,
750	! then the interrupt was never blocked and the return is fairly
751	! simple.  Otherwise jump to intr_thread_exit.
752	!
753	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
754	brz,pn	%o4, intr_thread_exit		! branch if none
755	nop
756	!
757	! link the thread back onto the interrupt thread pool
758	!
759	ldn	[%o2 + CPU_INTR_THREAD], %o3
760	stn	%o3, [THREAD_REG + T_LINK]
761	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
762	!
763	! set the thread state to free so kernel debuggers don't see it
764	!
765	mov	TS_FREE, %o5
766	st	%o5, [THREAD_REG + T_STATE]
767	!
768	! Switch back to the interrupted thread and return
769	!
770	stn	%o4, [%o2 + CPU_THREAD]
771	membar	#StoreLoad			! sync with mutex_exit()
772	mov	%o4, THREAD_REG
773
774	! If we pinned an interrupt thread, store its starting timestamp.
775	lduh	[THREAD_REG + T_FLAGS], %o5
776	andcc	%o5, T_INTR_THREAD, %g0
777	bz,pt	%xcc, 1f
778	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
779
780	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
7810:
782	ldx	[%o3], %o4			! o4 = t_intr_start before
783	rdpr	%tick, %o5
784	sllx	%o5, 1, %o5
785	srlx	%o5, 1, %o5			! shift off NPT bit
786	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
787	cmp	%o4, %o5
788	! If a high-level interrupt occurred while we were attempting to store
789	! the timestamp, try again.
790	bne,pn	%xcc, 0b
791	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
7921:
793	! If the thread being restarted isn't pinning anyone, and no interrupts
794	! are pending, zero out cpu_intrcnt
795	ldn	[THREAD_REG + T_INTR], %o4
796	brnz,pn	%o4, 2f
797	rd	SOFTINT, %o4			! delay
798	set	SOFTINT_MASK, %o5
799	andcc	%o4, %o5, %g0
800	bz,a,pt	%xcc, 2f
801	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
8022:
803	jmp	%l0 + 8
804	nop
805	SET_SIZE(intr_thread)
806	/* Not Reached */
807
808	!
809	! An interrupt returned on what was once (and still might be)
810	! an interrupt thread stack, but the interrupted process is no longer
811	! there.  This means the interrupt must have blocked.
812	!
813	! There is no longer a thread under this one, so put this thread back
814	! on the CPU's free list and resume the idle thread which will dispatch
815	! the next thread to run.
816	!
817	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
818	! is enabled.
819	!
820	ENTRY_NP(intr_thread_exit)
821#ifdef TRAPTRACE
822	rdpr	%pstate, %l2
823	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
824	wrpr	%g0, %o4, %pstate			! cpu to known state
825	TRACE_PTR(%o4, %o5)
826	GET_TRACE_TICK(%o5)
827	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
828	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
829	set	TT_INTR_EXIT, %o5
830	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
831	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
832	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
833	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
834	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
835	ld	[%o2 + CPU_BASE_SPL], %o5
836	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
837	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
838	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
839	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
840	TRACE_NEXT(%o4, %o5, %o0)
841	wrpr	%g0, %l2, %pstate
842#endif /* TRAPTRACE */
843	! cpu_stats.sys.intrblk++
844        ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
845        inc     %o4
846        stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
847	!
848	! The thread is put back on the interrupt thread list below,
849	! just before the call to swtch().
850
851	!
852	! Set the CPU's base SPL level.
853	!
854#ifdef DEBUG
855	!
856	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
857	!
858	ld	[%o2 + CPU_INTR_ACTV], %o5
859	mov	1, %o4
860	sll	%o4, %l1, %o4
861	and	%o5, %o4, %o4
862	brz,pt	%o4, 0f
863	nop
864	! Do not call panic if a panic is already in progress.
865	sethi	%hi(panic_quiesce), %l2
866	ld	[%l2 + %lo(panic_quiesce)], %l2
867	brnz,pn	%l2, 0f
868	nop
869	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
870	call	panic
871	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
8720:
873#endif /* DEBUG */
874	call	_intr_set_spl			! set CPU's base SPL level
875	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
876	!
877	! set the thread state to free so kernel debuggers don't see it
878	!
879	mov	TS_FREE, %o4
880	st	%o4, [THREAD_REG + T_STATE]
881	!
882	! Put thread on either the interrupt pool or the free pool and
883	! call swtch() to resume another thread.
884	!
885	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
886	stn	%o5, [THREAD_REG + T_LINK]
887	call	swtch				! switch to best thread
888	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
889	ba,a,pt	%xcc, .				! swtch() shouldn't return
890	SET_SIZE(intr_thread_exit)
891
892	.global ftrace_intr_thread_format_str
/*
 * Format string passed to ftrace_3() by the fast-event-tracing path
 * in intr_thread().
 */
893ftrace_intr_thread_format_str:
894	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
895#ifdef DEBUG
/* Panic message strings used by the DEBUG assertions above. */
896intr_thread_actv_bit_set:
897	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
898intr_thread_actv_bit_not_set:
899	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
900intr_thread_exit_actv_bit_set:
901	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
902intr_thread_t_intr_start_zero:
903	.asciz	"intr_thread():	t_intr_start zero upon handler return"
904#endif /* DEBUG */
905#endif	/* lint */
906
907#if defined(lint)

908
909/*
910 * Handle an interrupt in the current thread
911 *	Entry:
912 *		%o0       = pointer to regs structure
913 *		%o1       = pointer to current intr_vec_t (iv) to be processed
914 *		%o2       = pil
915 *		%sp       = on current thread's kernel stack
916 *		%o7       = return linkage to trap code
917 *		%g7       = current thread
918 *		%pstate   = normal globals, interrupts enabled,
919 *		            privileged, fp disabled
920 *		%pil      = PIL_MAX
921 *
922 *	Register Usage
923 *		%l0       = return linkage
924 *		%l1       = old stack
925 *		%l2 - %l3 = scratch
926 *		%l4 - %l7 = reserved for sys_trap
927 *		%o3       = cpu
928 *		%o0       = scratch
929 *		%o4 - %o5 = scratch
930 */
/*
 * Lint prototype for the assembly routine current_thread below; the
 * empty body exists only so lint can type-check callers.
 */
931/* ARGSUSED */
932void
933current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
934{}
935
936#else	/* lint */
937
	!
	! current_thread - handle a high-level (PIL > LOCK_LEVEL) interrupt
	! on the current thread rather than an interrupt thread. Marks the
	! level active in cpu_intr_actv, does interrupt-time accounting for
	! whatever it interrupted (a lower high-level interrupt or a pinned
	! interrupt thread), switches to the CPU's dedicated interrupt stack
	! when this is the first high-level interrupt in progress, services
	! the softint queue at this PIL, then undoes all of the above on the
	! way out. Entry conditions and register usage are documented in the
	! lint prototype comment above.
	!
	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:

	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1			! shake off NPT bit
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
        sub     %o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
        stx     %o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! if %o2, more intrs await
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr 	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			!  cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
        sub     %o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx     [%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)
1355
1356
1357#ifdef DEBUG
1358current_thread_wrong_pil:
1359	.asciz	"current_thread: unexpected pil level: %d"
1360current_thread_actv_bit_set:
1361	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
1362current_thread_actv_bit_not_set:
1363	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
1364current_thread_nested_pil_zero:
1365	.asciz	"current_thread(): timestamp zero for nested PIL %d"
1366current_thread_timestamp_zero:
1367	.asciz	"current_thread(): timestamp zero upon handler return"
1368current_thread_nested_PIL_not_found:
1369	.asciz	"current_thread: couldn't find nested high-level PIL"
1370#endif /* DEBUG */
1371#endif /* lint */
1372
1373/*
1374 * Return a thread's interrupt level.
1375 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1376 * must dig it out of the save area.
1377 *
1378 * Caller 'swears' that this really is an interrupt thread.
1379 *
1380 * int
1381 * intr_level(t)
1382 *	kthread_id_t	t;
1383 */
1384
1385#if defined(lint)
1386
1387/* ARGSUSED */
1388int
1389intr_level(kthread_id_t t)
1390{ return (0); }
1391
1392#else	/* lint */
1393
	!
	! intr_level - return the saved PIL of an interrupt thread.
	!	In:  %o0 = thread pointer
	!	Out: %o0 = t->t_pil (zero-extended byte load in the delay slot)
	! Leaf routine; clobbers no other registers.
	!
	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)
1398
1399#endif	/* lint */
1400
1401#if defined(lint)
1402
1403/* ARGSUSED */
1404int
1405disable_pil_intr()
1406{ return (0); }
1407
1408#else	/* lint */
1409
	!
	! disable_pil_intr - raise %pil to PIL_MAX, blocking PIL 1-15
	! interrupts.
	!	Out: %o0 = previous %pil, for a later enable_pil_intr()
	! Leaf routine; the wrpr executes in the retl delay slot.
	!
	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0			! save caller's %pil
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)
1415
1416#endif	/* lint */
1417
1418#if defined(lint)
1419
1420/* ARGSUSED */
1421void
1422enable_pil_intr(int pil_save)
1423{}
1424
1425#else	/* lint */
1426
	!
	! enable_pil_intr - restore %pil to the value previously returned
	! by disable_pil_intr().
	!	In: %o0 = saved pil
	! Leaf routine; the wrpr executes in the retl delay slot.
	!
	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil			! restore saved %pil
	SET_SIZE(enable_pil_intr)
1431
1432#endif	/* lint */
1433
1434#if defined(lint)
1435
1436/* ARGSUSED */
1437uint_t
1438disable_vec_intr(void)
1439{ return (0); }
1440
1441#else	/* lint */
1442
	!
	! disable_vec_intr - clear PSTATE.IE, blocking vectored interrupts.
	!	Out: %o0 = previous %pstate, for a later enable_vec_intr()
	! Leaf routine; clobbers %g1 (holds %pstate with IE cleared).
	!
	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)
1449
1450#endif	/* lint */
1451
1452#if defined(lint)
1453
1454/* ARGSUSED */
1455void
1456enable_vec_intr(uint_t pstate_save)
1457{}
1458
1459#else	/* lint */
1460
	!
	! enable_vec_intr - restore %pstate to the value previously returned
	! by disable_vec_intr().
	!	In: %o0 = saved pstate
	! Leaf routine; the wrpr executes in the retl delay slot.
	!
	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate		! restore saved %pstate
	SET_SIZE(enable_vec_intr)
1465
1466#endif	/* lint */
1467
1468#if defined(lint)
1469
1470void
1471cbe_level14(void)
1472{}
1473
1474#else   /* lint */
1475
	!
	! cbe_level14 - level-14 cyclic backend handler. If the pending
	! softint was raised by TICK_COMPARE/STICK_COMPARE, call
	! cyclic_fire(CPU); otherwise just return.
	!	Out: %o0 = 1 (set via the restore in the ret delay slot)
	!
	ENTRY_NP(cbe_level14)
	save    %sp, -SA(MINFRAME), %sp ! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0		! arg0 = CPU pointer
2:
	ret
	restore	%g0, 1, %o0		! return 1
	SET_SIZE(cbe_level14)
1494
1495#endif  /* lint */
1496
1497
1498#if defined(lint)
1499
1500/* ARGSUSED */
1501void
1502setsoftint(uint64_t iv_p)
1503{}
1504
1505#else	/* lint */
1506
	!
	! setsoftint - queue an intr_vec_t on this CPU's softint priority
	! list (unless a softint for it is already pending) and write
	! SET_SOFTINT to post a trap at the vector's PIL. Runs with
	! PSTATE.IE temporarily cleared; %pstate is restored before return.
	!	In: %i0 (caller's %o0) = pointer to intr_vec_t (iv)
	!
	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	! 	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! check if a softint is pending for this softint,
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! Set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for Multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 =  &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(setsoftint)
1579
1580#endif	/* lint */
1581
1582#if defined(lint)
1583
1584/*ARGSUSED*/
1585void
1586setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
1587{}
1588
1589#else	/* lint */
1590
	!
	! setsoftint_tl1 - trap-level-1 variant of setsoftint: queue an
	! intr_vec_t on this CPU's softint priority list and write
	! SET_SOFTINT. Runs on the alternate globals at TL>0 and exits
	! with retry. Unlike setsoftint, no IV_SOFTINT_PEND check is made.
	!
	! Register usage
	!	Arguments:
	! 	%g1 - Pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3,%g5-g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6	!
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)
1674
1675#endif	/* lint */
1676
1677#if defined(lint)
1678
1679/*ARGSUSED*/
1680void
1681setvecint_tl1(uint64_t inum, uint64_t dummy)
1682{}
1683
1684#else	/* lint */
1685
	!
	! setvecint_tl1 - trap-level-1 vectored-interrupt dispatch: look up
	! the intr_vec_t list for an inumber in intr_vec_table[], queue each
	! entry on this CPU's softint priority list, then post all collected
	! PILs with a single SET_SOFTINT write and retry. An out-of-range
	! inum or a NULL list entry diverts to sys_trap(no_ivintr) at PIL_15.
	!
	! Register usage
	!	Arguments:
	! 	%g1 - inumber
	!
	!	Internal:
	! 	%g1 - softint pil mask
	!	%g2 - pil of intr_vec_t
	!	%g3 - pointer to current intr_vec_t (iv)
	!	%g4 - cpu
	!	%g5, %g6,%g7 - temps
	!
	ENTRY_NP(setvecint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr

	!
	! Fetch data from intr_vec_table according to the inum.
	!
	! We have an interrupt number. Fetch the interrupt vector requests
	! from the interrupt vector table for a given interrupt number and
	! insert them into cpu's softint priority lists and set %set_softint.
	!
	set	intr_vec_table, %g5	! %g5 = intr_vec_table
	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
	ldn	[%g5], %g3		! %g3 = pointer to first entry of
					!       intr_vec_t list

	! Verify the first intr_vec_t pointer for a given inum and it should
	! not be NULL. This used to be guarded by DEBUG but broken drivers can
	! cause spurious tick interrupts when the softint register is programmed
	! with 1 << 0 at the end of this routine. Now we always check for a
	! valid intr_vec_t pointer.
	brz,pn	%g3, .no_ivintr
	nop

	!
	! Traverse the intr_vec_t link list, put each item on to corresponding
	! CPU softint priority queue, and compose the final softint pil mask.
	!
	! At this point:
	!	%g3 = intr_vec_table[inum]
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
0:
	!
	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	! At this point:
	!	%g1 = softint pil mask
	!	%g3 = pointer to next intr_vec_t (iv)
	!	%g4 = cpu
	!
	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					! 	current tail (ct)
	brz,pt	%g5, 2f			! branch if current tail is NULL
	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
					! cpu->m_cpu.intr_tail[pil] = iv
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 1f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	!  %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g3, [%g6 + %g7]	!  cpu->m_cpu.intr_head[pil] = iv
3:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	mov	1, %g6			! %g6 = 1
	sll	%g6, %g2, %g6		! %g6 = 1 << pil
	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
	nop
	wr	%g1, SET_SOFTINT	! triggered one or more pil softints
	retry

.no_ivintr:
	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_ivintr, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	SET_SIZE(setvecint_tl1)
1817
1818#endif	/* lint */
1819
1820#if defined(lint)
1821
1822/*ARGSUSED*/
1823void
1824wr_clr_softint(uint_t value)
1825{}
1826
1827#else
1828
	!
	! wr_clr_softint - write %o0 to the CLEAR_SOFTINT register,
	! clearing the corresponding pending softint bits.
	!	In: %o0 = softint bit mask to clear
	! Leaf routine; the wr executes in the retl delay slot.
	!
	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT
	SET_SIZE(wr_clr_softint)
1833
1834#endif /* lint */
1835
1836#if defined(lint)
1837
1838/*ARGSUSED*/
1839void
1840intr_enqueue_req(uint_t pil, uint64_t inum)
1841{}
1842
1843#else   /* lint */
1844
1845/*
1846 * intr_enqueue_req
1847 *
1848 * %o0 - pil
1849 * %o1 - pointer to intr_vec_t (iv)
1850 * %o5 - preserved
1851 * %g5 - preserved
1852 */
1853	ENTRY_NP(intr_enqueue_req)
1854	!
1855	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1856
1857	!
1858	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1859	!
1860	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1861	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1862	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1863					!       current tail (ct)
1864	brz,pt	%g1, 2f			! branch if current tail is NULL
1865	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) as new tail
1866
1867	!
1868	! there's pending intr_vec_t already
1869	!
1870	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1871	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1872	brz,pt	%g6, 1f			! check for Multi target softint flag
1873	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1874	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1875	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1876	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
18771:
1878	!
1879	! update old tail
1880	!
1881	ba,pt	%xcc, 3f
1882	stn	%o1, [%g3]		! {%g5] = iv, set pil_next field
18832:
1884	!
1885	! no intr_vec_t's queued so make intr_vec_t as new head
1886	!
1887	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1888	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
18893:
1890	retl
1891	nop
1892	SET_SIZE(intr_enqueue_req)
1893
1894#endif  /* lint */
1895
1896/*
1897 * Set CPU's base SPL level, based on which interrupt levels are active.
1898 * 	Called at spl7 or above.
1899 */
1900
1901#if defined(lint)
1902
1903void
1904set_base_spl(void)
1905{}
1906
1907#else	/* lint */
1908
	!
	! set_base_spl - recompute cpu_base_spl as the highest PIL still
	! set in cpu_intr_actv, using _intr_flag_table to find the most
	! significant bit in each 5-bit group of the mask.
	! Clobbers %o1, %o2, %o3, %o5.
	!
	ENTRY_NP(set_base_spl)
	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask

/*
 * WARNING: non-standard calling sequence; do not call from C
 *	%o2 = pointer to CPU
 *	%o5 = updated CPU_INTR_ACTV
 */
_intr_set_spl:					! intr_thread_exit enters here
	!
	! Determine highest interrupt level active.  Several could be blocked
	! at higher levels than this one, so must convert flags to a PIL
	! Normally nothing will be blocked, so test this first.
	!
	brz,pt	%o5, 1f				! nothing active
	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
	set	_intr_flag_table, %o1
	tst	%o3				! see if any of the bits set
	ldub	[%o1 + %o3], %o3		! load bit number
	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
	add	%o3, 11-1, %o3			! delay - add bit number - 1

	sra	%o5, 6, %o3			! test bits 10-6
	tst	%o3
	ldub	[%o1 + %o3], %o3
	bnz,a,pn %xcc, 1f
	add	%o3, 6-1, %o3

	sra	%o5, 1, %o3			! test bits 5-1
	ldub	[%o1 + %o3], %o3

	!
	! highest interrupt level number active is in %o3
	!
1:
	retl
	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
	SET_SIZE(set_base_spl)
1948
1949/*
1950 * Table that finds the most significant bit set in a five bit field.
 * Each entry is the high-order bit number + 1 of its index in the table.
1952 * This read-only data is in the text segment.
1953 */
1954_intr_flag_table:
1955	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1956	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1957	.align	4
1958
1959#endif	/* lint */
1960
1961/*
1962 * int
1963 * intr_passivate(from, to)
1964 *	kthread_id_t	from;		interrupt thread
1965 *	kthread_id_t	to;		interrupted thread
1966 */
1967
1968#if defined(lint)
1969
1970/* ARGSUSED */
1971int
1972intr_passivate(kthread_id_t from, kthread_id_t to)
1973{ return (0); }
1974
1975#else	/* lint */
1976
	!
	! intr_passivate - move the register window saved at the base of an
	! interrupt thread's stack to the save area at the top of the
	! interrupted thread's stack, then clear the frame pointer in the
	! interrupt thread's save area.
	!	In:  %i0 = interrupt thread, %i1 = interrupted thread
	!	Out: %i0 = interrupt thread's saved PIL (t_pil)
	!
	ENTRY_NP(intr_passivate)
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	flushw				! force register windows to stack
	!
	! restore registers from the base of the stack of the interrupt thread.
	!
	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
	ldn	[%i2 + (1*GREGSIZE)], %l1
	ldn	[%i2 + (2*GREGSIZE)], %l2
	ldn	[%i2 + (3*GREGSIZE)], %l3
	ldn	[%i2 + (4*GREGSIZE)], %l4
	ldn	[%i2 + (5*GREGSIZE)], %l5
	ldn	[%i2 + (6*GREGSIZE)], %l6
	ldn	[%i2 + (7*GREGSIZE)], %l7
	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
	ldn	[%i2 + (9*GREGSIZE)], %o1
	ldn	[%i2 + (10*GREGSIZE)], %o2
	ldn	[%i2 + (11*GREGSIZE)], %o3
	ldn	[%i2 + (12*GREGSIZE)], %o4
	ldn	[%i2 + (13*GREGSIZE)], %o5
	ldn	[%i2 + (14*GREGSIZE)], %i4
					! copy stack/pointer without using %sp
	ldn	[%i2 + (15*GREGSIZE)], %i5
	!
	! put registers into the save area at the top of the interrupted
	! thread's stack, pointed to by %l7 in the save area just loaded.
	!
	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
						! fp, %i7 copied using %i4
	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
						! clear fp in save area

	! load saved pil for return
	ldub	[%i0 + T_PIL], %i0
	ret
	restore
	SET_SIZE(intr_passivate)
2032
2033#endif	/* lint */
2034
2035#if defined(lint)
2036
2037/*
2038 * intr_get_time() is a resource for interrupt handlers to determine how
2039 * much time has been spent handling the current interrupt. Such a function
2040 * is needed because higher level interrupts can arrive during the
2041 * processing of an interrupt, thus making direct comparisons of %tick by
2042 * the handler inaccurate. intr_get_time() only returns time spent in the
2043 * current interrupt handler.
2044 *
2045 * The caller must be calling from an interrupt handler running at a pil
2046 * below or at lock level. Timings are not provided for high-level
2047 * interrupts.
2048 *
2049 * The first time intr_get_time() is called while handling an interrupt,
2050 * it returns the time since the interrupt handler was invoked. Subsequent
2051 * calls will return the time since the prior call to intr_get_time(). Time
2052 * is returned as ticks, adjusted for any clock divisor due to power
2053 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2054 * not be the same across CPUs.
2055 *
2056 * Theory Of Intrstat[][]:
2057 *
2058 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2059 * uint64_ts per pil.
2060 *
2061 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2062 * handling all interrupts at the specified pil on this CPU. It is
2063 * exported via kstats to the user.
2064 *
2065 * intrstat[pil][1] is always a count of ticks less than or equal to the
2066 * value in [0]. The difference between [1] and [0] is the value returned
2067 * by a call to intr_get_time(). At the start of interrupt processing,
2068 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2069 * time, [0] will increase, but [1] will remain the same. A call to
2070 * intr_get_time() will return the difference, then update [1] to be the
2071 * same as [0]. Future calls will return the time since the last call.
2072 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2073 *
2074 * Implementation:
2075 *
2076 * intr_get_time() works much like a higher level interrupt arriving. It
2077 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2078 * to include elapsed running time, and by setting t_intr_start to %tick.
2079 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2080 * and updates intrstat[pil][1] to be the same as the new value of
2081 * intrstat[pil][0].
2082 *
2083 * In the normal handling of interrupts, after an interrupt handler returns
2084 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2085 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2086 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2087 * is 0.
2088 *
2089 * Whenever interrupts arrive on a CPU which is handling a lower pil
2090 * interrupt, they update the lower pil's [0] to show time spent in the
2091 * handler that they've interrupted. This results in a growing discrepancy
2092 * between [0] and [1], which is returned the next time intr_get_time() is
2093 * called. Time spent in the higher-pil interrupt will not be returned in
2094 * the next intr_get_time() call from the original interrupt, because
2095 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2096 */
2097
/*ARGSUSED*/
uint64_t
intr_get_time(void)
{
	return (0);
}
2102#else	/* lint */
2103
	ENTRY_NP(intr_get_time)
	!
	! uint64_t intr_get_time(void)
	!	Return the ticks spent in the current interrupt handler since
	!	the handler was entered or since the previous call to
	!	intr_get_time(), adjusted for the CPU clock divisor.
	!	See the theory-of-operation comment above for intrstat[][].
	!
#ifdef DEBUG
	!
	! Lots of asserts, but just check panic_quiesce first.
	! Don't bother with lots of tests if we're just ignoring them.
	!
	sethi	%hi(panic_quiesce), %o0
	ld	[%o0 + %lo(panic_quiesce)], %o0
	brnz,pn	%o0, 2f
	nop
	!
	! ASSERT(%pil <= LOCK_LEVEL)
	!
	rdpr	%pil, %o1
	cmp	%o1, LOCK_LEVEL
	ble,pt	%xcc, 0f
	sethi	%hi(intr_get_time_high_pil), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_high_pil), %o0
0:
	!
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f
	ldub	[THREAD_REG + T_PIL], %o1		! delay
	brnz,pt	%o1, 0f
1:
	! (the sethi below is the delay slot of brnz above; it is
	! harmless when the branch to 0f is taken)
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2		! shift the high bit (63, the NPT
	srlx	%o2, 1, %o2		! bit) out of the %tick value
	stx	%o2, [THREAD_REG + T_INTR_START]	! t_intr_start = now
	sub	%o2, %o3, %o0		! %o0 = now - old t_intr_start

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f		! annulled: delay slot runs iff taken
	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
1:
	! Update intracct[]: cpu_intracct[mstate] += delta
	lduh	[%o5 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4		! mstate * 8 (bytes per uint64_t)
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o5 + %o4], %o2
	add	%o2, %o0, %o2
	stx	%o2, [%o5 + %o4]

	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3		! pil * 16 (two uint64_ts per pil)
	add	%o3, CPU_MCPU, %o3
	add	%o3, MCPU_INTRSTAT, %o3
	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2		! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl.
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(intr_get_time)
2205
#ifdef DEBUG
/*
 * Panic format strings used by the DEBUG-only assertions in
 * intr_get_time() above.
 */
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
2214#endif  /* lint */
2215