/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * 	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the list head intr_vec_t off intr_head[pil] and panic
	! immediately if it is NULL. Otherwise, update intr_head[pil] to
	! the next intr_vec_t on the list and, if that next pointer is
	! NULL, clear the softint for this pil via %clear_softint.
	!
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) is not NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t non-NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = !(iv->iv_flags & PEND)
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						!       iv->iv_pil_xnext

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure out which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
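	! In other words: pil 1..LOCK_LEVEL is handled by intr_thread,
	! pil LOCK_LEVEL+1..PIL_MAX by current_thread.
	!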
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */


#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);
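
/*
 * Taken together, the SERVE_INTR_* macros are used in the following
 * pattern (see intr_thread() and current_thread() below):
 *
 *	SERVE_INTR_PRE(...)
 * 0:
 *	SERVE_INTR(...)
 *	... accounting, preemption checks ...
 *	if os3 != 0: SERVE_INTR_NEXT(...); ba 0b
 */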

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2; /* os2 = iv->iv_handler */	\
	ldn	[ls1 + IV_ARG1], %o0;	/* %o0 = iv->iv_arg1 */		\
	ldn	[ls1 + IV_ARG2], %o1;	/* %o1 = iv->iv_arg2 */		\
	call	os2;			/* call the handler */		\
	lduh	[ls1 + IV_PIL], ls1;	/* delay - ls1 = iv->iv_pil */	\
	brnz,pt	%o0, 2f;		/* handler claimed the intr? */	\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;		/* no - warn via cmn_err */	\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu; /* reload CPU pointer */	\
	sll	ls1, 3, os1;		/* pil * sizeof (uint64_t) */	\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;			/* cpu_stats.sys.intr[pil-1]++ */ \
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu, INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;		/* os3 = intr_head[pil] */

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered during the macro;
 *			  valid again upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch regs, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;	/* os4 = pil list offset */	\
	add	cpu, INTR_HEAD, os1;	/* os1 = &intr_head */		\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate; /* disable interrupts */	\
	lduh	[os3 + IV_FLAGS], os2;					\
	and	os2, IV_SOFTINT_MT, os2; /* multi target softint? */	\
	brz,pt	os2, 4f;						\
	add	os3, IV_PIL_NEXT, os2;	/* os2 = &iv->iv_pil_next */	\
	ld	[cpu + CPU_ID], os5;	/* yes, index by cpuid */	\
	sll	os5, CPTRSHIFT, os5;					\
	add	os2, os5, os2;		/* os2 = &iv->iv_xpil_next[cpuid] */ \
4:	ldn	[os2], os5;		/* os5 = next intr_vec_t */	\
	brnz,pn	os5, 5f;						\
	stn	os5, [os1 + os4];	/* update intr_head[pil] */	\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];	/* clear intr_tail[pil] */	\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;	/* clear softint on this pil */	\
5:	lduh	[os3 + IV_FLAGS], ls1;					\
	andn	ls1, IV_SOFTINT_PEND, ls1; /* clear IV_SOFTINT_PEND */	\
	sth	ls1, [os3 + IV_FLAGS];					\
	stn	%g0, [os2];		/* clear iv pil_next/xpil_next */ \
	wrpr	%g0, ls2, %pstate;	/* restore interrupts */	\
	mov	os3, ls1;		/* ls1 = new iv */		\
	mov	os3, ls2;		/* ls2 = new iv */		\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);
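
/*
 * Note that SERVE_INTR_NEXT toggles PSTATE_IE off (via the wrpr above)
 * while it unlinks the next intr_vec_t, so a vectored interrupt cannot
 * modify this cpu's softint priority list in the middle of the
 * head/tail update; the saved %pstate is restored before returning.
 */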

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2);						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2);						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

#define	INTRCNT_LIMIT 16
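
/*
 * INTRCNT_LIMIT bounds cpu_intrcnt, the running count of interrupt
 * handlers invoked while pinning a thread; when the limit is reached,
 * intr_thread() calls preempt() so the pinned thread is not held
 * captive indefinitely (see the unpin logic below).
 */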

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	rdpr	%tick, %o4			! delay
	sllx	%o4, 1, %o4			! shift off NPT bit
	srlx	%o4, 1, %o4
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shift off NPT bit
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call SERVE_INTR_NEXT, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our iv.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %o1
	sllx	%o1, 1, %o1
	srlx	%o1, 1, %o1			! shift off NPT bit
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1			! %o1 = (%o3 == 0) ? 0 : %tick
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call SERVE_INTR_NEXT and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling SERVE_INTR again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4			! %o4 = max(pil, base spl)
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad			! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5			! shift off NPT bit
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread(): cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread(): cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */
#endif	/* lint */

#if defined(lint)

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
/* ARGSUSED */
void
current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3

	ldn	[THREAD_REG + T_ONFAULT], %l2
	brz,pt	%l2, no_onfault		! branch if no onfault label set
	nop
	stn	%g0, [THREAD_REG + T_ONFAULT]	! clear onfault label
	ldn	[THREAD_REG + T_LOFAULT], %l3
	stn	%g0, [THREAD_REG + T_LOFAULT]	! clear lofault data

	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OFD, %o4	! %o4 has on_fault data offset
	stn	%l2, [%o3 + %o4]	! save onfault label for pil %o2
	add	%o5, CPU_LFD, %o4	! %o4 has lofault data offset
	stn	%l3, [%o3 + %o4]	! save lofault data for pil %o2

no_onfault:
	ldn	[THREAD_REG + T_ONTRAP], %l2
	brz,pt	%l2, 6f			! branch if no on_trap protection
	nop
	stn	%g0, [THREAD_REG + T_ONTRAP]	! clear on_trap protection
	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OTD, %o4	! %o4 has on_trap data offset
	stn	%l2, [%o3 + %o4]	! save on_trap label for pil %o2

	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
6:	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:
	!
	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
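	!
	! For example, if a PIL 15 interrupt arrives on top of an active
	! PIL 13 handler: %o5 has bit 2 set (13 - (LOCK_LEVEL + 1)), the
	! search mask starts at bit 3 (15 - (LOCK_LEVEL + 2)) and is
	! shifted right until it hits that bit, leaving %o4 = 2, which is
	! the pil_high_start[] index for PIL 13.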
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1			! shake off NPT bit
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
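	! For example, for PIL 13: %o4 = (13 - 11) * 8 = 16; shifted left
	! one gives 32; adding (LOCK_LEVEL + 1) * 16 = 176 yields 208,
	! i.e. 13 * 16, the byte offset of intrstat[13] (each intrstat[]
	! entry is a pair of 8-byte counters).
	!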
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp
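	! (%l1 keeps the old %sp; current_thread_complete restores it via
	! movz once no other high-level interrupts remain active.)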

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	stx	%o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! branch if no more intrs await
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			! cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx	[%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp			! if not, restore old %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 7f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 7f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]

7:
	sub	%o2, LOCK_LEVEL + 1, %o4
	sll	%o4, CPTRSHIFT, %o5

	! Check on_trap saved area and restore as needed
	add	%o5, CPU_OTD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt	%l2, no_ontrp_restore
	nop
	stn	%l2, [THREAD_REG + T_ONTRAP]	! restore
	stn	%g0, [%o3 + %o4]	! clear

no_ontrp_restore:
	! Check on_fault saved area and restore as needed
	add	%o5, CPU_OFD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt	%l2, 8f
	nop
	stn	%l2, [THREAD_REG + T_ONFAULT]	! restore
	stn	%g0, [%o3 + %o4]	! clear
	add	%o5, CPU_LFD, %o4
	ldn	[%o3 + %o4], %l2
	stn	%l2, [THREAD_REG + T_LOFAULT]	! restore
	stn	%g0, [%o3 + %o4]	! clear

8:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)


#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

#if defined(lint)

/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}

#else	/* lint */

	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}

#else	/* lint */

	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)

#endif	/* lint */

#if defined(lint)

void
cbe_level14(void)
{}

#else	/* lint */

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0		! delay - arg0 = cpu pointer
2:
	ret
	restore	%g0, 1, %o0		! return 1
	SET_SIZE(cbe_level14)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
kdi_setsoftint(uint64_t iv_p)
{}

#else	/* lint */

	ENTRY_NP(kdi_setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	! 	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! Check if a softint is already pending for this iv;
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) onto the cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t (iv) the new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(kdi_setsoftint)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	! 	%g1 - pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3, %g5 - %g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) onto the cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t (iv) the new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6	!
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)

#endif	/* lint */
1729
1730#if defined(lint)
1731
1732/*ARGSUSED*/
1733void
1734setvecint_tl1(uint64_t inum, uint64_t dummy)
1735{}
1736
1737#else	/* lint */
1738
1739	!
1740	! Register usage
1741	!	Arguments:
1742	! 	%g1 - inumber
1743	!
1744	!	Internal:
1745	! 	%g1 - softint pil mask
1746	!	%g2 - pil of intr_vec_t
1747	!	%g3 - pointer to current intr_vec_t (iv)
1748	!	%g4 - cpu
1749	!	%g5, %g6,%g7 - temps
1750	!
1751	ENTRY_NP(setvecint_tl1)
1752	!
1753	! Verify the inumber received (should be inum < MAXIVNUM).
1754	!
1755	set	MAXIVNUM, %g2
1756	cmp	%g1, %g2
1757	bgeu,pn	%xcc, .no_ivintr
1758	clr	%g2			! expected in .no_ivintr
1759
1760	!
1761	! Fetch data from intr_vec_table according to the inum.
1762	!
1763	! We have an interrupt number. Fetch the interrupt vector requests
1764	! from the interrupt vector table for a given interrupt number and
1765	! insert them into cpu's softint priority lists and set %set_softint.
1766	!
1767	set	intr_vec_table, %g5	! %g5 = intr_vec_table
1768	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
1769	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
1770	ldn	[%g5], %g3		! %g3 = pointer to first entry of
1771					!       intr_vec_t list
1772
1773	! Verify the first intr_vec_t pointer for a given inum and it should
1774	! not be NULL. This used to be guarded by DEBUG but broken drivers can
1775	! cause spurious tick interrupts when the softint register is programmed
1776	! with 1 << 0 at the end of this routine. Now we always check for a
1777	! valid intr_vec_t pointer.
1778	brz,pn	%g3, .no_ivintr
1779	nop
1780
1781	!
1782	! Traverse the intr_vec_t link list, put each item on to corresponding
1783	! CPU softint priority queue, and compose the final softint pil mask.
1784	!
1785	! At this point:
1786	!	%g3 = intr_vec_table[inum]
1787	!
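	!
	! In rough C terms, the loop below does the following (a sketch
	! only; the names come from the register-usage comments above,
	! and the tail insertion is spelled out in the code that follows):
	!
	!	mask = 0;
	!	for (iv = intr_vec_table[inum]; iv != NULL;
	!	    iv = iv->iv_vec_next) {
	!		enqueue iv on cpu->m_cpu.intr_tail[iv->iv_pil];
	!		mask |= 1 << iv->iv_pil;
	!	}
	!	write mask to %set_softint;
	!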
1788	CPU_ADDR(%g4, %g2)		! %g4 = cpu
1789	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
17900:
1791	!
1792	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
1793	!
1794	! At this point:
1795	!	%g1 = softint pil mask
1796	!	%g3 = pointer to next intr_vec_t (iv)
1797	!	%g4 = cpu
1798	!
1799	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
1800	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
1801	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1802	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
1803					! 	current tail (ct)
1804	brz,pt	%g5, 2f			! branch if current tail is NULL
1805	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
1806					! cpu->m_cpu.intr_tail[pil] = iv
1807	!
1808	! there's pending intr_vec_t already
1809	!
1810	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1811	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1812	brz,pt	%g6, 1f			! check for Multi target softint flag
1813	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
1814	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1815	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1816	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
18171:
1818	!
1819	! update old tail
1820	!
1821	ba,pt	%xcc, 3f
1822	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
18232:
1824	!
1825	! no pending intr_vec_t; make this intr_vec_t the new head
1826	!
1827	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
1828	stn	%g3, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
18293:
1830#ifdef TRAPTRACE
1831	TRACE_PTR(%g5, %g6)
1832	GET_TRACE_TICK(%g6)
1833	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1834	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
1835	rdpr	%tt, %g6
1836	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1837	rdpr	%tpc, %g6
1838	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1839	rdpr	%tstate, %g6
1840	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1841	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1842	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1843	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
1844	add	%g4, INTR_HEAD, %g6
1845	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1846	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1847	add	%g4, INTR_TAIL, %g6
1848	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1849	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1850	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1851	TRACE_NEXT(%g5, %g6, %g7)
1852#endif /* TRAPTRACE */
1853	mov	1, %g6			! %g6 = 1
1854	sll	%g6, %g2, %g6		! %g6 = 1 << pil
1855	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
1856	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1857	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
1858	nop
1859	wr	%g1, SET_SOFTINT	! triggered one or more pil softints
1860	retry
1861
1862.no_ivintr:
1863	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1864	mov	%g2, %g3		! %g3 = pil (0)
1865	mov	%g1, %g2		! %g2 = inum
1866	set	no_ivintr, %g1		! %g1 = no_ivintr() handler
1867	ba,pt	%xcc, sys_trap
1868	mov	PIL_15, %g4		! delay - %g4 = PIL_15
1869	SET_SIZE(setvecint_tl1)
1870
1871#endif	/* lint */
1872
1873#if defined(lint)
1874
1875/*ARGSUSED*/
1876void
1877wr_clr_softint(uint_t value)
1878{}
1879
1880#else
1881
1882	ENTRY_NP(wr_clr_softint)
1883	retl
1884	wr	%o0, CLEAR_SOFTINT	! delay - clear softint bits in %o0
1885	SET_SIZE(wr_clr_softint)
1886
1887#endif /* lint */
1888
1889#if defined(lint)
1890
1891/*ARGSUSED*/
1892void
1893intr_enqueue_req(uint_t pil, uint64_t inum)
1894{}
1895
1896#else   /* lint */
1897
1898/*
1899 * intr_enqueue_req
1900 *
1901 * %o0 - pil
1902 * %o1 - pointer to intr_vec_t (iv)
1903 * %o5 - preserved
1904 * %g5 - preserved
1905 */
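
/*
 * In rough C terms, the tail insertion below does the following (a
 * sketch only; 'ct' is the current tail, and the field names follow
 * the assembly comments):
 *
 *	ct = cpu->m_cpu.intr_tail[pil];
 *	cpu->m_cpu.intr_tail[pil] = iv;
 *	if (ct == NULL)				// list was empty
 *		cpu->m_cpu.intr_head[pil] = iv;
 *	else if (ct->iv_flags & IV_SOFTINT_MT)	// multi target softint
 *		ct->iv_xpil_next[cpu->cpu_id] = iv;
 *	else
 *		ct->iv_pil_next = iv;
 */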
1906	ENTRY_NP(intr_enqueue_req)
1907	!
1908	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1909
1910	!
1911	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1912	!
1913	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1914	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1915	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1916					!       current tail (ct)
1917	brz,pt	%g1, 2f			! branch if current tail is NULL
1918	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) as new tail
1919
1920	!
1921	! there's pending intr_vec_t already
1922	!
1923	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1924	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1925	brz,pt	%g6, 1f			! check for Multi target softint flag
1926	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1927	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1928	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1929	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
19301:
1931	!
1932	! update old tail
1933	!
1934	ba,pt	%xcc, 3f
1935	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
19362:
1937	!
1938	! no intr_vec_t queued yet; make this intr_vec_t the new head
1939	!
1940	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
1941	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
19423:
1943	retl
1944	nop
1945	SET_SIZE(intr_enqueue_req)
1946
1947#endif  /* lint */
1948
1949/*
1950 * Set CPU's base SPL level, based on which interrupt levels are active.
1951 * 	Called at spl7 or above.
1952 */
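
/*
 * The scan below converts the cpu_intr_actv bit mask into the highest
 * active PIL, five bits at a time, via _intr_flag_table (defined after
 * the routine), which maps a 5-bit value to its highest set bit number
 * plus one.  A rough C equivalent (a sketch only):
 *
 *	if ((bits = actv >> 11) != 0)			// bits 15-11
 *		pil = _intr_flag_table[bits] + 11 - 1;
 *	else if ((bits = actv >> 6) != 0)		// bits 10-6
 *		pil = _intr_flag_table[bits] + 6 - 1;
 *	else
 *		pil = _intr_flag_table[actv >> 1];	// bits 5-1
 *	cpu->cpu_base_spl = pil;
 *
 * For example, _intr_flag_table[0x0a] is 4 (binary 01010: the highest
 * set bit is bit 3, plus one).
 */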
1953
1954#if defined(lint)
1955
1956void
1957set_base_spl(void)
1958{}
1959
1960#else	/* lint */
1961
1962	ENTRY_NP(set_base_spl)
1963	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1964	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1965
1966/*
1967 * WARNING: non-standard calling sequence; do not call from C
1968 *	%o2 = pointer to CPU
1969 *	%o5 = updated CPU_INTR_ACTV
1970 */
1971_intr_set_spl:					! intr_thread_exit enters here
1972	!
1973	! Determine the highest interrupt level active.  Several could be
1974	! blocked at higher levels than this one, so we must convert the flags
1975	! to a PIL.  Normally nothing will be blocked, so test for that first.
1976	!
1977	brz,pt	%o5, 1f				! nothing active
1978	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1979	set	_intr_flag_table, %o1
1980	tst	%o3				! see if any of the bits set
1981	ldub	[%o1 + %o3], %o3		! load bit number
1982	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1983	add	%o3, 11-1, %o3			! delay - add bit number - 1
1984
1985	sra	%o5, 6, %o3			! test bits 10-6
1986	tst	%o3
1987	ldub	[%o1 + %o3], %o3
1988	bnz,a,pn %xcc, 1f
1989	add	%o3, 6-1, %o3
1990
1991	sra	%o5, 1, %o3			! test bits 5-1
1992	ldub	[%o1 + %o3], %o3
1993
1994	!
1995	! highest interrupt level number active is in %o3
1996	!
19971:
1998	retl
1999	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
2000	SET_SIZE(set_base_spl)
2001
2002/*
2003 * Table that finds the most significant bit set in a five bit field.
2004 * Each entry is the high-order bit number + 1 of its index in the table.
2005 * This read-only data is in the text segment.
2006 */
2007_intr_flag_table:
2008	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
2009	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
2010	.align	4
2011
2012#endif	/* lint */
2013
2014/*
2015 * int
2016 * intr_passivate(from, to)
2017 *	kthread_id_t	from;		interrupt thread
2018 *	kthread_id_t	to;		interrupted thread
2019 */
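
/*
 * In outline (a sketch only, assuming the generic SPARC struct rwindow
 * layout; 'from' is the interrupt thread, 'to' the interrupted thread):
 * the bottom register window saved at the base of the interrupt
 * thread's stack is copied to the window save area on the interrupted
 * thread's stack, and the fp slot in the source save area is cleared
 * so the interrupt thread's stack appears empty.
 *
 *	struct rwindow *src = (struct rwindow *)from->t_stack;
 *	struct rwindow *dst = (struct rwindow *)(to->t_sp + STACK_BIAS);
 *
 *	*dst = *src;			// done register-by-register below
 *	src->rw_fp = 0;			// clear fp in save area
 *	return (from->t_pil);		// saved pil for the caller
 */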
2020
2021#if defined(lint)
2022
2023/* ARGSUSED */
2024int
2025intr_passivate(kthread_id_t from, kthread_id_t to)
2026{ return (0); }
2027
2028#else	/* lint */
2029
2030	ENTRY_NP(intr_passivate)
2031	save	%sp, -SA(MINFRAME), %sp	! get a new window
2032
2033	flushw				! force register windows to stack
2034	!
2035	! restore registers from the base of the stack of the interrupt thread.
2036	!
2037	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
2038	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
2039	ldn	[%i2 + (1*GREGSIZE)], %l1
2040	ldn	[%i2 + (2*GREGSIZE)], %l2
2041	ldn	[%i2 + (3*GREGSIZE)], %l3
2042	ldn	[%i2 + (4*GREGSIZE)], %l4
2043	ldn	[%i2 + (5*GREGSIZE)], %l5
2044	ldn	[%i2 + (6*GREGSIZE)], %l6
2045	ldn	[%i2 + (7*GREGSIZE)], %l7
2046	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
2047	ldn	[%i2 + (9*GREGSIZE)], %o1
2048	ldn	[%i2 + (10*GREGSIZE)], %o2
2049	ldn	[%i2 + (11*GREGSIZE)], %o3
2050	ldn	[%i2 + (12*GREGSIZE)], %o4
2051	ldn	[%i2 + (13*GREGSIZE)], %o5
2052	ldn	[%i2 + (14*GREGSIZE)], %i4
2053					! copy saved fp without using %sp
2054	ldn	[%i2 + (15*GREGSIZE)], %i5
2055	!
2056	! put registers into the save area at the top of the interrupted
2057	! thread's stack, pointed to by %l7 in the save area just loaded.
2058	!
2059	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
2060	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
2061	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
2062	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
2063	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
2064	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
2065	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
2066	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
2067	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
2068	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
2069	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
2070	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
2071	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
2072	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
2073	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
2074	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
2075						! fp, %i7 copied via %i4, %i5
2076	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
2077	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
2078						! clear fp in save area
2079
2080	! load saved pil for return
2081	ldub	[%i0 + T_PIL], %i0
2082	ret
2083	restore
2084	SET_SIZE(intr_passivate)
2085
2086#endif	/* lint */
2087
2088#if defined(lint)
2089
2090/*
2091 * intr_get_time() is a resource for interrupt handlers to determine how
2092 * much time has been spent handling the current interrupt. Such a function
2093 * is needed because higher level interrupts can arrive during the
2094 * processing of an interrupt, thus making direct comparisons of %tick by
2095 * the handler inaccurate. intr_get_time() only returns time spent in the
2096 * current interrupt handler.
2097 *
2098 * The caller must be calling from an interrupt handler running at a pil
2099 * below or at lock level. Timings are not provided for high-level
2100 * interrupts.
2101 *
2102 * The first time intr_get_time() is called while handling an interrupt,
2103 * it returns the time since the interrupt handler was invoked. Subsequent
2104 * calls will return the time since the prior call to intr_get_time(). Time
2105 * is returned as ticks, adjusted for any clock divisor due to power
2106 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2107 * not be the same across CPUs.
2108 *
2109 * Theory Of Intrstat[][]:
2110 *
2111 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2112 * uint64_ts per pil.
2113 *
2114 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2115 * handling all interrupts at the specified pil on this CPU. It is
2116 * exported via kstats to the user.
2117 *
2118 * intrstat[pil][1] is always a count of ticks less than or equal to the
2119 * value in [0]. The difference between [1] and [0] is the value returned
2120 * by a call to intr_get_time(). At the start of interrupt processing,
2121 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2122 * time, [0] will increase, but [1] will remain the same. A call to
2123 * intr_get_time() will return the difference, then update [1] to be the
2124 * same as [0]. Future calls will return the time since the last call.
2125 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2126 *
2127 * Implementation:
2128 *
2129 * intr_get_time() works much like a higher level interrupt arriving. It
2130 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2131 * to include elapsed running time, and by setting t_intr_start to %tick.
2132 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2133 * and updates intrstat[pil][1] to be the same as the new value of
2134 * intrstat[pil][0].
2135 *
2136 * In the normal handling of interrupts, after an interrupt handler returns
2137 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2138 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2139 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2140 * is 0.
2141 *
2142 * Whenever interrupts arrive on a CPU which is handling a lower pil
2143 * interrupt, they update the lower pil's [0] to show time spent in the
2144 * handler that they've interrupted. This results in a growing discrepancy
2145 * between [0] and [1], which is returned the next time intr_get_time() is
2146 * called. Time spent in the higher-pil interrupt will not be returned in
2147 * the next intr_get_time() call from the original interrupt, because
2148 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2149 */
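
/*
 * A rough C rendering of the checkpointing described above (a sketch
 * only, assuming a gettick()-style read of %tick; the intracct[]
 * update and the final %pil restore are elided, and the multiply only
 * happens when cpu_divisor is greater than 1):
 *
 *	uint64_t
 *	intr_get_time(void)
 *	{
 *		uint64_t now = gettick();
 *		uint64_t delta = (now - t->t_intr_start) * cpu_divisor;
 *		uint64_t ret;
 *
 *		t->t_intr_start = now;
 *		intrstat[t->t_pil][0] += delta;
 *		ret = intrstat[t->t_pil][0] - intrstat[t->t_pil][1];
 *		intrstat[t->t_pil][1] = intrstat[t->t_pil][0];
 *		return (ret);
 *	}
 */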
2150
2151/*ARGSUSED*/
2152uint64_t
2153intr_get_time(void)
2154{ return (0); }
2155#else	/* lint */
2156
2157	ENTRY_NP(intr_get_time)
2158#ifdef DEBUG
2159	!
2160	! Lots of asserts, but just check panic_quiesce first.
2161	! Don't bother with lots of tests if we're just ignoring them.
2162	!
2163	sethi	%hi(panic_quiesce), %o0
2164	ld	[%o0 + %lo(panic_quiesce)], %o0
2165	brnz,pn	%o0, 2f
2166	nop
2167	!
2168	! ASSERT(%pil <= LOCK_LEVEL)
2169	!
2170	rdpr	%pil, %o1
2171	cmp	%o1, LOCK_LEVEL
2172	ble,pt	%xcc, 0f
2173	sethi	%hi(intr_get_time_high_pil), %o0	! delay
2174	call	panic
2175	or	%o0, %lo(intr_get_time_high_pil), %o0
21760:
2177	!
2178	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
2179	!
2180	lduh	[THREAD_REG + T_FLAGS], %o2
2181	andcc	%o2, T_INTR_THREAD, %g0
2182	bz,pn	%xcc, 1f
2183	ldub	[THREAD_REG + T_PIL], %o1		! delay
2184	brnz,pt	%o1, 0f
21851:
2186	sethi	%hi(intr_get_time_not_intr), %o0
2187	call	panic
2188	or	%o0, %lo(intr_get_time_not_intr), %o0
21890:
2190	!
2191	! ASSERT(t_intr_start != 0)
2192	!
2193	ldx	[THREAD_REG + T_INTR_START], %o1
2194	brnz,pt	%o1, 2f
2195	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
2196	call	panic
2197	or	%o0, %lo(intr_get_time_no_start_time), %o0
21982:
2199#endif /* DEBUG */
2200	!
2201	! %o0 = elapsed time and return value
2202	! %o1 = pil
2203	! %o2 = scratch
2204	! %o3 = scratch
2205	! %o4 = scratch
2206	! %o5 = cpu
2207	!
2208	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
2209	ldn	[THREAD_REG + T_CPU], %o5
2210	ldub	[THREAD_REG + T_PIL], %o1
2211	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2212	!
2213	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2214	! get delta, and multiply by cpu_divisor if necessary.
2215	!
2216	rdpr	%tick, %o2
2217	sllx	%o2, 1, %o2		! shift off the NPT bit (bit 63)
2218	srlx	%o2, 1, %o2		! and shift back, clearing it
2219	stx	%o2, [THREAD_REG + T_INTR_START]
2220	sub	%o2, %o3, %o0
2221
2222	lduh	[%o5 + CPU_DIVISOR], %o4
2223	cmp	%o4, 1
2224	bg,a,pn	%xcc, 1f
2225	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
22261:
2227	! Update intracct[]
2228	lduh	[%o5 + CPU_MSTATE], %o4	! %o4 = cpu microstate
2229	sllx	%o4, 3, %o4		! %o4 = mstate * sizeof (uint64_t)
2230	add	%o4, CPU_INTRACCT, %o4	! %o4 = offset of intracct[mstate]
2231	ldx	[%o5 + %o4], %o2
2232	add	%o2, %o0, %o2
2233	stx	%o2, [%o5 + %o4]
2234
2235	!
2236	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2237	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2238	! first entered, or the last time intr_get_time() was invoked. Then
2239	! update cpu_m.intrstat[pil][1] to match [0].
2240	!
2241	sllx	%o1, 4, %o3		! %o3 = pil * 16 (2 uint64_t's per pil)
2242	add	%o3, CPU_MCPU, %o3
2243	add	%o3, MCPU_INTRSTAT, %o3
2244	add	%o3, %o5, %o3		! %o3 = &cpu_m.intrstat[pil][0]
2245	ldx	[%o3], %o2
2246	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2247	stx	%o2, [%o3]
2248	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2249	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2250	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2251
2252	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2253	cmp	%o2, %o1			! of either our pil %o1 or
2254	movl	%xcc, %o1, %o2			! cpu_base_spl.
2255	retl
2256	wrpr	%g0, %o2, %pil
2257	SET_SIZE(intr_get_time)
2258
2259#ifdef DEBUG
2260intr_get_time_high_pil:
2261	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2262intr_get_time_not_intr:
2263	.asciz	"intr_get_time(): not called from an interrupt thread"
2264intr_get_time_no_start_time:
2265	.asciz	"intr_get_time(): t_intr_start == 0"
2266#endif /* DEBUG */
2267#endif  /* lint */
2268