/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * 	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
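/*
 * Illustrative C sketch (not compiled) of the dequeue performed below.
 * Field names follow intr_vec_t/machcpu; the iv_xpil_next indexing for
 * multi-target softints is simplified, and clear_softint() stands in
 * for the %clear_softint ASR write.
 *
 *	iv = cpu->m_cpu.intr_head[pil];
 *	if (iv == NULL)
 *		ptl1_panic(PTL1_BAD_INTR_VEC);
 *	nextp = (iv->iv_flags & IV_SOFTINT_MT) ?
 *	    &iv->iv_xpil_next[cpu->cpu_id] : &iv->iv_pil_next;
 *	if ((cpu->m_cpu.intr_head[pil] = *nextp) == NULL) {
 *		cpu->m_cpu.intr_tail[pil] = NULL;
 *		clear_softint(1 << pil);
 *	}
 *	iv->iv_flags &= ~IV_SOFTINT_PEND;
 *	*nextp = NULL;
 */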
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the list head intr_vec_t off intr_head[pil] and panic
	! immediately if the list head is NULL. Otherwise, update
	! intr_head[pil] to point to the next intr_vec_t on the list and,
	! if that next intr_vec_t is NULL, clear the softint via
	! %clear_softint.
	!
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) is not NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t non NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = flags & ~IV_SOFTINT_PEND
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						!       iv->iv_xpil_next[cpuid]

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */


#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
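/*
 * Illustrative C sketch (not compiled) of the calling protocol these
 * macros implement; serve_intr*() stand in for the macros:
 *
 *	serve_intr_pre(iv, ...);
 *	for (;;) {
 *		more = serve_intr(...);	// run one handler, bump stats
 *		// per-pil accounting may go here
 *		if (!more)		// os3 clear: queue drained
 *			break;
 *		serve_intr_next(...);	// dequeue next iv; ls1/os3 preserved
 *	}
 */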

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG1], %o0;					\
	ldn	[ls1 + IV_ARG2], %o1;					\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu, INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	lduh	[os3 + IV_FLAGS], os2;					\
	and	os2, IV_SOFTINT_MT, os2;				\
	brz,pt	os2, 4f;						\
	add	os3, IV_PIL_NEXT, os2;					\
	ld	[cpu + CPU_ID], os5;					\
	sll	os5, CPTRSHIFT, os5;					\
	add	os2, os5, os2;						\
4:	ldn	[os2], os5;						\
	brnz,pn	os5, 5f;						\
	stn	os5, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
5:	lduh	[os3 + IV_FLAGS], ls1;					\
	andn	ls1, IV_SOFTINT_PEND, ls1;				\
	sth	ls1, [os3 + IV_FLAGS];					\
	stn	%g0, [os2];						\
	wrpr	%g0, ls2, %pstate;					\
	mov	os3, ls1;						\
	mov	os3, ls2;						\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
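	!
	! A rough C equivalent of this casx loop (illustrative only;
	! rd_tick() stands in for RD_CLOCK_TICK, and the zero-start and
	! Energy Star handling are omitted):
	!
	!	do {
	!		start = t->t_intr_start;
	!		interval = rd_tick() - start;
	!	} while (casx(&t->t_intr_start, start, 0) != start);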
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	RD_CLOCK_TICK(%o1,%l2,%l3,__LINE__)
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
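	! This is the same casx pattern sketched earlier, except the value
	! stored back is conditional (illustrative C, with "now" being the
	! %tick value just read):
	!
	!	new = more_pending ? now : 0;
	!	/* retry the whole read/compute/casx if the store loses */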
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
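	! The decision below, as an illustrative C sketch (not compiled):
	!
	!	if (!(cpu->cpu_flags & CPU_QUIESCED)) {
	!		if (curthread->t_intr == NULL) {
	!			if (cpu->cpu_kprunrun)
	!				preempt();
	!		} else if (++cpu->cpu_intrcnt == INTRCNT_LIMIT) {
	!			cpu->cpu_kprunrun = 1;
	!			cpu->cpu_stats.sys.intrunpin++;
	!			preempt();
	!		} else if (cpu->cpu_intrcnt > INTRCNT_LIMIT) {
	!			preempt();	/* stats already bumped */
	!		}
	!	}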
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_next and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad			! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	RD_CLOCK_TICK(%o5,%l2,%l3,__LINE__)
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5, %o0)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread(): cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread(): cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */
#endif	/* lint */

#if defined(lint)

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
/* ARGSUSED */
void
current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3

	ldn	[THREAD_REG + T_ONFAULT], %l2
	brz,pt	%l2, no_onfault		! branch if no onfault label set
	nop
	stn	%g0, [THREAD_REG + T_ONFAULT]	! clear onfault label
	ldn	[THREAD_REG + T_LOFAULT], %l3
	stn	%g0, [THREAD_REG + T_LOFAULT]	! clear lofault data

	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OFD, %o4	! %o4 has on_fault data offset
	stn	%l2, [%o3 + %o4]	! save onfault label for pil %o2
	add	%o5, CPU_LFD, %o4	! %o4 has lofault data offset
	stn	%l3, [%o3 + %o4]	! save lofault data for pil %o2

no_onfault:
	ldn	[THREAD_REG + T_ONTRAP], %l2
	brz,pt	%l2, 6f			! branch if no on_trap protection
	nop
	stn	%g0, [THREAD_REG + T_ONTRAP]	! clear on_trap protection
	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OTD, %o4	! %o4 has on_trap data offset
	stn	%l2, [%o3 + %o4]	! save on_trap label for pil %o2

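	! In C terms (illustrative only; "ofd"/"lfd"/"otd" are stand-in
	! names for the per-pil save areas addressed via the CPU_OFD,
	! CPU_LFD and CPU_OTD offsets):
	!
	!	i = pil - (LOCK_LEVEL + 1);
	!	mcpu->ofd[i] = curthread->t_onfault; curthread->t_onfault = 0;
	!	mcpu->lfd[i] = curthread->t_lofault; curthread->t_lofault = 0;
	!	mcpu->otd[i] = curthread->t_ontrap;  curthread->t_ontrap = 0;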
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
6:	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:
	!
	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
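	! As an illustrative C sketch of the scan below (actv_hi is the
	! active mask already shifted right by LOCK_LEVEL + 1):
	!
	!	for (i = pil - (LOCK_LEVEL + 2); !(actv_hi & (1 << i)); i--)
	!		;
	!	/* i is now the pil_high_start[] index of the nested PIL */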
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3	! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%l1, %l2)
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
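	! Worked example (illustrative, with LOCK_LEVEL == 10): for PIL 13,
	! %o4 holds (13 - 11) * 8 = 16; shifting left 1 gives 32, and adding
	! 11 * 16 = 176 yields 208 = 13 * 16, the intrstat byte offset.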
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %l2)
	stx	%o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! if %o2, more intrs await
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			! cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %o0)
	ldx	[%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 7f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 7f
	nop

	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	stx	%o4, [THREAD_REG + T_INTR_START]

7:
	sub	%o2, LOCK_LEVEL + 1, %o4
	sll	%o4, CPTRSHIFT, %o5

	! Check on_trap saved area and restore as needed
	add	%o5, CPU_OTD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt %l2, no_ontrp_restore
	nop
	stn	%l2, [THREAD_REG + T_ONTRAP]	! restore
	stn	%g0, [%o3 + %o4]	! clear

no_ontrp_restore:
	! Check on_fault saved area and restore as needed
	add	%o5, CPU_OFD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt %l2, 8f
	nop
	stn	%l2, [THREAD_REG + T_ONFAULT]	! restore
	stn	%g0, [%o3 + %o4]	! clear
	add	%o5, CPU_LFD, %o4
	ldn	[%o3 + %o4], %l2
	stn	%l2, [THREAD_REG + T_LOFAULT]	! restore
	stn	%g0, [%o3 + %o4]	! clear


8:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)


#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

#if defined(lint)

/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}

#else	/* lint */

	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}

#else	/* lint */

	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)

#endif	/* lint */

#if defined(lint)

void
cbe_level14(void)
{}

#else   /* lint */

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0
2:
	ret
	restore	%g0, 1, %o0
	SET_SIZE(cbe_level14)

#endif  /* lint */


#if defined(lint)

/* ARGSUSED */
void
kdi_setsoftint(uint64_t iv_p)
{}

#else	/* lint */

	ENTRY_NP(kdi_setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	! 	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! check if a softint is already pending for this intr_vec_t;
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! Set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) into appropriate cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for Multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t the new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(kdi_setsoftint)
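
/*
 * Illustrative C sketch (not compiled) of the tail insertion above; the
 * same pattern recurs in setsoftint_tl1 and setvecint_tl1 below.
 * set_softint() stands in for the %set_softint ASR write.
 *
 *	ct = cpu->m_cpu.intr_tail[pil];
 *	cpu->m_cpu.intr_tail[pil] = iv;
 *	if (ct == NULL)
 *		cpu->m_cpu.intr_head[pil] = iv;
 *	else if (ct->iv_flags & IV_SOFTINT_MT)
 *		ct->iv_xpil_next[cpu->cpu_id] = iv;
 *	else
 *		ct->iv_pil_next = iv;
 *	set_softint(1 << pil);
 */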

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	! 	%g1 - Pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3, %g5 - %g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) into appropriate cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t the new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6	!
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setvecint_tl1(uint64_t inum, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	! 	%g1 - inumber
	!
	!	Internal:
	! 	%g1 - softint pil mask
	!	%g2 - pil of intr_vec_t
	!	%g3 - pointer to current intr_vec_t (iv)
	!	%g4 - cpu
	!	%g5, %g6, %g7 - temps
	!
	ENTRY_NP(setvecint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr

	!
	! Fetch data from intr_vec_table according to the inum.
	!
	! We have an interrupt number. Fetch the interrupt vector requests
	! from the interrupt vector table for a given interrupt number and
	! insert them into the cpu's softint priority lists and set
	! %set_softint.
	!
	set	intr_vec_table, %g5	! %g5 = intr_vec_table
	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
	ldn	[%g5], %g3		! %g3 = pointer to first entry of
					!       intr_vec_t list

	! Verify that the first intr_vec_t pointer for a given inum is not
	! NULL. This check used to be guarded by DEBUG, but broken drivers
	! can cause spurious tick interrupts when the softint register is
	! programmed with 1 << 0 at the end of this routine. Now we always
	! check for a valid intr_vec_t pointer.
	brz,pn	%g3, .no_ivintr
	nop

	!
	! Traverse the intr_vec_t link list, put each item on to corresponding
	! CPU softint priority queue, and compose the final softint pil mask.
	!
	! At this point:
	!	%g3 = intr_vec_table[inum]
	!
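	! As an illustrative C sketch (not compiled), the traversal below is:
	!
	!	pil_mask = 0;
	!	for (iv = intr_vec_table[inum]; iv != NULL;
	!	    iv = iv->iv_vec_next) {
	!		enqueue_softint(cpu, iv);	/* as sketched above */
	!		pil_mask |= 1 << iv->iv_pil;
	!	}
	!	set_softint(pil_mask);
	!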
1765	CPU_ADDR(%g4, %g2)		! %g4 = cpu
1766	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
17670:
1768	!
1769	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
1770	!
1771	! At this point:
1772	!	%g1 = softint pil mask
1773	!	%g3 = pointer to next intr_vec_t (iv)
1774	!	%g4 = cpu
1775	!
1776	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
1777	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
1778	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1779	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
1780					! 	current tail (ct)
1781	brz,pt	%g5, 2f			! branch if current tail is NULL
1782	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) the new tail
1783					! cpu->m_cpu.intr_tail[pil] = iv
1784	!
1785	! there's a pending intr_vec_t already
1786	!
1787	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1788	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1789	brz,pt	%g6, 1f			! check for Multi target softint flag
1790	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
1791	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1792	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1793	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
17941:
1795	!
1796	! update old tail
1797	!
1798	ba,pt	%xcc, 3f
1799	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
18002:
1801	!
1802	! no pending intr_vec_t; make this intr_vec_t the new head
1803	!
1804	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1805	stn	%g3, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
18063:
1807#ifdef TRAPTRACE
1808	TRACE_PTR(%g5, %g6)
1809	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
1810	rdpr	%tt, %g6
1811	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1812	rdpr	%tpc, %g6
1813	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1814	rdpr	%tstate, %g6
1815	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1816	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1817	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1818	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
1819	add	%g4, INTR_HEAD, %g6
1820	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1821	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1822	add	%g4, INTR_TAIL, %g6
1823	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1824	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1825	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1826	GET_TRACE_TICK(%g6, %g7)
1827	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1828	TRACE_NEXT(%g5, %g6, %g7)
1829#endif /* TRAPTRACE */
1830	mov	1, %g6			! %g6 = 1
1831	sll	%g6, %g2, %g6		! %g6 = 1 << pil
1832	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
1833	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1834	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
1835	nop
1836	wr	%g1, SET_SOFTINT	! triggered one or more pil softints
1837	retry
1838
1839.no_ivintr:
1840	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1841	mov	%g2, %g3
1842	mov	%g1, %g2
1843	set	no_ivintr, %g1
1844	ba,pt	%xcc, sys_trap
1845	mov	PIL_15, %g4
1846	SET_SIZE(setvecint_tl1)
1847
1848#endif	/* lint */
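
/*
 * For reference, a hedged C sketch of the queue insertion that
 * setvecint_tl1 performs for each intr_vec_t (setsoftint_tl1 above does
 * the same for a single entry). The structure shapes are simplified
 * assumptions for illustration only; the real definitions live in
 * <sys/ivintr.h> and <sys/machcpuvar.h>.
 *
 *	uint_t mask = 0;
 *	intr_vec_t *iv, *ct;
 *
 *	for (iv = intr_vec_table[inum]; iv != NULL; iv = iv->iv_vec_next) {
 *		uint_t pil = iv->iv_pil;
 *
 *		ct = cpu->m_cpu.intr_tail[pil];
 *		cpu->m_cpu.intr_tail[pil] = iv;		// iv is the new tail
 *		if (ct == NULL)				// queue was empty
 *			cpu->m_cpu.intr_head[pil] = iv;
 *		else if (ct->iv_flags & IV_SOFTINT_MT)	// multi-target softint
 *			ct->iv_xpil_next[cpu->cpu_id] = iv;
 *		else
 *			ct->iv_pil_next = iv;
 *		mask |= 1 << pil;
 *	}
 *
 * The final "wr %g1, SET_SOFTINT" then posts all of the composed pils
 * with a single write to the softint register.
 */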
1849
1850#if defined(lint)
1851
1852/*ARGSUSED*/
1853void
1854wr_clr_softint(uint_t value)
1855{}
1856
1857#else
1858
1859	ENTRY_NP(wr_clr_softint)
1860	retl
1861	wr	%o0, CLEAR_SOFTINT
1862	SET_SIZE(wr_clr_softint)
1863
1864#endif /* lint */
1865
1866#if defined(lint)
1867
1868/*ARGSUSED*/
1869void
1870intr_enqueue_req(uint_t pil, uint64_t inum)
1871{}
1872
1873#else   /* lint */
1874
1875/*
1876 * intr_enqueue_req
1877 *
1878 * %o0 - pil
1879 * %o1 - pointer to intr_vec_t (iv)
1880 * %o5 - preserved
1881 * %g5 - preserved
1882 */
1883	ENTRY_NP(intr_enqueue_req)
1884	!
1885	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1886
1887	!
1888	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
1889	!
1890	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1891	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1892	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1893					!       current tail (ct)
1894	brz,pt	%g1, 2f			! branch if current tail is NULL
1895	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) the new tail
1896
1897	!
1898	! there's a pending intr_vec_t already
1899	!
1900	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1901	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1902	brz,pt	%g6, 1f			! check for Multi target softint flag
1903	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1904	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1905	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1906	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
19071:
1908	!
1909	! update old tail
1910	!
1911	ba,pt	%xcc, 3f
1912	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
19132:
1914	!
1915	! no intr_vec_t queued, so make this intr_vec_t the new head
1916	!
1917	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1918	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
19193:
1920	retl
1921	nop
1922	SET_SIZE(intr_enqueue_req)
1923
1924#endif  /* lint */
1925
1926/*
1927 * Set CPU's base SPL level, based on which interrupt levels are active.
1928 * 	Called at spl7 or above.
1929 */
1930
1931#if defined(lint)
1932
1933void
1934set_base_spl(void)
1935{}
1936
1937#else	/* lint */
1938
1939	ENTRY_NP(set_base_spl)
1940	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1941	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1942
1943/*
1944 * WARNING: non-standard calling sequence; do not call from C
1945 *	%o2 = pointer to CPU
1946 *	%o5 = updated CPU_INTR_ACTV
1947 */
1948_intr_set_spl:					! intr_thread_exit enters here
1949	!
1950	! Determine the highest interrupt level active.  Several could be blocked
1951	! at higher levels than this one, so we must convert the flags to a PIL.
1952	! Normally nothing will be blocked, so test for that first.
1953	!
1954	brz,pt	%o5, 1f				! nothing active
1955	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1956	set	_intr_flag_table, %o1
1957	tst	%o3				! see if any of the bits set
1958	ldub	[%o1 + %o3], %o3		! load bit number
1959	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1960	add	%o3, 11-1, %o3			! delay - add bit number - 1
1961
1962	sra	%o5, 6, %o3			! test bits 10-6
1963	tst	%o3
1964	ldub	[%o1 + %o3], %o3
1965	bnz,a,pn %xcc, 1f
1966	add	%o3, 6-1, %o3
1967
1968	sra	%o5, 1, %o3			! test bits 5-1
1969	ldub	[%o1 + %o3], %o3
1970
1971	!
1972	! highest interrupt level number active is in %o3
1973	!
19741:
1975	retl
1976	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
1977	SET_SIZE(set_base_spl)
1978
1979/*
1980 * Table that finds the most significant bit set in a five-bit field.
1981 * Each entry is the high-order bit number + 1 of its index in the table.
1982 * This read-only data is in the text segment.
1983 */
1984_intr_flag_table:
1985	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1986	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1987	.align	4
1988
1989#endif	/* lint */
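
/*
 * A hedged C rendering of the lookup above. The table contents match
 * _intr_flag_table: entry N holds (highest bit set in N) + 1, or zero
 * for N == 0; e.g. index 0x16 (0b10110) yields 5 because bit 4 is the
 * highest bit set. Masking is unnecessary because each lower chunk is
 * consulted only when every higher chunk is zero.
 *
 *	static const uint8_t flag_table[32] = {
 *		0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
 *		5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
 *	};
 *
 *	uint_t
 *	base_spl(uint_t actv)		// actv = cpu->cpu_intr_actv
 *	{
 *		if (actv >> 11)		// a PIL in 15..11 is active
 *			return (flag_table[actv >> 11] + 10);
 *		if (actv >> 6)		// a PIL in 10..6 is active
 *			return (flag_table[actv >> 6] + 5);
 *		return (flag_table[actv >> 1]);	// PILs 5..1, or 0 if idle
 *	}
 */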
1990
1991/*
1992 * int
1993 * intr_passivate(from, to)
1994 *	kthread_id_t	from;		interrupt thread
1995 *	kthread_id_t	to;		interrupted thread
1996 */
1997
1998#if defined(lint)
1999
2000/* ARGSUSED */
2001int
2002intr_passivate(kthread_id_t from, kthread_id_t to)
2003{ return (0); }
2004
2005#else	/* lint */
2006
2007	ENTRY_NP(intr_passivate)
2008	save	%sp, -SA(MINFRAME), %sp	! get a new window
2009
2010	flushw				! force register windows to stack
2011	!
2012	! restore registers from the base of the stack of the interrupt thread.
2013	!
2014	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
2015	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
2016	ldn	[%i2 + (1*GREGSIZE)], %l1
2017	ldn	[%i2 + (2*GREGSIZE)], %l2
2018	ldn	[%i2 + (3*GREGSIZE)], %l3
2019	ldn	[%i2 + (4*GREGSIZE)], %l4
2020	ldn	[%i2 + (5*GREGSIZE)], %l5
2021	ldn	[%i2 + (6*GREGSIZE)], %l6
2022	ldn	[%i2 + (7*GREGSIZE)], %l7
2023	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
2024	ldn	[%i2 + (9*GREGSIZE)], %o1
2025	ldn	[%i2 + (10*GREGSIZE)], %o2
2026	ldn	[%i2 + (11*GREGSIZE)], %o3
2027	ldn	[%i2 + (12*GREGSIZE)], %o4
2028	ldn	[%i2 + (13*GREGSIZE)], %o5
2029	ldn	[%i2 + (14*GREGSIZE)], %i4
2030					! copy stack pointer without using %sp
2031	ldn	[%i2 + (15*GREGSIZE)], %i5
2032	!
2033	! put registers into the save area at the top of the interrupted
2034	! thread's stack, pointed to by %l7 in the save area just loaded.
2035	!
2036	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
2037	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
2038	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
2039	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
2040	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
2041	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
2042	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
2043	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
2044	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
2045	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
2046	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
2047	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
2048	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
2049	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
2050	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
2051	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
2052						! fp, %i7 copied via %i4, %i5
2053	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
2054	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
2055						! clear fp in save area
2056
2057	! load saved pil for return
2058	ldub	[%i0 + T_PIL], %i0
2059	ret
2060	restore
2061	SET_SIZE(intr_passivate)
2062
2063#endif	/* lint */
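
/*
 * In C terms, intr_passivate copies the sixteen saved registers (eight
 * locals followed by eight ins, each GREGSIZE bytes) from the save area
 * at the base of the interrupt thread's stack to the save area on the
 * interrupted thread's stack, clears the saved %fp so the interrupt
 * thread's stack looks empty, and returns the saved pil. A hedged
 * sketch, with the save area modeled as a plain array and field names
 * following the assym offsets (T_STACK, T_SP, T_PIL) rather than the
 * exact kthread_t spellings:
 *
 *	int
 *	intr_passivate_sketch(kthread_id_t from, kthread_id_t to)
 *	{
 *		greg_t *src = (greg_t *)from->t_stack;
 *		greg_t *dst = (greg_t *)((uintptr_t)to->t_sp + STACK_BIAS);
 *		int i;
 *
 *		for (i = 0; i < 16; i++)	// 8 locals + 8 ins
 *			dst[i] = src[i];
 *		src[8 + 6] = 0;			// clear saved %fp (%i6)
 *		return (from->t_pil);
 *	}
 *
 * The assembly cannot simply loop while the values still live in
 * register windows, hence the flushw and the shuffling of the ins
 * through the outs and %i4/%i5 without touching %sp.
 */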
2064
2065#if defined(lint)
2066
2067/*
2068 * intr_get_time() is a resource for interrupt handlers to determine how
2069 * much time has been spent handling the current interrupt. Such a function
2070 * is needed because higher level interrupts can arrive during the
2071 * processing of an interrupt, thus making direct comparisons of %tick by
2072 * the handler inaccurate. intr_get_time() only returns time spent in the
2073 * current interrupt handler.
2074 *
2075 * The caller must be calling from an interrupt handler running at a pil
2076 * below or at lock level. Timings are not provided for high-level
2077 * interrupts.
2078 *
2079 * The first time intr_get_time() is called while handling an interrupt,
2080 * it returns the time since the interrupt handler was invoked. Subsequent
2081 * calls will return the time since the prior call to intr_get_time(). Time
2082 * is returned as ticks, adjusted for any clock divisor due to power
2083 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2084 * not be the same across CPUs.
2085 *
2086 * Theory Of Intrstat[][]:
2087 *
2088 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2089 * uint64_ts per pil.
2090 *
2091 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2092 * handling all interrupts at the specified pil on this CPU. It is
2093 * exported via kstats to the user.
2094 *
2095 * intrstat[pil][1] is always a count of ticks less than or equal to the
2096 * value in [0]. The difference between [1] and [0] is the value returned
2097 * by a call to intr_get_time(). At the start of interrupt processing,
2098 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2099 * time, [0] will increase, but [1] will remain the same. A call to
2100 * intr_get_time() will return the difference, then update [1] to be the
2101 * same as [0]. Future calls will return the time since the last call.
2102 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2103 *
2104 * Implementation:
2105 *
2106 * intr_get_time() works much like a higher level interrupt arriving. It
2107 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2108 * to include elapsed running time, and by setting t_intr_start to %tick.
2109 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2110 * and updates intrstat[pil][1] to be the same as the new value of
2111 * intrstat[pil][0].
2112 *
2113 * In the normal handling of interrupts, after an interrupt handler returns
2114 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2115 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2116 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2117 * is 0.
2118 *
2119 * Whenever interrupts arrive on a CPU which is handling a lower pil
2120 * interrupt, they update the lower pil's [0] to show time spent in the
2121 * handler that they've interrupted. This results in a growing discrepancy
2122 * between [0] and [1], which is returned the next time intr_get_time() is
2123 * called. Time spent in the higher-pil interrupt will not be returned in
2124 * the next intr_get_time() call from the original interrupt, because
2125 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2126 */
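
/*
 * A hedged C sketch of the checkpointing described above. Names here
 * follow the comments in the assembly below (CPU_DIVISOR, CPU_INTRACCT,
 * MCPU_INTRSTAT, t_intr_start) rather than the exact structure
 * definitions, and the %pil manipulation that protects the real routine
 * is elided:
 *
 *	uint64_t
 *	intr_get_time_sketch(void)
 *	{
 *		kthread_t *t = curthread;
 *		uint_t pil = t->t_pil;
 *		uint64_t now, delta, ret;
 *
 *		now = gettick();		// RD_CLOCK_TICK equivalent
 *		delta = now - t->t_intr_start;	// run time since checkpoint
 *		t->t_intr_start = now;		// new checkpoint
 *		if (cpu_divisor > 1)		// scale for power management
 *			delta *= cpu_divisor;
 *
 *		cpu_intracct[cpu_mstate] += delta;	// mstate accounting
 *
 *		intrstat[pil][0] += delta;	// cumulative ticks at pil
 *		ret = intrstat[pil][0] - intrstat[pil][1];
 *		intrstat[pil][1] = intrstat[pil][0];	// [1] == [0]: reset
 *		return (ret);
 *	}
 */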
2127
2128/*ARGSUSED*/
2129uint64_t
2130intr_get_time(void)
2131{ return (0); }
2132#else	/* lint */
2133
2134	ENTRY_NP(intr_get_time)
2135#ifdef DEBUG
2136	!
2137	! Lots of asserts, but just check panic_quiesce first.
2138	! Don't bother with lots of tests if we're just ignoring them.
2139	!
2140	sethi	%hi(panic_quiesce), %o0
2141	ld	[%o0 + %lo(panic_quiesce)], %o0
2142	brnz,pn	%o0, 2f
2143	nop
2144	!
2145	! ASSERT(%pil <= LOCK_LEVEL)
2146	!
2147	rdpr	%pil, %o1
2148	cmp	%o1, LOCK_LEVEL
2149	ble,pt	%xcc, 0f
2150	sethi	%hi(intr_get_time_high_pil), %o0	! delay
2151	call	panic
2152	or	%o0, %lo(intr_get_time_high_pil), %o0
21530:
2154	!
2155	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
2156	!
2157	lduh	[THREAD_REG + T_FLAGS], %o2
2158	andcc	%o2, T_INTR_THREAD, %g0
2159	bz,pn	%xcc, 1f
2160	ldub	[THREAD_REG + T_PIL], %o1		! delay
2161	brnz,pt	%o1, 0f
21621:
2163	sethi	%hi(intr_get_time_not_intr), %o0
2164	call	panic
2165	or	%o0, %lo(intr_get_time_not_intr), %o0
21660:
2167	!
2168	! ASSERT(t_intr_start != 0)
2169	!
2170	ldx	[THREAD_REG + T_INTR_START], %o1
2171	brnz,pt	%o1, 2f
2172	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
2173	call	panic
2174	or	%o0, %lo(intr_get_time_no_start_time), %o0
21752:
2176#endif /* DEBUG */
2177	!
2178	! %o0 = elapsed time and return value
2179	! %o1 = pil
2180	! %o2 = scratch
2181	! %o3 = scratch
2182	! %o4 = scratch
2183	! %o5 = cpu
2184	!
2185	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
2186	ldn	[THREAD_REG + T_CPU], %o5
2187	ldub	[THREAD_REG + T_PIL], %o1
2188	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2189	!
2190	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2191	! get delta, and multiply by cpu_divisor if necessary.
2192	!
2193	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o2, %o0)
2194	stx	%o2, [THREAD_REG + T_INTR_START]
2195	sub	%o2, %o3, %o0
2196
2197	lduh	[%o5 + CPU_DIVISOR], %o4
2198	cmp	%o4, 1
2199	bg,a,pn	%xcc, 1f
2200	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
22011:
2202	! Update intracct[]
2203	lduh	[%o5 + CPU_MSTATE], %o4
2204	sllx	%o4, 3, %o4
2205	add	%o4, CPU_INTRACCT, %o4
2206	ldx	[%o5 + %o4], %o2
2207	add	%o2, %o0, %o2
2208	stx	%o2, [%o5 + %o4]
2209
2210	!
2211	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2212	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2213	! first entered, or the last time intr_get_time() was invoked. Then
2214	! update cpu_m.intrstat[pil][1] to match [0].
2215	!
2216	sllx	%o1, 4, %o3
2217	add	%o3, CPU_MCPU, %o3
2218	add	%o3, MCPU_INTRSTAT, %o3
2219	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
2220	ldx	[%o3], %o2
2221	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2222	stx	%o2, [%o3]
2223	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2224	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2225	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2226
2227	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2228	cmp	%o2, %o1			! of either our pil %o1 or
2229	movl	%xcc, %o1, %o2			! cpu_base_spl.
2230	retl
2231	wrpr	%g0, %o2, %pil
2232	SET_SIZE(intr_get_time)
2233
2234#ifdef DEBUG
2235intr_get_time_high_pil:
2236	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2237intr_get_time_not_intr:
2238	.asciz	"intr_get_time(): not called from an interrupt thread"
2239intr_get_time_no_start_time:
2240	.asciz	"intr_get_time(): t_intr_start == 0"
2241#endif /* DEBUG */
2242#endif  /* lint */
2243