/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 *	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
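/*
 * In C terms, the dequeue this handler performs is roughly the
 * following (a sketch only, not built; it assumes the intr_vec_t and
 * machcpu fields whose assym offsets are used by the assembly below):
 *
 *	intr_vec_t *iv = cpu->m_cpu.intr_head[pil];
 *	if (iv == NULL)
 *		ptl1_panic(PTL1_BAD_INTR_VEC);
 *	next = (iv->iv_flags & IV_SOFTINT_MT) ?
 *	    iv->iv_xpil_next[cpu->cpu_id] : iv->iv_pil_next;
 *	cpu->m_cpu.intr_head[pil] = next;
 *	if (next == NULL) {
 *		cpu->m_cpu.intr_tail[pil] = NULL;
 *		clear_softint(1 << pil);
 *	}
 *	iv->iv_flags &= ~IV_SOFTINT_PEND;
 */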
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the list head intr_vec_t off intr_head[pil] and panic
	! immediately if the list head is NULL. Otherwise, update
	! intr_head[pil] to the next intr_vec_t on the list and, if that
	! next intr_vec_t is NULL, clear the softint via %clear_softint.
	!
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) is non-NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t is non-NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = iv->iv_flags & ~IV_SOFTINT_PEND
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						!       iv->iv_xpil_next[cpuid]

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure out which handler to run and which %pil it starts at.
	! intr_thread starts at DISP_LEVEL to prevent preemption;
	! current_thread starts at PIL_MAX to protect cpu_intr_actv.
	!
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */


#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
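
/*
 * The calling protocol, in outline (a sketch of the loop the callers
 * below implement, not a macro itself; "handler accounting" stands
 * for whatever the caller does between iterations):
 *
 *	SERVE_INTR_PRE(iv_p, ...);
 *	for (;;) {
 *		SERVE_INTR(...);		calls iv->iv_handler
 *		if (os3 == 0)			no more queued requests
 *			break;
 *		handler accounting;		must preserve ls1, os3
 *		SERVE_INTR_NEXT(...);		dequeue the next request
 *	}
 */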

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG1], %o0;					\
	ldn	[ls1 + IV_ARG2], %o1;					\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu, INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch regs, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	lduh	[os3 + IV_FLAGS], os2;					\
	and	os2, IV_SOFTINT_MT, os2;				\
	brz,pt	os2, 4f;						\
	add	os3, IV_PIL_NEXT, os2;					\
	ld	[cpu + CPU_ID], os5;					\
	sll	os5, CPTRSHIFT, os5;					\
	add	os2, os5, os2;						\
4:	ldn	[os2], os5;						\
	brnz,pn	os5, 5f;						\
	stn	os5, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
5:	lduh	[os3 + IV_FLAGS], ls1;					\
	andn	ls1, IV_SOFTINT_PEND, ls1;				\
	sth	ls1, [os3 + IV_FLAGS];					\
	stn	%g0, [os2];						\
	wrpr	%g0, ls2, %pstate;					\
	mov	os3, ls1;						\
	mov	os3, ls2;						\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	RD_TICK(%o4,%l2,%l3,__LINE__)
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick value in %o4 had become stale.
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

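	! In C terms, the accounting above is a compare-and-swap loop
	! (a sketch only; atomic_cas_64() stands in for the casx
	! instruction and gettick() for RD_TICK):
	!
	!	do {
	!		old = t->t_intr_start;
	!		if (old == 0)
	!			break;			/* nothing to account */
	!		interval = gettick() - old;
	!	} while (atomic_cas_64(&t->t_intr_start, old, 0) != old);
	!	if (cpu->cpu_m.divisor > 1)
	!		interval *= cpu->cpu_m.divisor;
	!	cpu->cpu_m.intrstat[t->t_pil][0] += interval;
	!	atomic_add_64(&cpu->cpu_intracct[cpu->cpu_mstate], interval);
	!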
1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
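	! In C terms, the thread switch below is roughly (a sketch; the
	! field names follow the kthread_t/cpu_t members whose assym
	! offsets are used here):
	!
	!	it = cpu->cpu_intr_thread;		/* pop from pool */
	!	cpu->cpu_intr_thread = it->t_link;
	!	cpu->cpu_intr_actv |= 1 << pil;
	!	it->t_lwp = curthread->t_lwp;
	!	it->t_state = TS_ONPROC;
	!	curthread->t_pc = return_pc;		/* for resume() */
	!	curthread->t_sp = sp;
	!	it->t_intr = curthread;			/* pin old thread */
	!	cpu->cpu_thread = curthread = it;
	!	sp = it->t_stack;
	!	it->t_pri = intr_pri + pil;
	!	it->t_pil = pil;
	!	it->t_intr_start = gettick();
	!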
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	RD_TICK(%o4,%l2,%l3,__LINE__)
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call SERVE_INTR_NEXT, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our iv pointer.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	RD_TICK(%o1,%l2,%l3,__LINE__)
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If %o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable. A sketch of this logic follows.
	!
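	! In C (illustrative sketch only, not the exact store ordering):
	!
	!	if (!(cpu->cpu_flags & CPU_QUIESCED)) {
	!		if (curthread->t_intr == NULL) {
	!			if (cpu->cpu_kprunrun)
	!				preempt();
	!		} else if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
	!			cpu->cpu_kprunrun = 1;
	!			CPU_STATS_ADDQ(cpu, sys, intrunpin, 1);
	!			preempt();
	!		}
	!	}
	!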
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call SERVE_INTR_NEXT and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling SERVE_INTR again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad			! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = old t_intr_start value
	RD_TICK(%o5,%l2,%l3,__LINE__)
	casx	[%o3], %o4, %o5			! store %tick if t_intr_start
						! is still equal to %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5, %o0)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread(): cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread(): cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */
#endif	/* lint */

#if defined(lint)

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
/* ARGSUSED */
void
current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3

	ldn	[THREAD_REG + T_ONFAULT], %l2
	brz,pt	%l2, no_onfault		! branch if no onfault label set
	nop
	stn	%g0, [THREAD_REG + T_ONFAULT]	! clear onfault label
	ldn	[THREAD_REG + T_LOFAULT], %l3
	stn	%g0, [THREAD_REG + T_LOFAULT]	! clear lofault data

	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OFD, %o4	! %o4 has on_fault data offset
	stn	%l2, [%o3 + %o4]	! save onfault label for pil %o2
	add	%o5, CPU_LFD, %o4	! %o4 has lofault data offset
	stn	%l3, [%o3 + %o4]	! save lofault data for pil %o2

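	! In C terms, the save above and below is roughly (a sketch;
	! "ofd", "lfd" and "otd" are stand-ins for the per-PIL machcpu
	! save areas addressed via CPU_OFD, CPU_LFD and CPU_OTD):
	!
	!	i = pil - (LOCK_LEVEL + 1);
	!	cpu->cpu_m.ofd[i] = t->t_onfault;  t->t_onfault = NULL;
	!	cpu->cpu_m.lfd[i] = t->t_lofault;  t->t_lofault = NULL;
	!	cpu->cpu_m.otd[i] = t->t_ontrap;   t->t_ontrap = NULL;
	!
	! The matching restore happens just before this handler returns.
	!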
no_onfault:
	ldn	[THREAD_REG + T_ONTRAP], %l2
	brz,pt	%l2, 6f			! branch if no on_trap protection
	nop
	stn	%g0, [THREAD_REG + T_ONTRAP]	! clear on_trap protection
	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OTD, %o4	! %o4 has on_trap data offset
	stn	%l2, [%o3 + %o4]	! save on_trap label for pil %o2

	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
6:	ld	[%o3 + CPU_INTR_ACTV], %o5	! %o5 = cpu_intr_actv before change
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:
	!
	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
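	!
	! In C terms, the search below is (a sketch; "actv_high" is the
	! shifted active mask held in %o5 here):
	!
	!	for (i = pil - (LOCK_LEVEL + 2); ; i--) {
	!		ASSERT(i >= 0);			/* DEBUG only */
	!		if (actv_high & (1 << i))
	!			break;
	!	}
	!	interval = gettick() - cpu->cpu_m.pil_high_start[i];
	!
	! where index i corresponds to PIL (i + LOCK_LEVEL + 1).
	!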
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3	! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	RD_TICK_NO_SUSPEND_CHECK(%l1, %l2)
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
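	! That is, in C:
	!
	!	((pil - (LOCK_LEVEL + 1)) * 8) * 2 + (LOCK_LEVEL + 1) * 16
	!	    == pil * 16
	!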
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	RD_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_TICK_NO_SUSPEND_CHECK(%o5, %l2)
	stx	%o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! branch if no more intrs await
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			! cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_TICK_NO_SUSPEND_CHECK(%o5, %o0)
	ldx	[%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	RD_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 7f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 7f
	nop

	RD_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	stx	%o4, [THREAD_REG + T_INTR_START]

7:
	sub	%o2, LOCK_LEVEL + 1, %o4
	sll	%o4, CPTRSHIFT, %o5

	! Check on_trap saved area and restore as needed
	add	%o5, CPU_OTD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt	%l2, no_ontrp_restore
	nop
	stn	%l2, [THREAD_REG + T_ONTRAP]	! restore
	stn	%g0, [%o3 + %o4]		! clear

no_ontrp_restore:
	! Check on_fault saved area and restore as needed
	add	%o5, CPU_OFD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt	%l2, 8f
	nop
	stn	%l2, [THREAD_REG + T_ONFAULT]	! restore
	stn	%g0, [%o3 + %o4]		! clear
	add	%o5, CPU_LFD, %o4
	ldn	[%o3 + %o4], %l2
	stn	%l2, [THREAD_REG + T_LOFAULT]	! restore
	stn	%g0, [%o3 + %o4]		! clear

8:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)


#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

#if defined(lint)

/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}

#else	/* lint */

	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}

#else	/* lint */

	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)

#endif	/* lint */

#if defined(lint)

void
cbe_level14(void)
{}

#else   /* lint */

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0
2:
	ret
	restore	%g0, 1, %o0
	SET_SIZE(cbe_level14)

#endif  /* lint */


#if defined(lint)

/* ARGSUSED */
void
kdi_setsoftint(uint64_t iv_p)
{}

#else	/* lint */

	ENTRY_NP(kdi_setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	!	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! check if a softint is already pending for this iv;
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! Set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) into the appropriate cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t the new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(kdi_setsoftint)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	!	%g1 - Pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3, %g5 - %g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
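	! In C terms, the tail insertion below is roughly (a sketch;
	! "next" stands for iv_pil_next, or iv_xpil_next[cpuid] when the
	! current tail has IV_SOFTINT_MT set):
	!
	!	ct = cpu->m_cpu.intr_tail[pil];
	!	cpu->m_cpu.intr_tail[pil] = iv;
	!	if (ct == NULL)
	!		cpu->m_cpu.intr_head[pil] = iv;	/* empty list */
	!	else
	!		ct->next = iv;			/* append */
	!	set_softint(1 << pil);
	!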
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) into the appropriate cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) the new tail
	!
	! there's a pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t the new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setvecint_tl1(uint64_t inum, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	!	%g1 - inumber
	!
	!	Internal:
	!	%g1 - softint pil mask
	!	%g2 - pil of intr_vec_t
	!	%g3 - pointer to current intr_vec_t (iv)
	!	%g4 - cpu
	!	%g5, %g6, %g7 - temps
	!
	ENTRY_NP(setvecint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr

	!
	! Fetch data from intr_vec_table according to the inum.
	!
	! We have an interrupt number. Fetch the interrupt vector requests
	! from the interrupt vector table for a given interrupt number and
	! insert them into the cpu's softint priority lists and set
	! %set_softint.
	!
	set	intr_vec_table, %g5	! %g5 = intr_vec_table
	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
	ldn	[%g5], %g3		! %g3 = pointer to first entry of
					!       intr_vec_t list

	! Verify the first intr_vec_t pointer for a given inum; it should
	! not be NULL. This used to be guarded by DEBUG but broken drivers can
	! cause spurious tick interrupts when the softint register is programmed
	! with 1 << 0 at the end of this routine. Now we always check for a
	! valid intr_vec_t pointer.
	brz,pn	%g3, .no_ivintr
	nop

	!
	! Traverse the intr_vec_t linked list, put each item on the
	! corresponding CPU softint priority queue, and compose the final
	! softint pil mask.
	!
	! At this point:
	!	%g3 = intr_vec_table[inum]
	!
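	! In C terms, the traversal below is roughly (a sketch;
	! enqueue() stands for the tail insertion shown above):
	!
	!	uint_t mask = 0;
	!	for (iv = intr_vec_table[inum]; iv != NULL;
	!	    iv = iv->iv_vec_next) {
	!		enqueue(cpu, iv->iv_pil, iv);
	!		mask |= 1 << iv->iv_pil;
	!	}
	!	set_softint(mask);
	!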
1766	CPU_ADDR(%g4, %g2)		! %g4 = cpu
1767	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
17680:
1769	!
1770	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
1771	!
1772	! At this point:
1773	!	%g1 = softint pil mask
1774	!	%g3 = pointer to next intr_vec_t (iv)
1775	!	%g4 = cpu
1776	!
1777	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
1778	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
1779	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1780	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
1781					! 	current tail (ct)
1782	brz,pt	%g5, 2f			! branch if current tail is NULL
1783	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) the new tail
1784					! cpu->m_cpu.intr_tail[pil] = iv
1785	!
1786	! there's a pending intr_vec_t already
1787	!
1788	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1789	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1790	brz,pt	%g6, 1f			! check for multi target softint flag
1791	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
1792	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1793	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1794	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
17951:
1796	!
1797	! update old tail
1798	!
1799	ba,pt	%xcc, 3f
1800	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
18012:
1802	!
1803	! no pending intr_vec_t; make this intr_vec_t the new head
1804	!
1805	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1806	stn	%g3, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
18073:
1808#ifdef TRAPTRACE
1809	TRACE_PTR(%g5, %g6)
1810	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
1811	rdpr	%tt, %g6
1812	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1813	rdpr	%tpc, %g6
1814	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1815	rdpr	%tstate, %g6
1816	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1817	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1818	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1819	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
1820	add	%g4, INTR_HEAD, %g6
1821	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1822	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1823	add	%g4, INTR_TAIL, %g6
1824	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1825	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1826	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1827	GET_TRACE_TICK(%g6, %g7)
1828	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1829	TRACE_NEXT(%g5, %g6, %g7)
1830#endif /* TRAPTRACE */
1831	mov	1, %g6			! %g6 = 1
1832	sll	%g6, %g2, %g6		! %g6 = 1 << pil
1833	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
1834	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1835	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
1836	nop
1837	wr	%g1, SET_SOFTINT	! trigger one or more pil softints
1838	retry
1839
1840.no_ivintr:
1841	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1842	mov	%g2, %g3
1843	mov	%g1, %g2
1844	set	no_ivintr, %g1
1845	ba,pt	%xcc, sys_trap
1846	mov	PIL_15, %g4
1847	SET_SIZE(setvecint_tl1)
1848
1849#endif	/* lint */
1850
1851#if defined(lint)
1852
1853/*ARGSUSED*/
1854void
1855wr_clr_softint(uint_t value)
1856{}
1857
1858#else
1859
1860	ENTRY_NP(wr_clr_softint)
1861	retl
1862	wr	%o0, CLEAR_SOFTINT
1863	SET_SIZE(wr_clr_softint)
1864
1865#endif /* lint */
1866
1867#if defined(lint)
1868
1869/*ARGSUSED*/
1870void
1871intr_enqueue_req(uint_t pil, uint64_t inum)
1872{}
1873
1874#else   /* lint */
1875
1876/*
1877 * intr_enqueue_req
1878 *
1879 * %o0 - pil
1880 * %o1 - pointer to intr_vec_t (iv)
1881 * %o5 - preserved
1882 * %g5 - preserved
1883 */
1884	ENTRY_NP(intr_enqueue_req)
1885	!
1886	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1887
1888	!
1889	! Insert intr_vec_t (iv) into the appropriate CPU's softint priority list
1890	!
1891	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1892	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1893	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1894					!       current tail (ct)
1895	brz,pt	%g1, 2f			! branch if current tail is NULL
1896	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) the new tail
1897
1898	!
1899	! there's a pending intr_vec_t already
1900	!
1901	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1902	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1903	brz,pt	%g6, 1f			! check for multi target softint flag
1904	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1905	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1906	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1907	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
19081:
1909	!
1910	! update old tail
1911	!
1912	ba,pt	%xcc, 3f
1913	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
19142:
1915	!
1916	! no intr_vec_t queued, so make this intr_vec_t the new head
1917	!
1918	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
1919	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
19203:
1921	retl
1922	nop
1923	SET_SIZE(intr_enqueue_req)
1924
1925#endif  /* lint */
1926
1927/*
1928 * Set CPU's base SPL level, based on which interrupt levels are active.
1929 * 	Called at spl7 or above.
1930 */
1931
1932#if defined(lint)
1933
1934void
1935set_base_spl(void)
1936{}
1937
1938#else	/* lint */
1939
1940	ENTRY_NP(set_base_spl)
1941	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1942	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1943
1944/*
1945 * WARNING: non-standard calling sequence; do not call from C
1946 *	%o2 = pointer to CPU
1947 *	%o5 = updated CPU_INTR_ACTV
1948 */
1949_intr_set_spl:					! intr_thread_exit enters here
1950	!
1951	! Determine the highest active interrupt level.  Several could be
1952	! blocked at levels higher than this one, so the flag bits must be
1953	! converted to a PIL.  Normally nothing is blocked, so test that first.
1954	!
1955	brz,pt	%o5, 1f				! nothing active
1956	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1957	set	_intr_flag_table, %o1
1958	tst	%o3				! see if any of the bits are set
1959	ldub	[%o1 + %o3], %o3		! load bit number
1960	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1961	add	%o3, 11-1, %o3			! delay - add bit number - 1
1962
1963	sra	%o5, 6, %o3			! test bits 10-6
1964	tst	%o3
1965	ldub	[%o1 + %o3], %o3
1966	bnz,a,pn %xcc, 1f
1967	add	%o3, 6-1, %o3
1968
1969	sra	%o5, 1, %o3			! test bits 5-1
1970	ldub	[%o1 + %o3], %o3
1971
1972	!
1973	! the highest active interrupt level number is in %o3
1974	!
19751:
1976	retl
1977	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
1978	SET_SIZE(set_base_spl)
1979
1980/*
1981 * Table that finds the most significant bit set in a five bit field.
1982 * Each entry is the high-order bit number + 1 of its index in the table.
1983 * This read-only data is in the text segment.
1984 */
1985_intr_flag_table:
1986	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1987	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1988	.align	4
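
/*
 * Illustrative C equivalent of the PIL search above (a sketch only;
 * the assembly tests the 15-11, 10-6 and 5-1 bit groups against the
 * 32-entry table instead of looping over single bits):
 *
 *	uint_t actv = cpu->cpu_intr_actv;   (bit N set => level N active)
 *	uint_t pil = 0;
 *	for (int i = 15; i >= 1; i--) {
 *		if (actv & (1 << i)) {
 *			pil = i;
 *			break;
 *		}
 *	}
 *	cpu->cpu_base_spl = pil;
 */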
1989
1990#endif	/* lint */
1991
1992/*
1993 * int
1994 * intr_passivate(from, to)
1995 *	kthread_id_t	from;		interrupt thread
1996 *	kthread_id_t	to;		interrupted thread
1997 */
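
/*
 * Illustrative C sketch of what the assembly below does (assumes the
 * struct rwindow layout of the window save area; the real work must
 * avoid using %sp mid-copy, hence the assembly):
 *
 *	struct rwindow *src = (struct rwindow *)from->t_stk;
 *	struct rwindow *dst = (struct rwindow *)(to->t_sp + STACK_BIAS);
 *	*dst = *src;		copy 8 locals, then 8 ins
 *	src->rw_fp = 0;		clear fp in the old save area
 *	return (from->t_pil);	caller resumes at the saved pil
 */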
1998
1999#if defined(lint)
2000
2001/* ARGSUSED */
2002int
2003intr_passivate(kthread_id_t from, kthread_id_t to)
2004{ return (0); }
2005
2006#else	/* lint */
2007
2008	ENTRY_NP(intr_passivate)
2009	save	%sp, -SA(MINFRAME), %sp	! get a new window
2010
2011	flushw				! force register windows to stack
2012	!
2013	! restore registers from the base of the stack of the interrupt thread.
2014	!
2015	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
2016	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
2017	ldn	[%i2 + (1*GREGSIZE)], %l1
2018	ldn	[%i2 + (2*GREGSIZE)], %l2
2019	ldn	[%i2 + (3*GREGSIZE)], %l3
2020	ldn	[%i2 + (4*GREGSIZE)], %l4
2021	ldn	[%i2 + (5*GREGSIZE)], %l5
2022	ldn	[%i2 + (6*GREGSIZE)], %l6
2023	ldn	[%i2 + (7*GREGSIZE)], %l7
2024	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
2025	ldn	[%i2 + (9*GREGSIZE)], %o1
2026	ldn	[%i2 + (10*GREGSIZE)], %o2
2027	ldn	[%i2 + (11*GREGSIZE)], %o3
2028	ldn	[%i2 + (12*GREGSIZE)], %o4
2029	ldn	[%i2 + (13*GREGSIZE)], %o5
2030	ldn	[%i2 + (14*GREGSIZE)], %i4
2031					! copy fp and %i7 without using %i6/%i7
2032	ldn	[%i2 + (15*GREGSIZE)], %i5
2033	!
2034	! put registers into the save area at the top of the interrupted
2035	! thread's stack, pointed to by the interrupted thread's T_SP.
2036	!
2037	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
2038	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
2039	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
2040	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
2041	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
2042	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
2043	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
2044	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
2045	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
2046	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
2047	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
2048	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
2049	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
2050	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
2051	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
2052	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
2053						! fp, %i7 copied using %i4, %i5
2054	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
2055	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
2056						! clear fp in save area
2057
2058	! load saved pil for return
2059	ldub	[%i0 + T_PIL], %i0
2060	ret
2061	restore
2062	SET_SIZE(intr_passivate)
2063
2064#endif	/* lint */
2065
2066#if defined(lint)
2067
2068/*
2069 * intr_get_time() is a resource for interrupt handlers to determine how
2070 * much time has been spent handling the current interrupt. Such a function
2071 * is needed because higher level interrupts can arrive during the
2072 * processing of an interrupt, thus making direct comparisons of %tick by
2073 * the handler inaccurate. intr_get_time() only returns time spent in the
2074 * current interrupt handler.
2075 *
2076 * The caller must be calling from an interrupt handler running at a pil
2077 * below or at lock level. Timings are not provided for high-level
2078 * interrupts.
2079 *
2080 * The first time intr_get_time() is called while handling an interrupt,
2081 * it returns the time since the interrupt handler was invoked. Subsequent
2082 * calls will return the time since the prior call to intr_get_time(). Time
2083 * is returned as ticks, adjusted for any clock divisor due to power
2084 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2085 * not be the same across CPUs.
2086 *
2087 * Theory Of Intrstat[][]:
2088 *
2089 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2090 * uint64_ts per pil.
2091 *
2092 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2093 * handling all interrupts at the specified pil on this CPU. It is
2094 * exported via kstats to the user.
2095 *
2096 * intrstat[pil][1] is always a count of ticks less than or equal to the
2097 * value in [0]. The difference between [1] and [0] is the value returned
2098 * by a call to intr_get_time(). At the start of interrupt processing,
2099 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2100 * time, [0] will increase, but [1] will remain the same. A call to
2101 * intr_get_time() will return the difference, then update [1] to be the
2102 * same as [0]. Future calls will return the time since the last call.
2103 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2104 *
2105 * Implementation:
2106 *
2107 * intr_get_time() works much like a higher level interrupt arriving. It
2108 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2109 * to include elapsed running time, and by setting t_intr_start to %tick.
2110 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2111 * and updates intrstat[pil][1] to be the same as the new value of
2112 * intrstat[pil][0].
2113 *
2114 * In the normal handling of interrupts, after an interrupt handler returns
2115 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2116 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2117 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2118 * is 0.
2119 *
2120 * Whenever interrupts arrive on a CPU which is handling a lower pil
2121 * interrupt, they update the lower pil's [0] to show time spent in the
2122 * handler that they've interrupted. This results in a growing discrepancy
2123 * between [0] and [1], which is returned the next time intr_get_time() is
2124 * called. Time spent in the higher-pil interrupt will not be returned in
2125 * the next intr_get_time() call from the original interrupt, because
2126 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2127 */
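
/*
 * A rough C sketch of the checkpointing described above (illustrative
 * only: field names are simplified, and the real code below also raises
 * %pil, scales the delta by cpu_divisor, and charges cpu_intracct):
 *
 *	kthread_t *t = curthread;
 *	uint64_t now = gettick();
 *	uint64_t delta = now - t->t_intr_start;
 *	t->t_intr_start = now;			checkpoint the start time
 *	intrstat[t->t_pil][0] += delta;		cumulative ticks at pil
 *	ret = intrstat[t->t_pil][0] - intrstat[t->t_pil][1];
 *	intrstat[t->t_pil][1] = intrstat[t->t_pil][0];
 *	return (ret);				time since the last call
 */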
2128
2129/*ARGSUSED*/
2130uint64_t
2131intr_get_time(void)
2132{ return (0); }
2133#else	/* lint */
2134
2135	ENTRY_NP(intr_get_time)
2136#ifdef DEBUG
2137	!
2138	! Lots of asserts, but just check panic_quiesce first.
2139	! Don't bother with lots of tests if we're just ignoring them.
2140	!
2141	sethi	%hi(panic_quiesce), %o0
2142	ld	[%o0 + %lo(panic_quiesce)], %o0
2143	brnz,pn	%o0, 2f
2144	nop
2145	!
2146	! ASSERT(%pil <= LOCK_LEVEL)
2147	!
2148	rdpr	%pil, %o1
2149	cmp	%o1, LOCK_LEVEL
2150	ble,pt	%xcc, 0f
2151	sethi	%hi(intr_get_time_high_pil), %o0	! delay
2152	call	panic
2153	or	%o0, %lo(intr_get_time_high_pil), %o0
21540:
2155	!
2156	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
2157	!
2158	lduh	[THREAD_REG + T_FLAGS], %o2
2159	andcc	%o2, T_INTR_THREAD, %g0
2160	bz,pn	%xcc, 1f
2161	ldub	[THREAD_REG + T_PIL], %o1		! delay
2162	brnz,pt	%o1, 0f
21631:
2164	sethi	%hi(intr_get_time_not_intr), %o0
2165	call	panic
2166	or	%o0, %lo(intr_get_time_not_intr), %o0
21670:
2168	!
2169	! ASSERT(t_intr_start != 0)
2170	!
2171	ldx	[THREAD_REG + T_INTR_START], %o1
2172	brnz,pt	%o1, 2f
2173	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
2174	call	panic
2175	or	%o0, %lo(intr_get_time_no_start_time), %o0
21762:
2177#endif /* DEBUG */
2178	!
2179	! %o0 = elapsed time and return value
2180	! %o1 = pil
2181	! %o2 = scratch
2182	! %o3 = scratch
2183	! %o4 = scratch
2184	! %o5 = cpu
2185	!
2186	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
2187	ldn	[THREAD_REG + T_CPU], %o5
2188	ldub	[THREAD_REG + T_PIL], %o1
2189	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2190	!
2191	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2192	! get delta, and multiply by cpu_divisor if necessary.
2193	!
2194	RD_TICK_NO_SUSPEND_CHECK(%o2, %o0)
2195	stx	%o2, [THREAD_REG + T_INTR_START]
2196	sub	%o2, %o3, %o0
2197
2198	lduh	[%o5 + CPU_DIVISOR], %o4
2199	cmp	%o4, 1
2200	bg,a,pn	%xcc, 1f
2201	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
22021:
2203	! Update intracct[]
2204	lduh	[%o5 + CPU_MSTATE], %o4
2205	sllx	%o4, 3, %o4
2206	add	%o4, CPU_INTRACCT, %o4
2207	ldx	[%o5 + %o4], %o2
2208	add	%o2, %o0, %o2
2209	stx	%o2, [%o5 + %o4]
2210
2211	!
2212	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2213	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2214	! first entered, or the last time intr_get_time() was invoked. Then
2215	! update cpu_m.intrstat[pil][1] to match [0].
2216	!
2217	sllx	%o1, 4, %o3
2218	add	%o3, CPU_MCPU, %o3
2219	add	%o3, MCPU_INTRSTAT, %o3
2220	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
2221	ldx	[%o3], %o2
2222	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2223	stx	%o2, [%o3]
2224	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2225	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2226	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2227
2228	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2229	cmp	%o2, %o1			! of either our pil %o1 or
2230	movl	%xcc, %o1, %o2			! cpu_base_spl.
2231	retl
2232	wrpr	%g0, %o2, %pil
2233	SET_SIZE(intr_get_time)
2234
2235#ifdef DEBUG
2236intr_get_time_high_pil:
2237	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2238intr_get_time_not_intr:
2239	.asciz	"intr_get_time(): not called from an interrupt thread"
2240intr_get_time_no_start_time:
2241	.asciz	"intr_get_time(): t_intr_start == 0"
2242#endif /* DEBUG */
2243#endif  /* lint */
2244