xref: /illumos-gate/usr/src/uts/sun4/ml/interrupt.S (revision d48be21240dfd051b689384ce2b23479d757f2d8)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "assym.h"

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * 	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the first intr_vec_t (the list head) off intr_head[pil]
	! and panic immediately if the list head is NULL. Otherwise, update
	! intr_head[pil] to the next intr_vec_t on the list and, if that
	! next intr_vec_t is NULL, clear the softint via %clear_softint.
	!
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) != NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3 ! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t non NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = !(iv->iv_flags & PEND)
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						!       iv->iv_pil_xnext

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)
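
/*
 * For reference, the dequeue and dispatch above amount to the following
 * C sketch. It is illustrative only: struct and field names are assumed
 * from the assym offsets used here (INTR_HEAD, IV_PIL_NEXT, IV_FLAGS,
 * and friends), not a literal translation.
 *
 *	intr_vec_t *iv = cpu->m_cpu.intr_head[pil];
 *	intr_vec_t **nextp;
 *
 *	if (iv == NULL)
 *		ptl1_panic(PTL1_BAD_INTR_VEC);
 *	nextp = (iv->iv_flags & IV_SOFTINT_MT) ?
 *	    &iv->iv_xpil_next[cpu->cpu_id] : &iv->iv_pil_next;
 *	if ((cpu->m_cpu.intr_head[pil] = *nextp) == NULL) {
 *		cpu->m_cpu.intr_tail[pil] = NULL;
 *		clear_softint(1 << pil);
 *	}
 *	iv->iv_flags &= ~IV_SOFTINT_PEND;
 *	*nextp = NULL;
 *	if (pil > LOCK_LEVEL)
 *		sys_trap(current_thread, iv, pil, PIL_MAX);
 *	else
 *		sys_trap(intr_thread, iv, pil, DISP_LEVEL);
 */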


_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
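
/*
 * Taken together, the calling protocol the three macros implement is,
 * in hedged C-style pseudocode (ls1/os3 standing for the registers
 * documented above):
 *
 *	SERVE_INTR_PRE(iv, ...);	// once, before the first SERVE_INTR
 *	for (;;) {
 *		SERVE_INTR(...);	// invoke handler, bump intr stats
 *		// per-handler accounting may go here; ls1 and os3
 *		// must survive unmodified
 *		if (os3 == NULL)	// no further iv queued at this pil
 *			break;
 *		SERVE_INTR_NEXT(...);	// dequeue next iv from the list
 *	}
 */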

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG1], %o0;					\
	ldn	[ls1 + IV_ARG2], %o1;					\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu, INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	lduh	[os3 + IV_FLAGS], os2;					\
	and	os2, IV_SOFTINT_MT, os2;				\
	brz,pt	os2, 4f;						\
	add	os3, IV_PIL_NEXT, os2;					\
	ld	[cpu + CPU_ID], os5;					\
	sll	os5, CPTRSHIFT, os5;					\
	add	os2, os5, os2;						\
4:	ldn	[os2], os5;						\
	brnz,pn	os5, 5f;						\
	stn	os5, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
5:	lduh	[os3 + IV_FLAGS], ls1;					\
	andn	ls1, IV_SOFTINT_PEND, ls1;				\
	sth	ls1, [os3 + IV_FLAGS];					\
	stn	%g0, [os2];						\
	wrpr	%g0, ls2, %pstate;					\
	mov	os3, ls1;						\
	mov	os3, ls2;						\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2, os3);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

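	!
	! The casx loops above are a compare-and-swap idiom for atomically
	! claiming the interval since t_intr_start. A rough C equivalent
	! (illustrative; rd_tick() stands for the RD_CLOCK_TICK read and
	! atomic_cas_64() for the casx):
	!
	!	do {
	!		start = t->t_intr_start;
	!		if (start == 0)
	!			break;		/* already accounted for */
	!		interval = rd_tick() - start;
	!	} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
	!
	! If a high-level interrupt stores a new t_intr_start between the
	! load and the casx, the casx fails and the interval is recomputed.
	!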
1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	RD_CLOCK_TICK(%o4,%l2,%l3,__LINE__)
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	RD_CLOCK_TICK(%o1,%l2,%l3,__LINE__)
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
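	! A compact C sketch of that policy (illustrative; it folds the
	! "don't double-count stats past the limit" detail handled below
	! into a single >= test):
	!
	!	if (!(cpu->cpu_flags & CPU_QUIESCED)) {
	!		if (curthread->t_intr != NULL) {
	!			if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
	!				cpu->cpu_kprunrun = 1;
	!				CPU_STATS_ADDQ(cpu, sys, intrunpin, 1);
	!				preempt();
	!			}
	!		} else if (cpu->cpu_kprunrun) {
	!			preempt();
	!		}
	!	}
	!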
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_next and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad			! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	RD_CLOCK_TICK(%o5,%l2,%l3,__LINE__)
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5, %o0)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)
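
/*
 * In outline, the exit path above does the equivalent of this C sketch
 * (illustrative):
 *
 *	CPU_STATS_ADDQ(cpu, sys, intrblk, 1);
 *	_intr_set_spl(cpu);			// recompute base SPL
 *	curthread->t_state = TS_FREE;
 *	curthread->t_link = cpu->cpu_intr_thread;	// back on free pool
 *	cpu->cpu_intr_thread = curthread;
 *	swtch();				// does not return
 */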

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread():	t_intr_start zero upon handler return"
#endif /* DEBUG */

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3

	ldn	[THREAD_REG + T_ONFAULT], %l2
	brz,pt	%l2, no_onfault		! branch if no onfault label set
	nop
	stn	%g0, [THREAD_REG + T_ONFAULT]	! clear onfault label
	ldn	[THREAD_REG + T_LOFAULT], %l3
	stn	%g0, [THREAD_REG + T_LOFAULT]	! clear lofault data

	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OFD, %o4	! %o4 has on_fault data offset
	stn	%l2, [%o3 + %o4]	! save onfault label for pil %o2
	add	%o5, CPU_LFD, %o4	! %o4 has lofault data offset
	stn	%l3, [%o3 + %o4]	! save lofault data for pil %o2

no_onfault:
	ldn	[THREAD_REG + T_ONTRAP], %l2
	brz,pt	%l2, 6f			! branch if no on_trap protection
	nop
	stn	%g0, [THREAD_REG + T_ONTRAP]	! clear on_trap protection
	sub	%o2, LOCK_LEVEL + 1, %o5
	sll	%o5, CPTRSHIFT, %o5
	add	%o5, CPU_OTD, %o4	! %o4 has on_trap data offset
	stn	%l2, [%o3 + %o4]	! save on_trap label for pil %o2

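	!
	! In C terms, the two blocks above stash any active on_fault/on_trap
	! state in per-PIL slots in the machcpu area, so this high-level
	! handler starts with a clean slate (sketch; the slot names are
	! assumptions based on the CPU_OFD, CPU_LFD and CPU_OTD offsets):
	!
	!	i = pil - (LOCK_LEVEL + 1);
	!	if (curthread->t_onfault != NULL) {
	!		cpu->ofd[i] = curthread->t_onfault;
	!		cpu->lfd[i] = curthread->t_lofault;
	!		curthread->t_onfault = NULL;
	!		curthread->t_lofault = NULL;
	!	}
	!	if (curthread->t_ontrap != NULL) {
	!		cpu->otd[i] = curthread->t_ontrap;
	!		curthread->t_ontrap = NULL;
	!	}
	!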
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
6:	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:

	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
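	!
	! A C sketch of the scan above (illustrative): starting one level
	! below the current PIL, walk the high-level bits of cpu_intr_actv
	! downward until the interrupted level's bit is found.
	!
	!	idx = pil - (LOCK_LEVEL + 2);
	!	for (mask = 1 << idx; !(mask & actv_high); mask >>= 1)
	!		idx--;	/* idx + LOCK_LEVEL + 1 == interrupted PIL */
	!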
3:
	sll	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%l1, %l2)
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter
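	!
	! The offset arithmetic above checks out as:
	!	(PIL - (LOCK_LEVEL + 1)) * 8 * 2 + (LOCK_LEVEL + 1) * 16
	!	    = PIL * 16 - (LOCK_LEVEL + 1) * 16 + (LOCK_LEVEL + 1) * 16
	!	    = PIL * 16, the byte offset into the intrstat array.
	!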

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %l2)
	stx	%o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! branch if %o2 == 0 (no more intrs)
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			! cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o5, %o0)
	ldx	[%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 7f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 7f
	nop

	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o4, %l2)
	stx	%o4, [THREAD_REG + T_INTR_START]

7:
	sub	%o2, LOCK_LEVEL + 1, %o4
	sll	%o4, CPTRSHIFT, %o5

	! Check on_trap saved area and restore as needed
	add	%o5, CPU_OTD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt %l2, no_ontrp_restore
	nop
	stn	%l2, [THREAD_REG + T_ONTRAP] ! restore
	stn	%g0, [%o3 + %o4]	! clear

no_ontrp_restore:
	! Check on_fault saved area and restore as needed
	add	%o5, CPU_OFD, %o4
	ldn	[%o3 + %o4], %l2
	brz,pt %l2, 8f
	nop
	stn	%l2, [THREAD_REG + T_ONFAULT] ! restore
	stn	%g0, [%o3 + %o4]	! clear
	add	%o5, CPU_LFD, %o4
	ldn	[%o3 + %o4], %l2
	stn	%l2, [THREAD_REG + T_LOFAULT] ! restore
	stn	%g0, [%o3 + %o4]	! clear


8:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)


#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)

	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)

	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)

	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0
2:
	ret
	restore	%g0, 1, %o0
	SET_SIZE(cbe_level14)
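
/*
 * C sketch of the check above (illustrative; rd_softint() is a stand-in
 * for the "rd SOFTINT" read): fire the cyclic subsystem only when the
 * level-14 softint came from the %tick/%stick compare registers.
 *
 *	if (rd_softint() & (TICK_INT_MASK | STICK_INT_MASK))
 *		cyclic_fire(CPU);
 *	return (1);
 */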


	ENTRY_NP(kdi_setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	! 	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! check if a softint is pending for this softint,
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! Set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for Multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(kdi_setsoftint)
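
/*
 * The tail-insert logic above, shared in spirit with setsoftint_tl1 and
 * intr_enqueue_req below, amounts to this C sketch (illustrative field
 * names based on the assym offsets):
 *
 *	intr_vec_t *ct = cpu->m_cpu.intr_tail[pil];
 *
 *	cpu->m_cpu.intr_tail[pil] = iv;
 *	if (ct == NULL)				// empty list: iv is head too
 *		cpu->m_cpu.intr_head[pil] = iv;
 *	else if (ct->iv_flags & IV_SOFTINT_MT)	// multi-target tail link
 *		ct->iv_xpil_next[cpu->cpu_id] = iv;
 *	else
 *		ct->iv_pil_next = iv;
 *	set_softint(1 << pil);
 */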

	!
	! Register usage
	!	Arguments:
	! 	%g1 - Pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3, %g5-%g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!       current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6, %g3)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6	!
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)

	!
	! Register usage
	!	Arguments:
	! 	%g1 - inumber
	!
	!	Internal:
	! 	%g1 - softint pil mask
	!	%g2 - pil of intr_vec_t
	!	%g3 - pointer to current intr_vec_t (iv)
	!	%g4 - cpu
	!	%g5, %g6, %g7 - temps
	!
	ENTRY_NP(setvecint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr

	!
	! Fetch data from intr_vec_table according to the inum.
	!
	! We have an interrupt number. Fetch the interrupt vector requests
	! from the interrupt vector table for a given interrupt number and
	! insert them into cpu's softint priority lists and set %set_softint.
	!
	set	intr_vec_table, %g5	! %g5 = intr_vec_table
	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
	ldn	[%g5], %g3		! %g3 = pointer to first entry of
					!       intr_vec_t list

	! Verify the first intr_vec_t pointer for a given inum and it should
	! not be NULL. This used to be guarded by DEBUG but broken drivers can
	! cause spurious tick interrupts when the softint register is programmed
	! with 1 << 0 at the end of this routine. Now we always check for a
	! valid intr_vec_t pointer.
	brz,pn	%g3, .no_ivintr
	nop

	!
	! Traverse the intr_vec_t link list, put each item on to corresponding
	! CPU softint priority queue, and compose the final softint pil mask.
	!
	! At this point:
	!	%g3 = intr_vec_table[inum]
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
0:
	!
	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	! At this point:
	!	%g1 = softint pil mask
	!	%g3 = pointer to next intr_vec_t (iv)
	!	%g4 = cpu
	!
	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					! 	current tail (ct)
	brz,pt	%g5, 2f			! branch if current tail is NULL
	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
					! cpu->m_cpu.intr_tail[pil] = iv
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 1f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g3, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
3:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
1677	rdpr	%tpc, %g6
1678	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
1679	rdpr	%tstate, %g6
1680	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
1681	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
1682	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
1683	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
1684	add	%g4, INTR_HEAD, %g6
1685	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_head[pil]
1686	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
1687	add	%g4, INTR_TAIL, %g6
1688	ldn	[%g6 + %g7], %g6		! %g6=cpu->m_cpu.intr_tail[pil]
1689	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
1690	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
1691	GET_TRACE_TICK(%g6, %g7)
1692	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
1693	TRACE_NEXT(%g5, %g6, %g7)
1694#endif /* TRAPTRACE */
1695	mov	1, %g6			! %g6 = 1
1696	sll	%g6, %g2, %g6		! %g6 = 1 << pil
1697	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
1698	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
1699	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
1700	nop
1701	wr	%g1, SET_SOFTINT	! trigger one or more pil softints
1702	retry
1703
1704.no_ivintr:
1705	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1706	mov	%g2, %g3
1707	mov	%g1, %g2
1708	set	no_ivintr, %g1
1709	ba,pt	%xcc, sys_trap
1710	mov	PIL_15, %g4
1711	SET_SIZE(setvecint_tl1)
1712
1713	ENTRY_NP(wr_clr_softint)
1714	retl
1715	wr	%o0, CLEAR_SOFTINT
1716	SET_SIZE(wr_clr_softint)
1717
1718/*
1719 * intr_enqueue_req
1720 *
1721 * %o0 - pil
1722 * %o1 - pointer to intr_vec_t (iv)
1723 * %o5 - preserved
1724 * %g5 - preserved
1725 */
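/*
 * From C, callers see a prototype along these lines (a sketch; the
 * real declaration lives in the machine-dependent headers):
 *
 *	extern void intr_enqueue_req(uint_t pil, intr_vec_t *iv);
 *
 * Unlike setvecint_tl1 above, this routine only queues the request;
 * it never writes %set_softint, so that is left to the caller.
 */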
1726	ENTRY_NP(intr_enqueue_req)
1727	!
1728	CPU_ADDR(%g4, %g1)		! %g4 = cpu
1729
1730	!
1731	! Insert intr_vec_t (iv) into the appropriate cpu's softint priority list
1732	!
1733	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
1734	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
1735	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
1736					!       current tail (ct)
1737	brz,pt	%g1, 2f			! branch if current tail is NULL
1738	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) the new tail
1739
1740	!
1741	! there's a pending intr_vec_t already
1742	!
1743	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
1744	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
1745	brz,pt	%g6, 1f			! check for multi target softint flag
1746	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
1747	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
1748	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
1749	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
17501:
1751	!
1752	! update old tail
1753	!
1754	ba,pt	%xcc, 3f
1755	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
17562:
1757	!
1758	! no intr_vec_t queued, so make this intr_vec_t the new head
1759	!
1760	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
1761	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
17623:
1763	retl
1764	nop
1765	SET_SIZE(intr_enqueue_req)
1766
1767/*
1768 * Set CPU's base SPL level, based on which interrupt levels are active.
1769 * 	Called at spl7 or above.
1770 */
1771
1772	ENTRY_NP(set_base_spl)
1773	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
1774	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask
1775
1776/*
1777 * WARNING: non-standard calling sequence; do not call from C
1778 *	%o2 = pointer to CPU
1779 *	%o5 = updated CPU_INTR_ACTV
1780 */
1781_intr_set_spl:					! intr_thread_exit enters here
1782	!
1783	! Determine the highest interrupt level active.  Several could be
1784	! blocked at higher levels than this one, so we must convert the flags
1785	! to a PIL.  Normally nothing will be blocked, so test this first.
1786	!
1787	brz,pt	%o5, 1f				! nothing active
1788	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
1789	set	_intr_flag_table, %o1
1790	tst	%o3				! see if any of the bits are set
1791	ldub	[%o1 + %o3], %o3		! load bit number
1792	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
1793	add	%o3, 11-1, %o3			! delay - add bit number - 1
1794
1795	sra	%o5, 6, %o3			! test bits 10-6
1796	tst	%o3
1797	ldub	[%o1 + %o3], %o3
1798	bnz,a,pn %xcc, 1f
1799	add	%o3, 6-1, %o3
1800
1801	sra	%o5, 1, %o3			! test bits 5-1
1802	ldub	[%o1 + %o3], %o3
1803
1804	!
1805	! highest active interrupt level number is in %o3
1806	!
18071:
1808	retl
1809	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
1810	SET_SIZE(set_base_spl)
1811
1812/*
1813 * Table for finding the most significant bit set in a five-bit field.
1814 * Each entry is the high-order bit number + 1 of its index in the table.
1815 * This read-only data is in the text segment.
1816 */
1817_intr_flag_table:
1818	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1819	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1820	.align	4
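/*
 * A C rendering of the lookup above (a sketch, not the kernel's actual
 * implementation): the active-interrupt mask is scanned five bits at a
 * time, highest chunk first, and the table maps a five-bit value to the
 * index of its highest set bit plus one.  The chunks cover pils 15-11,
 * 10-6 and 5-1 respectively.
 *
 *	static const uint8_t msbit[32] = {
 *		0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
 *		5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
 *	};
 *
 *	uint_t
 *	base_spl(uint_t actv)
 *	{
 *		if (actv == 0)
 *			return (0);
 *		if (actv >> 11)
 *			return (msbit[actv >> 11] + 11 - 1);
 *		if (actv >> 6)
 *			return (msbit[actv >> 6] + 6 - 1);
 *		return (msbit[actv >> 1]);
 *	}
 */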
1821
1822/*
1823 * int
1824 * intr_passivate(from, to)
1825 *	kthread_id_t	from;		interrupt thread
1826 *	kthread_id_t	to;		interrupted thread
1827 */
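/*
 * In rough C terms (a sketch; the save-area layout and GREGSIZE come
 * from the real headers, and the loop stands in for the explicit
 * per-register loads and stores below):
 *
 *	int
 *	intr_passivate(kthread_id_t from, kthread_id_t to)
 *	{
 *		greg_t *src = (greg_t *)from->t_stack;
 *		greg_t *dst = (greg_t *)(to->t_sp + STACK_BIAS);
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			dst[i] = src[i];
 *		src[8 + 6] = 0;
 *		return (from->t_pil);
 *	}
 *
 * The store to src[8 + 6] clears the saved frame pointer in the
 * interrupt thread's save area, matching the stn of %g0 below.
 */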
1828
1829	ENTRY_NP(intr_passivate)
1830	save	%sp, -SA(MINFRAME), %sp	! get a new window
1831
1832	flushw				! force register windows to stack
1833	!
1834	! restore registers from the base of the stack of the interrupt thread.
1835	!
1836	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
1837	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
1838	ldn	[%i2 + (1*GREGSIZE)], %l1
1839	ldn	[%i2 + (2*GREGSIZE)], %l2
1840	ldn	[%i2 + (3*GREGSIZE)], %l3
1841	ldn	[%i2 + (4*GREGSIZE)], %l4
1842	ldn	[%i2 + (5*GREGSIZE)], %l5
1843	ldn	[%i2 + (6*GREGSIZE)], %l6
1844	ldn	[%i2 + (7*GREGSIZE)], %l7
1845	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
1846	ldn	[%i2 + (9*GREGSIZE)], %o1
1847	ldn	[%i2 + (10*GREGSIZE)], %o2
1848	ldn	[%i2 + (11*GREGSIZE)], %o3
1849	ldn	[%i2 + (12*GREGSIZE)], %o4
1850	ldn	[%i2 + (13*GREGSIZE)], %o5
1851	ldn	[%i2 + (14*GREGSIZE)], %i4
1852					! copy stack pointer without using %sp
1853	ldn	[%i2 + (15*GREGSIZE)], %i5
1854	!
1855	! put registers into the save area at the top of the interrupted
1856	! thread's stack, pointed to by the interrupted thread's saved t_sp.
1857	!
1858	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
1859	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
1860	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
1861	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
1862	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
1863	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
1864	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
1865	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
1866	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
1867	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
1868	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
1869	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
1870	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
1871	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
1872	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
1873	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
1874						! fp, %i7 copied using %i4, %i5
1875	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
1876	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
1877						! clear fp in save area
1878
1879	! load saved pil for return
1880	ldub	[%i0 + T_PIL], %i0
1881	ret
1882	restore
1883	SET_SIZE(intr_passivate)
1884
1885/*
1886 * intr_get_time() is a resource for interrupt handlers to determine how
1887 * much time has been spent handling the current interrupt. Such a function
1888 * is needed because higher level interrupts can arrive during the
1889 * processing of an interrupt, thus making direct comparisons of %tick by
1890 * the handler inaccurate. intr_get_time() only returns time spent in the
1891 * current interrupt handler.
1892 *
1893 * The caller must be an interrupt handler running at a pil at or
1894 * below lock level.  Timings are not provided for high-level
1895 * interrupts.
1896 *
1897 * The first time intr_get_time() is called while handling an interrupt,
1898 * it returns the time since the interrupt handler was invoked. Subsequent
1899 * calls will return the time since the prior call to intr_get_time(). Time
1900 * is returned as ticks, adjusted for any clock divisor due to power
1901 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
1902 * not be the same across CPUs.
1903 *
1904 * Theory Of Intrstat[][]:
1905 *
1906 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1907 * uint64_ts per pil.
1908 *
1909 * intrstat[pil][0] is a cumulative count of the number of ticks spent
1910 * handling all interrupts at the specified pil on this CPU. It is
1911 * exported via kstats to the user.
1912 *
1913 * intrstat[pil][1] is always a count of ticks less than or equal to the
1914 * value in [0]. The difference between [1] and [0] is the value returned
1915 * by a call to intr_get_time(). At the start of interrupt processing,
1916 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1917 * time, [0] will increase, but [1] will remain the same. A call to
1918 * intr_get_time() will return the difference, then update [1] to be the
1919 * same as [0]. Future calls will return the time since the last call.
1920 * Finally, when the interrupt completes, [1] is updated to the same as [0].
1921 *
1922 * Implementation:
1923 *
1924 * intr_get_time() works much like a higher level interrupt arriving. It
1925 * "checkpoints" the timing information by incrementing intrstat[pil][0]
1926 * to include elapsed running time, and by setting t_intr_start to %tick.
1927 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1928 * and updates intrstat[pil][1] to be the same as the new value of
1929 * intrstat[pil][0].
1930 *
1931 * In the normal handling of interrupts, after an interrupt handler returns
1932 * and the code in intr_thread() updates intrstat[pil][0], it then sets
1933 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1934 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1935 * is 0.
1936 *
1937 * Whenever interrupts arrive on a CPU which is handling a lower pil
1938 * interrupt, they update the lower pil's [0] to show time spent in the
1939 * handler that they've interrupted. This results in a growing discrepancy
1940 * between [0] and [1], which is returned the next time intr_get_time() is
1941 * called. Time spent in the higher-pil interrupt will not be returned in
1942 * the next intr_get_time() call from the original interrupt, because
1943 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
1944 */
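/*
 * A C sketch of the checkpoint described above (field spellings are
 * illustrative, gettick() stands in for the RD_CLOCK_TICK machinery,
 * and the cpu_intracct update done by the assembly is omitted for
 * brevity):
 *
 *	uint64_t
 *	intr_get_time(void)
 *	{
 *		kthread_t *t = curthread;
 *		cpu_t *cpu = t->t_cpu;
 *		uint_t pil = t->t_pil;
 *		uint64_t now = gettick();
 *		uint64_t delta;
 *
 *		delta = (now - t->t_intr_start) * cpu->cpu_divisor;
 *		t->t_intr_start = now;
 *		cpu->cpu_m.intrstat[pil][0] += delta;
 *		delta = cpu->cpu_m.intrstat[pil][0] -
 *		    cpu->cpu_m.intrstat[pil][1];
 *		cpu->cpu_m.intrstat[pil][1] = cpu->cpu_m.intrstat[pil][0];
 *		return (delta);
 *	}
 */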
1945	ENTRY_NP(intr_get_time)
1946#ifdef DEBUG
1947	!
1948	! Lots of asserts, but check panic_quiesce first.  Don't bother
1949	! with the tests if we're panicking and would just ignore them.
1950	!
1951	sethi	%hi(panic_quiesce), %o0
1952	ld	[%o0 + %lo(panic_quiesce)], %o0
1953	brnz,pn	%o0, 2f
1954	nop
1955	!
1956	! ASSERT(%pil <= LOCK_LEVEL)
1957	!
1958	rdpr	%pil, %o1
1959	cmp	%o1, LOCK_LEVEL
1960	ble,pt	%xcc, 0f
1961	sethi	%hi(intr_get_time_high_pil), %o0	! delay
1962	call	panic
1963	or	%o0, %lo(intr_get_time_high_pil), %o0
19640:
1965	!
1966	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
1967	!
1968	lduh	[THREAD_REG + T_FLAGS], %o2
1969	andcc	%o2, T_INTR_THREAD, %g0
1970	bz,pn	%xcc, 1f
1971	ldub	[THREAD_REG + T_PIL], %o1		! delay
1972	brnz,pt	%o1, 0f
19731:
1974	sethi	%hi(intr_get_time_not_intr), %o0
1975	call	panic
1976	or	%o0, %lo(intr_get_time_not_intr), %o0
19770:
1978	!
1979	! ASSERT(t_intr_start != 0)
1980	!
1981	ldx	[THREAD_REG + T_INTR_START], %o1
1982	brnz,pt	%o1, 2f
1983	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
1984	call	panic
1985	or	%o0, %lo(intr_get_time_no_start_time), %o0
19862:
1987#endif /* DEBUG */
1988	!
1989	! %o0 = elapsed time and return value
1990	! %o1 = pil
1991	! %o2 = scratch
1992	! %o3 = scratch
1993	! %o4 = scratch
1994	! %o5 = cpu
1995	!
1996	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
1997	ldn	[THREAD_REG + T_CPU], %o5
1998	ldub	[THREAD_REG + T_PIL], %o1
1999	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
2000	!
2001	! Calculate elapsed time since t_intr_start. Update t_intr_start,
2002	! get delta, and multiply by cpu_divisor if necessary.
2003	!
2004	RD_CLOCK_TICK_NO_SUSPEND_CHECK(%o2, %o0)
2005	stx	%o2, [THREAD_REG + T_INTR_START]
2006	sub	%o2, %o3, %o0
2007
2008	lduh	[%o5 + CPU_DIVISOR], %o4
2009	cmp	%o4, 1
2010	bg,a,pn	%xcc, 1f
2011	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
20121:
2013	! Update cpu_intracct[mstate]
2014	lduh	[%o5 + CPU_MSTATE], %o4
2015	sllx	%o4, 3, %o4
2016	add	%o4, CPU_INTRACCT, %o4
2017	ldx	[%o5 + %o4], %o2
2018	add	%o2, %o0, %o2
2019	stx	%o2, [%o5 + %o4]
2020
2021	!
2022	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
2023	! cpu_m.intrstat[pil][1], which is either when the interrupt was
2024	! first entered, or the last time intr_get_time() was invoked. Then
2025	! update cpu_m.intrstat[pil][1] to match [0].
2026	!
2027	sllx	%o1, 4, %o3
2028	add	%o3, CPU_MCPU, %o3
2029	add	%o3, MCPU_INTRSTAT, %o3
2030	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
2031	ldx	[%o3], %o2
2032	add	%o2, %o0, %o2		! %o2 = new value for intrstat
2033	stx	%o2, [%o3]
2034	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
2035	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
2036	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time
2037
2038	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
2039	cmp	%o2, %o1			! of either our pil %o1 or
2040	movl	%xcc, %o1, %o2			! cpu_base_spl.
2041	retl
2042	wrpr	%g0, %o2, %pil
2043	SET_SIZE(intr_get_time)
2044
2045#ifdef DEBUG
2046intr_get_time_high_pil:
2047	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
2048intr_get_time_not_intr:
2049	.asciz	"intr_get_time(): not called from an interrupt thread"
2050intr_get_time_no_start_time:
2051	.asciz	"intr_get_time(): t_intr_start == 0"
2052#endif /* DEBUG */
2053