xref: /titanic_44/usr/src/uts/sun4/ml/interrupt.s (revision 8a40a695ee676a322b094e9afe5375567bfb51e3)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if defined(lint)
29#include <sys/types.h>
30#include <sys/thread.h>
31#else	/* lint */
32#include "assym.h"
33#endif	/* lint */
34
35#include <sys/cmn_err.h>
36#include <sys/ftrace.h>
37#include <sys/asm_linkage.h>
38#include <sys/machthread.h>
39#include <sys/machcpuvar.h>
40#include <sys/intreg.h>
41
42#ifdef TRAPTRACE
43#include <sys/traptrace.h>
44#endif /* TRAPTRACE */
45
46
47
48#if defined(lint)
49
/* lint stub only -- the real pil_interrupt is the assembly below. */
50/* ARGSUSED */
51void
52pil_interrupt(int level)
53{}
54
55#else	/* lint */
56
57
58/*
59 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
60 * 	Register passed from LEVEL_INTERRUPT(level)
61 *	%g4 - interrupt request level
 *
 * Dequeues the first intr_req queued at this pil on the current CPU,
 * returns it to the CPU's free list, clears the vector's iv_pending
 * flag, then enters sys_trap() to run the TL=0 handler: intr_thread
 * for pil <= LOCK_LEVEL, current_thread for pil > LOCK_LEVEL.
62 */
63	ENTRY_NP(pil_interrupt)
64	!
65	! Register usage
66	!	%g1 - cpu
67	!	%g3 - intr_req
68	!	%g4 - pil
69	!	%g2, %g5, %g6 - temps
70	!
71	! grab the 1st intr_req off the list
72	! if the list is empty, clear %clear_softint
73	!
74	CPU_ADDR(%g1, %g5)
75	!
76	ALTENTRY(pil_interrupt_common)
77	sll	%g4, CPTRSHIFT, %g5
78	add	%g1, INTR_HEAD, %g6	! intr_head[0]
79	add	%g6, %g5, %g6		! intr_head[pil]
80	ldn	[%g6], %g3		! g3 = intr_req
81
82#ifndef DEBUG
83	brnz,pt	%g3, 5f
84	nop
85#else
86	!
87	! Verify the address of intr_req; it should be within the
88	! address range of intr_pool and intr_head
89	! or the address range of intr_add_head and intr_add_tail.
90	! The range of intr_add_head and intr_add_tail is subdivided
91	! by cpu, but the subdivision is not verified here.
92	!
93	! Registers passed to sys_trap()
94	!	%g1 - no_intr_req
95	!	%g2 - intr_req
96	!	%g3 - %pil
97	!	%g4 - current pil
98	!
99	add	%g1, INTR_POOL, %g2
100	cmp	%g3, %g2
101	blu,pn	%xcc, 8f
102	nop
103	add	%g1, INTR_HEAD, %g2
104	cmp	%g2, %g3
105	bgeu,pt	%xcc, 5f
106	nop
1078:
108	sethi	%hi(intr_add_head), %g2
109	ldn	[%g2 + %lo(intr_add_head)], %g2
110	brz,pn	%g2, 4f			! intr_add_head can be NULL
111	cmp	%g3, %g2
112	blu,pn	%xcc, 4f
113	nop
114	sethi	%hi(intr_add_tail), %g2
115	ldn	[%g2 + %lo(intr_add_tail)], %g2
116	cmp	%g2, %g3
117	bgeu,pt	%xcc, 5f
118	nop
1194:
120#endif /* DEBUG */
121#ifdef TRAPTRACE
122	TRACE_PTR(%g5, %g2)
123	GET_TRACE_TICK(%g2)
124	stxa	%g2, [%g5 + TRAP_ENT_TICK]%asi
125	TRACE_SAVE_TL_GL_REGS(%g5, %g2)
126	mov	0xbad, %g2
127	stha	%g2, [%g5 + TRAP_ENT_TT]%asi
128	rdpr	%tpc, %g2
129	stna	%g2, [%g5 + TRAP_ENT_TPC]%asi
130	rdpr	%tstate, %g2
131	stxa	%g2, [%g5 + TRAP_ENT_TSTATE]%asi
132	stna	%g0, [%g5 + TRAP_ENT_SP]%asi
133	stna	%g1, [%g5 + TRAP_ENT_TR]%asi
134	rd	SOFTINT, %g2
135	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
136	stna	%g3, [%g5 + TRAP_ENT_F2]%asi
137	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
138	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
139	TRACE_NEXT(%g5, %g2, %g1)
140#endif /* TRAPTRACE */
	!
	! No (or, with DEBUG, an out-of-range) intr_req for this level:
	! record the 0xbad trace entry above and panic.
	!
141	ba	ptl1_panic
142	mov	PTL1_BAD_INTR_REQ, %g1
	!
	! Dequeue: advance intr_head[pil]; if the list is now empty,
	! also clear intr_tail[pil] and this level's softint bit.
	!
1435:
144	ldn	[%g3 + INTR_NEXT], %g2	! 2nd entry
145	brnz,pn	%g2, 1f			! branch if list not empty
146	stn	%g2, [%g6]
147	add	%g1, INTR_TAIL, %g6	! intr_tail[0]
148	stn	%g0, [%g5 + %g6]	! update intr_tail[pil]
149	mov	1, %g5
150	sll	%g5, %g4, %g5
151	wr	%g5, CLEAR_SOFTINT
1521:
153	!
154	! put intr_req on free list
155	!	%g2 - inumber
156	!
157	ldn	[%g1 + INTR_HEAD], %g5	! current head of free list
158	lduw	[%g3 + INTR_NUMBER], %g2
159	stn	%g3, [%g1 + INTR_HEAD]
160	stn	%g5, [%g3 + INTR_NEXT]
161#ifdef TRAPTRACE
162	TRACE_PTR(%g5, %g6)
163	GET_TRACE_TICK(%g6)
164	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
165	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
166	rdpr	%tt, %g6
167	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
168	rdpr	%tpc, %g6
169	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
170	rdpr	%tstate, %g6
171	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
172	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
173	stna	%g3, [%g5 + TRAP_ENT_TR]%asi
174	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
175	sll	%g4, CPTRSHIFT, %g3
176	add	%g1, INTR_HEAD, %g6
177	ldn	[%g6 + %g3], %g6		! intr_head[pil]
178	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
179	add	%g1, INTR_TAIL, %g6
180	ldn	[%g6 + %g3], %g6		! intr_tail[pil]
181	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
182	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
183	TRACE_NEXT(%g5, %g6, %g3)
184#endif /* TRAPTRACE */
185	!
186	! clear the iv_pending flag for this inum
187	!
188	set	intr_vector, %g5;
189	sll	%g2, INTR_VECTOR_SHIFT, %g6;
190	add	%g5, %g6, %g5;			! &intr_vector[inum]
191	sth	%g0, [%g5 + IV_PENDING]
192
193	!
194	! Prepare for sys_trap()
195	!
196	! Registers passed to sys_trap()
197	!	%g1 - interrupt handler at TL==0
198	!	%g2 - inumber
199	!	%g3 - pil
200	!	%g4 - initial pil for handler
201	!
202	! figure which handler to run and which %pil it starts at
203	! intr_thread starts at DISP_LEVEL to prevent preemption
204	! current_thread starts at PIL_MAX to protect cpu_intr_actv
205	!
206	mov	%g4, %g3
207	cmp	%g4, LOCK_LEVEL
208	bg,a,pt	%xcc, 4f		! branch if pil > LOCK_LEVEL
209	mov	PIL_MAX, %g4
210	sethi	%hi(intr_thread), %g1
211	mov	DISP_LEVEL, %g4
212	ba,pt	%xcc, sys_trap
213	or	%g1, %lo(intr_thread), %g1
2144:
215	sethi	%hi(current_thread), %g1
216	ba,pt	%xcc, sys_trap
217	or	%g1, %lo(current_thread), %g1
218	SET_SIZE(pil_interrupt_common)
219	SET_SIZE(pil_interrupt)
220
221#endif	/* lint */
222
223
224#ifndef	lint
!
! cmn_err() format used by SERVE_INTR when a handler returns 0
! (spurious interrupt).  The leading '!' asks cmn_err to log the
! message only (no console output).
!
225_spurious:
226	.asciz	"!interrupt 0x%x at level %d not serviced"
227
228/*
229 * SERVE_INTR_PRE is called once, just before the first invocation
230 * of SERVE_INTR.
231 *
232 * Registers on entry:
233 *
234 * inum, cpu, regs: may be out-registers
235 * ls1, ls2: local scratch registers
236 * os1, os2, os3: scratch registers, may be out
 *
 * On completion:
 *
 * ls1 = &intr_vector[inum], as SERVE_INTR expects
 * ls2 = inum
 * (a TT_SERVE_INTR trap-trace entry is also recorded when TRAPTRACE
 * is defined)
237 */
238
239#define SERVE_INTR_PRE(inum, cpu, ls1, ls2, os1, os2, os3, regs)	\
240	set	intr_vector, ls1;					\
241	sll	inum, INTR_VECTOR_SHIFT, os1;				\
242	add	ls1, os1, ls1;						\
243	SERVE_INTR_TRACE(inum, os1, os2, os3, regs);			\
244	mov	inum, ls2;
245
246/*
247 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
248 * SERVE_INTR_NEXT, without intervening code. No register values
249 * may be modified.
 *
 * It invokes the vector's iv_handler(iv_arg, iv_softint_arg2) (entry
 * at ls1), logs the _spurious warning via cmn_err() when the handler
 * returns 0, increments this CPU's cpu_stats.sys.intr counter for the
 * vector's pil, and leaves intr_head[pil] in os3.
250 *
251 * After calling SERVE_INTR, the caller must check if os3 is set. If
252 * so, there is another interrupt to process. The caller must call
253 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
254 *
255 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
256 * and other actions which need to occur after invocation of an interrupt
257 * handler. However, the values of ls1 and os3 *must* be preserved and
258 * passed unmodified into SERVE_INTR_NEXT.
259 *
260 * Registers on return from SERVE_INTR:
261 *
262 * ls1 - the pil just processed
263 * ls2 - the inum just processed
264 * os3 - if set, another interrupt needs to be processed
265 * cpu, ls1, os3 - must be preserved if os3 is set
266 */
267
268#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
269	ldn	[ls1 + IV_HANDLER], os2;				\
270	ldn	[ls1 + IV_ARG], %o0;					\
271	ldn	[ls1 + IV_SOFTINT_ARG2], %o1;					\
272	call	os2;							\
273	lduh	[ls1 + IV_PIL], ls1;					\
274	brnz,pt	%o0, 2f;						\
275	mov	CE_WARN, %o0;						\
276	set	_spurious, %o1;						\
277	mov	ls2, %o2;						\
278	call	cmn_err;						\
279	rdpr	%pil, %o3;						\
2802:	ldn	[THREAD_REG + T_CPU], cpu;				\
281	sll	ls1, 3, os1;						\
282	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
283	ldx	[cpu + os2], os3;					\
284	inc	os3;							\
285	stx	os3, [cpu + os2];					\
286	sll	ls1, CPTRSHIFT, os2;					\
287	add	cpu,  INTR_HEAD, os1;					\
288	add	os1, os2, os1;						\
289	ldn	[os1], os3;
290
291/*
 * SERVE_INTR_NEXT: with interrupts briefly disabled, unlink the next
 * pending intr_req (os3, left by the prior SERVE_INTR) from
 * intr_head[pil] -- clearing intr_tail[pil] and this level's softint
 * bit if the list empties -- return it to the CPU's free list, and
 * clear its vector's iv_pending flag.  On completion
 * ls1 = &intr_vector[inum] and ls2 = inum, ready for the immediately
 * following SERVE_INTR.
 *
292 * Registers on entry:
293 *
294 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
295 * ls1, os3		- preserved from prior call to SERVE_INTR
296 * ls2			- local scratch reg (not preserved)
297 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
298 */
299#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
300	sll	ls1, CPTRSHIFT, os4;					\
301	add	cpu, INTR_HEAD, os1;					\
302	rdpr	%pstate, ls2;						\
303	wrpr	ls2, PSTATE_IE, %pstate;				\
304	ldn 	[os3 + INTR_NEXT], os2;					\
305	brnz,pn	os2, 4f;						\
306	stn	os2, [os1 + os4];					\
307	add	cpu, INTR_TAIL, os1;					\
308	stn	%g0, [os1 + os4];					\
309	mov	1, os1;							\
310	sll	os1, ls1, os1;						\
311	wr	os1, CLEAR_SOFTINT;					\
3124:	ldn	[cpu + INTR_HEAD], os1;					\
313	ld 	[os3 + INTR_NUMBER], os5;				\
314	stn	os3, [cpu + INTR_HEAD];					\
315	stn	os1, [os3 + INTR_NEXT];					\
316	set	intr_vector, ls1;					\
317	sll	os5, INTR_VECTOR_SHIFT, os1;				\
318	add	ls1, os1, ls1;						\
319	sth	%g0, [ls1 + IV_PENDING];				\
320	wrpr	%g0, ls2, %pstate;					\
321	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);			\
322	mov	os5, ls2;
323
324#ifdef TRAPTRACE
325/*
 * Record a TT_SERVE_INTR trap-trace entry (tpc/tstate taken from the
 * regs structure passed in os4) with interrupts disabled while the
 * trace buffer is written.
326 * inum - not modified, _spurious depends on it.
327 */
328#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
329	rdpr	%pstate, os3;						\
330	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
331	wrpr	%g0, os2, %pstate;					\
332	TRACE_PTR(os1, os2);						\
333	ldn	[os4 + PC_OFF], os2;					\
334	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
335	ldx	[os4 + TSTATE_OFF], os2;				\
336	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
337	mov	os3, os4;						\
338	GET_TRACE_TICK(os2); 						\
339	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
340	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
341	set	TT_SERVE_INTR, os2;					\
342	rdpr	%pil, os3;						\
343	or	os2, os3, os2;						\
344	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
345	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
346	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
347	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
348	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
349	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
350	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
351	TRACE_NEXT(os1, os2, os3);					\
352	wrpr	%g0, os4, %pstate
353#else	/* TRAPTRACE */
354#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
355#endif	/* TRAPTRACE */
356
357#ifdef TRAPTRACE
358/*
 * Like SERVE_INTR_TRACE, but used from SERVE_INTR_NEXT where no regs
 * structure is available: tpc/tstate are recorded as zero.
359 * inum - not modified, _spurious depends on it.
360 */
361#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
362	rdpr	%pstate, os3;						\
363	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
364	wrpr	%g0, os2, %pstate;					\
365	TRACE_PTR(os1, os2);						\
366	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
367	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
368	mov	os3, os4;						\
369	GET_TRACE_TICK(os2); 						\
370	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
371	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
372	set	TT_SERVE_INTR, os2;					\
373	rdpr	%pil, os3;						\
374	or	os2, os3, os2;						\
375	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
376	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
377	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
378	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
379	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
380	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
381	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
382	TRACE_NEXT(os1, os2, os3);					\
383	wrpr	%g0, os4, %pstate
384#else	/* TRAPTRACE */
385#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
386#endif	/* TRAPTRACE */
387
388#endif	/* lint */
389
390#if defined(lint)
391
/* lint stub only -- the real intr_thread is the assembly below. */
392/*ARGSUSED*/
393void
394intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
395{}
396
397#else	/* lint */
398
/*
 * Number of handler invocations a pinned thread may sit under before
 * intr_thread forces a preempt() to unpin it (see the cpu_intrcnt
 * handling in intr_thread below).
 */
399#define	INTRCNT_LIMIT 16
400
401/*
402 * Handle an interrupt in a new thread.
 *	Entered from sys_trap() (dispatched by pil_interrupt above for
 *	pil <= LOCK_LEVEL); returns through %o7+8 back to the trap code.
403 *	Entry:
404 *		%o0       = pointer to regs structure
405 *		%o1       = inumber
406 *		%o2       = pil
407 *		%sp       = on current thread's kernel stack
408 *		%o7       = return linkage to trap code
409 *		%g7       = current thread
410 *		%pstate   = normal globals, interrupts enabled,
411 *		            privileged, fp disabled
412 *		%pil      = DISP_LEVEL
413 *
414 *	Register Usage
415 *		%l0       = return linkage
416 *		%l1       = pil
417 *		%l2 - %l3 = scratch
418 *		%l4 - %l7 = reserved for sys_trap
419 *		%o2       = cpu
420 *		%o3       = intr thread
421 *		%o0       = scratch
422 *		%o4 - %o5 = scratch
423 */
424	ENTRY_NP(intr_thread)
425	mov	%o7, %l0
426	mov	%o2, %l1
427	!
428	! See if we are interrupting another interrupt thread.
429	!
430	lduh	[THREAD_REG + T_FLAGS], %o3
431	andcc	%o3, T_INTR_THREAD, %g0
432	bz,pt	%xcc, 1f
433	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer
434
435	! We have interrupted an interrupt thread. Take a timestamp,
436	! compute its interval, and update its cumulative counter.
437	add	THREAD_REG, T_INTR_START, %o5
4380:
439	ldx	[%o5], %o3
440	brz,pn	%o3, 1f
441	! We came in on top of an interrupt thread that had no timestamp.
442	! This could happen if, for instance, an interrupt thread which had
443	! previously blocked is being set up to run again in resume(), but
444	! resume() hasn't yet stored a timestamp for it. Or, it could be in
445	! swtch() after its slice has been accounted for.
446	! Only account for the time slice if the starting timestamp is non-zero.
447	rdpr	%tick, %o4			! delay
448	sllx	%o4, 1, %o4			! shift off NPT bit
449	srlx	%o4, 1, %o4
450	sub	%o4, %o3, %o4			! o4 has interval
451
452	! A high-level interrupt in current_thread() interrupting here
453	! will account for the interrupted thread's time slice, but
454	! only if t_intr_start is non-zero. Since this code is going to account
455	! for the time slice, we want to "atomically" load the thread's
456	! starting timestamp, calculate the interval with %tick, and zero
457	! its starting timestamp.
458	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
459	! If it has changed since we loaded it above, we need to re-compute the
460	! interval, since a changed t_intr_start implies current_thread placed
461	! a new, later timestamp there after running a high-level interrupt,
462	! and the %tick val in %o4 had become stale.
463	mov	%g0, %l2
464	casx	[%o5], %o3, %l2
465
466	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
467	! changed between loading it (after label 0b) and computing the
468	! interval above.
469	cmp	%l2, %o3
470	bne,pn	%xcc, 0b
471
472	! Check for Energy Star mode
473	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
474	cmp	%l2, 1
475	bg,a,pn	%xcc, 2f
476	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
4772:
478	! We now know that a valid interval for the interrupted interrupt
479	! thread is in %o4. Update its cumulative counter.
480	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
481	sllx	%l3, 4, %l3		! convert PIL index to byte offset
482	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
483	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
484	ldx	[%o2 + %l3], %o5	! old counter in o5
485	add	%o5, %o4, %o5		! new counter in o5
486	stx	%o5, [%o2 + %l3]	! store new counter
487
488	! Also update intracct[]
489	lduh	[%o2 + CPU_MSTATE], %l3
490	sllx	%l3, 3, %l3
491	add	%l3, CPU_INTRACCT, %l3
492	add	%l3, %o2, %l3
4930:
494	ldx	[%l3], %o5
495	add	%o5, %o4, %o3
496	casx	[%l3], %o5, %o3
497	cmp	%o5, %o3
498	bne,pn	%xcc, 0b
499	nop
500
5011:
502	!
503	! Get set to run interrupt thread.
504	! There should always be an interrupt thread since we allocate one
505	! for each level on the CPU.
506	!
507	! Note that the code in kcpc_overflow_intr -relies- on the ordering
508	! of events here -- in particular that t->t_lwp of the interrupt thread
509	! is set to the pinned thread *before* curthread is changed.
510	!
511	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
512	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
513	stn	%o4, [%o2 + CPU_INTR_THREAD]
514	!
515	! Set bit for this level in CPU's active interrupt bitmask.
516	!
517	ld	[%o2 + CPU_INTR_ACTV], %o5
518	mov	1, %o4
519	sll	%o4, %l1, %o4
520#ifdef DEBUG
521	!
522	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
523	!
524	andcc	%o5, %o4, %g0
525	bz,pt	%xcc, 0f
526	nop
527	! Do not call panic if a panic is already in progress.
528	sethi	%hi(panic_quiesce), %l2
529	ld	[%l2 + %lo(panic_quiesce)], %l2
530	brnz,pn	%l2, 0f
531	nop
532	sethi	%hi(intr_thread_actv_bit_set), %o0
533	call	panic
534	or	%o0, %lo(intr_thread_actv_bit_set), %o0
5350:
536#endif /* DEBUG */
537	or	%o5, %o4, %o5
538	st	%o5, [%o2 + CPU_INTR_ACTV]
539	!
540	! Consider the new thread part of the same LWP so that
541	! window overflow code can find the PCB.
542	!
543	ldn	[THREAD_REG + T_LWP], %o4
544	stn	%o4, [%o3 + T_LWP]
545	!
546	! Threads on the interrupt thread free list could have state already
547	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
548	! Could eliminate the next two instructions with a little work.
549	!
550	mov	TS_ONPROC, %o4
551	st	%o4, [%o3 + T_STATE]
552	!
553	! Push interrupted thread onto list from new thread.
554	! Set the new thread as the current one.
555	! Set interrupted thread's T_SP because if it is the idle thread,
556	! resume may use that stack between threads.
557	!
558	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
559	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
560	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
561	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
562	mov	%o3, THREAD_REG			! set global curthread register
563	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
564	sub	%o4, STACK_BIAS, %sp
565	!
566	! Initialize thread priority level from intr_pri
567	!
568	sethi	%hi(intr_pri), %o4
569	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
570	add	%l1, %o4, %o4		! convert level to dispatch priority
571	sth	%o4, [THREAD_REG + T_PRI]
572	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate
573
574	! Store starting timestamp in thread structure.
575	add	THREAD_REG, T_INTR_START, %o3
5761:
577	ldx	[%o3], %o5
578	rdpr	%tick, %o4
579	sllx	%o4, 1, %o4
580	srlx	%o4, 1, %o4			! shift off NPT bit
581	casx	[%o3], %o5, %o4
582	cmp	%o4, %o5
583	! If a high-level interrupt occurred while we were attempting to store
584	! the timestamp, try again.
585	bne,pn	%xcc, 1b
586	nop
587
588	wrpr	%g0, %l1, %pil			! lower %pil to new level
589	!
590	! Fast event tracing.
591	!
592	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
593	btst	FTRACE_ENABLED, %o4
594	be,pt	%icc, 1f			! skip if ftrace disabled
595	  mov	%l1, %o5
596	!
597	! Tracing is enabled - write the trace entry.
598	!
599	save	%sp, -SA(MINFRAME), %sp
600	set	ftrace_intr_thread_format_str, %o0
601	mov	%i0, %o1
602	mov	%i1, %o2
603	call	ftrace_3
604	mov	%i5, %o3
605	restore
6061:
607	!
608	! call the handler
609	!
610	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
611	!
612	! %o0 and %o1 are now available as scratch registers.
613	!
6140:
615	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
616	!
617	! If %o3 is set, we must call serve_intr_post, and both %l1 and %o3
618	! must be preserved. %l1 holds our pil, %l3 holds our inum.
619	!
620	! Note: %l1 is the pil level we're processing, but we may have a
621	! higher effective pil because a higher-level interrupt may have
622	! blocked.
623	!
624	wrpr	%g0, DISP_LEVEL, %pil
625	!
626	! Take timestamp, compute interval, update cumulative counter.
627	!
628	add	THREAD_REG, T_INTR_START, %o5
6291:
630	ldx	[%o5], %o0
631#ifdef DEBUG
632	brnz	%o0, 9f
633	nop
634	! Do not call panic if a panic is already in progress.
635	sethi	%hi(panic_quiesce), %o1
636	ld	[%o1 + %lo(panic_quiesce)], %o1
637	brnz,pn	%o1, 9f
638	nop
639	sethi	%hi(intr_thread_t_intr_start_zero), %o0
640	call	panic
641	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
6429:
643#endif /* DEBUG */
644	rdpr	%tick, %o1
645	sllx	%o1, 1, %o1
646	srlx	%o1, 1, %o1			! shift off NPT bit
647	sub	%o1, %o0, %l2			! l2 has interval
648	!
649	! The general outline of what the code here does is:
650	! 1. load t_intr_start, %tick, and calculate the delta
651	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
652	!
653	! The problem is that a high-level interrupt could arrive at any time.
654	! It will account for (%tick - t_intr_start) for us when it starts,
655	! unless we have set t_intr_start to zero, and then set t_intr_start
656	! to a new %tick when it finishes. To account for this, our first step
657	! is to load t_intr_start and the last is to use casx to store the new
658	! t_intr_start. This guarantees atomicity in reading t_intr_start,
659	! reading %tick, and updating t_intr_start.
660	!
661	movrz	%o3, %g0, %o1
662	casx	[%o5], %o0, %o1
663	cmp	%o0, %o1
664	bne,pn	%xcc, 1b
665	!
666	! Check for Energy Star mode
667	!
668	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
669	cmp	%o0, 1
670	bg,a,pn	%xcc, 2f
671	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
6722:
673	!
674	! Update cpu_intrstat. If o3 is set then we will be processing another
675	! interrupt. Above we have set t_intr_start to %tick, not 0. This
676	! means a high-level interrupt can arrive and update the same stats
677	! we're updating. Need to use casx.
678	!
679	sllx	%l1, 4, %o1			! delay - PIL as byte offset
680	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
681	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
682	add	%o1, %o2, %o1
6831:
684	ldx	[%o1], %o5			! old counter in o5
685	add	%o5, %l2, %o0			! new counter in o0
686 	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
687	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
688	cmp	%o5, %o0
689	bne,pn	%xcc, 1b
690	nop
691
692	! Also update intracct[]
693	lduh	[%o2 + CPU_MSTATE], %o1
694	sllx	%o1, 3, %o1
695	add	%o1, CPU_INTRACCT, %o1
696	add	%o1, %o2, %o1
6971:
698	ldx	[%o1], %o5
699	add	%o5, %l2, %o0
700	casx	[%o1], %o5, %o0
701	cmp	%o5, %o0
702	bne,pn	%xcc, 1b
703	nop
704
705	!
706	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
707	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
708	! we've crossed the threshold and we should unpin the pinned threads
709	! by preempt()ing ourselves, which will bubble up the t_intr chain
710	! until hitting the non-interrupt thread, which will then in turn
711	! preempt itself allowing the interrupt processing to resume. Finally,
712	! the scheduler takes over and picks the next thread to run.
713	!
714	! If our CPU is quiesced, we cannot preempt because the idle thread
715	! won't ever re-enter the scheduler, and the interrupt will be forever
716	! blocked.
717	!
718	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
719	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
720	! This insures we enter the scheduler if a higher-priority thread
721	! has become runnable.
722	!
723	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
724	andcc	%o5, CPU_QUIESCED, %g0
725	bnz,pn	%xcc, 1f
726
727	ldn     [THREAD_REG + T_INTR], %o5      ! pinning anything?
728	brz,pn  %o5, 3f				! if not, don't inc intrcnt
729
730	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
731	inc	%o5
732	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
733	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
734	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
735	bg,pn	%xcc, 2f			! don't inc stats again
736	!
737	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
738	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
739	!
740	mov	1, %o4				! delay
741	stub	%o4, [%o2 + CPU_KPRUNRUN]
742	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
743	inc	%o4
744	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
745	ba	2f
746	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
7473:
748	! Code for t_intr == NULL
749	ldub	[%o2 + CPU_KPRUNRUN], %o5
750	brz,pt	%o5, 1f				! don't preempt unless kprunrun
7512:
752	! Time to call preempt
753	mov	%o2, %l3			! delay - save %o2
754	call	preempt
755	mov	%o3, %l2			! delay - save %o3.
756	mov	%l3, %o2			! restore %o2
757	mov	%l2, %o3			! restore %o3
758	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
7591:
760	!
761	! Do we need to call serve_intr_post and do this again?
762	!
763	brz,a,pt %o3, 0f
764	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
765	!
766	! Restore %pil before calling serve_intr() again. We must check
767	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
768	!
769	ld	[%o2 + CPU_BASE_SPL], %o4
770	cmp	%o4, %l1
771	movl	%xcc, %l1, %o4
772	wrpr	%g0, %o4, %pil
773	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
774	ba	0b				! compute new stats
775	nop
7760:
777	!
778	! Clear bit for this level in CPU's interrupt active bitmask.
779	!
780	mov	1, %o4
781	sll	%o4, %l1, %o4
782#ifdef DEBUG
783	!
784	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
785	!
786	andcc	%o4, %o5, %g0
787	bnz,pt	%xcc, 0f
788	nop
789	! Do not call panic if a panic is already in progress.
790	sethi	%hi(panic_quiesce), %l2
791	ld	[%l2 + %lo(panic_quiesce)], %l2
792	brnz,pn	%l2, 0f
793	nop
794	sethi	%hi(intr_thread_actv_bit_not_set), %o0
795	call	panic
796	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
7970:
798#endif /* DEBUG */
799	andn	%o5, %o4, %o5
800	st	%o5, [%o2 + CPU_INTR_ACTV]
801	!
802	! If there is still an interrupted thread underneath this one,
803	! then the interrupt was never blocked and the return is fairly
804	! simple.  Otherwise jump to intr_thread_exit.
805	!
806	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
807	brz,pn	%o4, intr_thread_exit		! branch if none
808	nop
809	!
810	! link the thread back onto the interrupt thread pool
811	!
812	ldn	[%o2 + CPU_INTR_THREAD], %o3
813	stn	%o3, [THREAD_REG + T_LINK]
814	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
815	!
816	! set the thread state to free so kernel debuggers don't see it
817	!
818	mov	TS_FREE, %o5
819	st	%o5, [THREAD_REG + T_STATE]
820	!
821	! Switch back to the interrupted thread and return
822	!
823	stn	%o4, [%o2 + CPU_THREAD]
824	membar	#StoreLoad			! sync with mutex_exit()
825	mov	%o4, THREAD_REG
826
827	! If we pinned an interrupt thread, store its starting timestamp.
828	lduh	[THREAD_REG + T_FLAGS], %o5
829	andcc	%o5, T_INTR_THREAD, %g0
830	bz,pt	%xcc, 1f
831	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
832
833	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
8340:
835	ldx	[%o3], %o4			! o4 = t_intr_start before
836	rdpr	%tick, %o5
837	sllx	%o5, 1, %o5
838	srlx	%o5, 1, %o5			! shift off NPT bit
839	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
840	cmp	%o4, %o5
841	! If a high-level interrupt occurred while we were attempting to store
842	! the timestamp, try again.
843	bne,pn	%xcc, 0b
844	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
8451:
846	! If the thread being restarted isn't pinning anyone, and no interrupts
847	! are pending, zero out cpu_intrcnt
848	ldn	[THREAD_REG + T_INTR], %o4
849	brnz,pn	%o4, 2f
850	rd	SOFTINT, %o4			! delay
851	set	SOFTINT_MASK, %o5
852	andcc	%o4, %o5, %g0
853	bz,a,pt	%xcc, 2f
854	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
8552:
856	jmp	%l0 + 8
857	nop
858	SET_SIZE(intr_thread)
859	/* Not Reached */
860
861	!
862	! An interrupt returned on what was once (and still might be)
863	! an interrupt thread stack, but the interrupted process is no longer
864	! there.  This means the interrupt must have blocked.
865	!
866	! There is no longer a thread under this one, so put this thread back
867	! on the CPU's free list and resume the idle thread which will dispatch
868	! the next thread to run.
869	!
870	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
871	! is enabled.
872	!
	! Entered by a branch from intr_thread above, so %o2 is still the
	! CPU pointer and %l1 the pil being processed (assumption based on
	! that caller; verify if a new caller is added).  Never returns --
	! ends in swtch().
	!
873	ENTRY_NP(intr_thread_exit)
874#ifdef TRAPTRACE
875	rdpr	%pstate, %l2
876	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
877	wrpr	%g0, %o4, %pstate			! cpu to known state
878	TRACE_PTR(%o4, %o5)
879	GET_TRACE_TICK(%o5)
880	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
881	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
882	set	TT_INTR_EXIT, %o5
883	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
884	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
885	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
886	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
887	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
888	ld	[%o2 + CPU_BASE_SPL], %o5
889	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
890	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
891	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
892	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
893	TRACE_NEXT(%o4, %o5, %o0)
894	wrpr	%g0, %l2, %pstate
895#endif /* TRAPTRACE */
896	! cpu_stats.sys.intrblk++
897        ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
898        inc     %o4
899        stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
900	!
901	! Put thread back on the interrupt thread list.
902	!
903
904	!
905	! Set the CPU's base SPL level.
906	!
907#ifdef DEBUG
908	!
909	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
910	!
911	ld	[%o2 + CPU_INTR_ACTV], %o5
912	mov	1, %o4
913	sll	%o4, %l1, %o4
914	and	%o5, %o4, %o4
915	brz,pt	%o4, 0f
916	nop
917	! Do not call panic if a panic is already in progress.
918	sethi	%hi(panic_quiesce), %l2
919	ld	[%l2 + %lo(panic_quiesce)], %l2
920	brnz,pn	%l2, 0f
921	nop
922	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
923	call	panic
924	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
9250:
926#endif /* DEBUG */
927	call	_intr_set_spl			! set CPU's base SPL level
928	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
929	!
930	! set the thread state to free so kernel debuggers don't see it
931	!
932	mov	TS_FREE, %o4
933	st	%o4, [THREAD_REG + T_STATE]
934	!
935	! Put thread on either the interrupt pool or the free pool and
936	! call swtch() to resume another thread.
937	!
938	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
939	stn	%o5, [THREAD_REG + T_LINK]
940	call	swtch				! switch to best thread
941	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
942	ba,a,pt	%xcc, .				! swtch() shouldn't return
943	SET_SIZE(intr_thread_exit)
944
	!
	! ftrace format string and (DEBUG-only) panic messages referenced
	! by intr_thread/intr_thread_exit above.
	!
945	.global ftrace_intr_thread_format_str
946ftrace_intr_thread_format_str:
947	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
948#ifdef DEBUG
949intr_thread_actv_bit_set:
950	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
951intr_thread_actv_bit_not_set:
952	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
953intr_thread_exit_actv_bit_set:
954	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
955intr_thread_t_intr_start_zero:
956	.asciz	"intr_thread():	t_intr_start zero upon handler return"
957#endif /* DEBUG */
958#endif	/* lint */
959
960#if defined(lint)
961
962/*
963 * Handle an interrupt in the current thread
964 *	Entry:
965 *		%o0       = pointer to regs structure
966 *		%o1       = inumber
967 *		%o2       = pil
968 *		%sp       = on current thread's kernel stack
969 *		%o7       = return linkage to trap code
970 *		%g7       = current thread
971 *		%pstate   = normal globals, interrupts enabled,
972 *		            privileged, fp disabled
973 *		%pil      = PIL_MAX
974 *
975 *	Register Usage
976 *		%l0       = return linkage
977 *		%l1       = old stack
978 *		%l2 - %l3 = scratch
979 *		%l4 - %l7 = reserved for sys_trap
980 *		%o3       = cpu
981 *		%o0       = scratch
982 *		%o4 - %o5 = scratch
983 */
/* lint stub only -- the real current_thread is the assembly below. */
984/* ARGSUSED */
985void
986current_thread(struct regs *regs, uint_t inumber, uint_t pil)
987{}
988
989#else	/* lint */
990
	ENTRY_NP(current_thread)

	mov	%o7, %l0			! %l0 = return linkage to trap code
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:

	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1			! shake off NPT bit
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif /* DEBUG */
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
        sub     %o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
        stx     %o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! if %o2, more intrs await
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ld	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr 	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	! NOTE(review): tick_rtt is expected to rejoin at
	! current_thread_complete below (hence the global label) -- confirm
	! in the cpu-specific module.
	cmp	%o2, PIL_14
	be	tick_rtt			!  cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
        sub     %o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx     [%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp			! if not, restore saved %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	! NOTE(review): unlike the equivalent assert earlier in this routine,
	! this one does not check panic_quiesce first -- confirm intentional.
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)
1408
1409
#ifdef DEBUG
	! Assertion/panic message strings used by the DEBUG checks in
	! current_thread() above.
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
1423#endif /* DEBUG */
1424#endif /* lint */
1425
1426/*
1427 * Return a thread's interrupt level.
1428 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1429 * must dig it out of the save area.
1430 *
1431 * Caller 'swears' that this really is an interrupt thread.
1432 *
1433 * int
1434 * intr_level(t)
1435 *	kthread_id_t	t;
1436 */
1437
1438#if defined(lint)
1439
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }
1444
1445#else	/* lint */
1446
	ENTRY_NP(intr_level)
	! %o0 = interrupt thread pointer; return its saved PIL (t_pil)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)
1451
1452#endif	/* lint */
1453
1454#if defined(lint)
1455
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }
1460
1461#else	/* lint */
1462
	ENTRY_NP(disable_pil_intr)
	! Raise %pil to PIL_MAX; return the previous %pil in %o0 so the
	! caller can later restore it via enable_pil_intr().
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)
1468
1469#endif	/* lint */
1470
1471#if defined(lint)
1472
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}
1477
1478#else	/* lint */
1479
	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil		! delay - restore saved %pil (%o0)
	SET_SIZE(enable_pil_intr)
1484
1485#endif	/* lint */
1486
1487#if defined(lint)
1488
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }
1493
1494#else	/* lint */
1495
	ENTRY_NP(disable_vec_intr)
	! Clear PSTATE.IE; return the previous %pstate in %o0 so the caller
	! can restore it via enable_vec_intr().
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)
1502
1503#endif	/* lint */
1504
1505#if defined(lint)
1506
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}
1511
1512#else	/* lint */
1513
	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate	! delay - restore saved %pstate (%o0)
	SET_SIZE(enable_vec_intr)
1518
1519#endif	/* lint */
1520
1521#if defined(lint)
1522
/* Lint-only stub; the real implementation is the SPARC assembly version. */
void
cbe_level14(void)
{}
1526
1527#else   /* lint */
1528
	ENTRY_NP(cbe_level14)
	save    %sp, -SA(MINFRAME), %sp ! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0		! delay - arg0 = this CPU's pointer
2:
	ret
	restore	%g0, 1, %o0		! delay - return 1
	SET_SIZE(cbe_level14)
1547
1548#endif  /* lint */
1549
1550
1551#if defined(lint)
1552
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
void
setsoftint(uint_t inum)
{}
1557
1558#else	/* lint */
1559
	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number.
	! Put the request on the cpu's softint list,
	! and set %set_softint.
	!
	! Register usage
	!	%i0 - inumber
	!	%l2 - requested pil
	!	%l3 - intr_req
	!	%l4 - *cpu
	!	%l1, %l6 - temps
	!
	! check if a softint is pending for this inum already
	! if one is pending, don't bother queuing another
	!
	set	intr_vector, %l1
	sll	%i0, INTR_VECTOR_SHIFT, %l6
	add	%l1, %l6, %l1			! %l1 = &intr_vector[inum]
	lduh	[%l1 + IV_PENDING], %l6
	brnz,pn	%l6, 4f				! branch, if pending
	or	%g0, 1, %l2			! delay - %l2 = 1 (pend flag)
	sth	%l2, [%l1 + IV_PENDING]		! intr_vector[inum].pend = 1
	!
	! allocate an intr_req from the free list
	!
	! NOTE(review): unlike setsoftint_tl1, the free list is not checked
	! for exhaustion here -- presumably guaranteed non-empty on this
	! path; confirm.
	!
	CPU_ADDR(%l4, %l2)
	ldn	[%l4 + INTR_HEAD], %l3
	lduh	[%l1 + IV_PIL], %l2
	!
	! fixup free list
	!
	ldn	[%l3 + INTR_NEXT], %l6
	stn	%l6, [%l4 + INTR_HEAD]
	!
	! fill up intr_req
	!
	st	%i0, [%l3 + INTR_NUMBER]
	stn	%g0, [%l3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%l2, CPTRSHIFT, %l0
	add	%l4, INTR_TAIL, %l6
	ldn	[%l6 + %l0], %l1	! current tail
	brz,pt	%l1, 2f			! branch if list empty
	stn	%l3, [%l6 + %l0]	! make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%l3, [%l1 + INTR_NEXT]	! update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%l4, INTR_HEAD, %l6
	stn	%l3, [%l6 + %l0]
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1
	sll	%l1, %l2, %l1
	wr	%l1, SET_SOFTINT
4:
	wrpr	%g0, %l5, %pstate	! restore saved %pstate (%l5)
	ret
	restore
	SET_SIZE(setsoftint)
1636
1637#endif	/* lint */
1638
1639#if defined(lint)
1640
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/*ARGSUSED*/
void
setsoftint_tl1(uint64_t inum, uint64_t dummy)
{}
1645
1646#else	/* lint */
1647
1648	!
1649	! Register usage
1650	!
1651	! Arguments:
1652	! %g1 - inumber
1653	!
1654	! Internal:
1655	! %g2 - requested pil
1656	! %g3 - intr_req
1657	! %g4 - cpu pointer
1658	! %g5,%g6,%g7 - temps
1659	!
	ENTRY_NP(setsoftint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number. Put the request on the cpu's softint
	! list, and set %set_softint.
	!
	set	intr_vector, %g5
	sll	%g1, INTR_VECTOR_SHIFT, %g6
	add	%g5, %g6, %g5			! %g5 = &intr_vector[inum]

	!
	! allocate an intr_req from the free list
	!
	CPU_ADDR(%g4, %g2)
	ldn	[%g4 + INTR_HEAD], %g3

	! load the pil so it can be used by .no_intr_pool/.no_ivintr
	lduh	[%g5 + IV_PIL], %g2

	! Verify that the free list is not exhausted.
	brz,pn	%g3, .no_intr_pool
	nop

	! Verify the intr_vector[] entry according to the inumber.
	! The iv_pil field should not be zero.  This used to be
	! guarded by DEBUG but broken drivers can cause spurious
	! tick interrupts when the softint register is programmed
	! with 1 << 0 at the end of this routine.  Now we always
	! check for an invalid pil.
	brz,pn	%g2, .no_ivintr
	nop

	!
	! fixup free list
	!
	ldn	[%g3 + INTR_NEXT], %g6
	stn	%g6, [%g4 + INTR_HEAD]

	!
	! fill in intr_req
	!
	st	%g1, [%g3 + INTR_NUMBER]
	stn	%g0, [%g3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%g2, CPTRSHIFT, %g7
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g5	! current tail
	brz,pt	%g5, 2f			! branch if list empty
	stn	%g3, [%g6 + %g7]	! make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%g3, [%g5 + INTR_NEXT]	! update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%g4, INTR_HEAD, %g6
	stn	%g3, [%g6 + %g7]
3:
#ifdef TRAPTRACE
	! Record this softint in the trap trace buffer.
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g1 + TRAP_ENT_SP]%asi
	ld	[%g3 + INTR_NUMBER], %g6
	stna	%g6, [%g1 + TRAP_ENT_TR]%asi
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! intr_head[pil]
	stna	%g6, [%g1 + TRAP_ENT_F1]%asi
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! intr_tail[pil]
	stna	%g6, [%g1 + TRAP_ENT_F2]%asi
	stna	%g2, [%g1 + TRAP_ENT_F3]%asi	! pil
	stna	%g3, [%g1 + TRAP_ENT_F4]%asi	! intr_req
	TRACE_NEXT(%g1, %g6, %g5)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5
	sll	%g5, %g2, %g5
	wr	%g5, SET_SOFTINT
4:
	retry

.no_intr_pool:
	! no_intr_pool: rp, inum (%g1), pil (%g2)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_intr_pool, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	! delay - sys_trap at PIL_15

.no_ivintr:
	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_ivintr, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	! delay - sys_trap at PIL_15
	SET_SIZE(setsoftint_tl1)
1780
1781#endif	/* lint */
1782
1783#if defined(lint)
1784
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/*ARGSUSED*/
void
wr_clr_softint(uint_t value)
{}
1789
1790#else
1791
	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT	! delay - write %o0 to CLEAR_SOFTINT
	SET_SIZE(wr_clr_softint)
1796
1797#endif /* lint */
1798
1799#if defined(lint)
1800
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/*ARGSUSED*/
void
intr_enqueue_req(uint_t pil, uint32_t inum)
{}
1805
1806#else   /* lint */
1807
1808/*
1809 * intr_enqueue_req
1810 *
1811 * %o0 - pil
1812 * %o1 - inum
1813 * %o5 - preserved
1814 * %g5 - preserved
1815 */
	ENTRY_NP(intr_enqueue_req)
	! get intr_req free list
	! NOTE(review): the free list is not checked for exhaustion here --
	! presumably guaranteed non-empty for this caller; confirm.
	CPU_ADDR(%g4, %g1)
	ldn	[%g4 + INTR_HEAD], %g3

	! take intr_req from free list
	ldn	[%g3 + INTR_NEXT], %g6
	stn	%g6, [%g4 + INTR_HEAD]

	! fill up intr_req
	st	%o1, [%g3 + INTR_NUMBER]
	stn	%g0, [%g3 + INTR_NEXT]

	! add intr_req to proper pil list
	sll	%o0, CPTRSHIFT, %o0
	add	%g4, INTR_TAIL, %g6
	ldn	[%o0 + %g6], %g1	! current tail
	brz,pt	%g1, 2f			! branch if list is empty
	stn	%g3, [%g6 + %o0]	! make intr_req the new tail

	! an intr_req was already queued so update old tail
	ba,pt	%xcc, 3f
	stn	%g3, [%g1 + INTR_NEXT]
2:
	! no intr_req's queued so make intr_req the new head
	add	%g4, INTR_HEAD, %g6
	stn	%g3, [%g6 + %o0]
3:
	retl
	nop
	SET_SIZE(intr_enqueue_req)
1847
1848#endif  /* lint */
1849
1850/*
1851 * Set CPU's base SPL level, based on which interrupt levels are active.
1852 * 	Called at spl7 or above.
1853 */
1854
1855#if defined(lint)
1856
/* Lint-only stub; the real implementation is the SPARC assembly version. */
void
set_base_spl(void)
{}
1860
1861#else	/* lint */
1862
	ENTRY_NP(set_base_spl)
	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask

/*
 * WARNING: non-standard calling sequence; do not call from C
 *	%o2 = pointer to CPU
 *	%o5 = updated CPU_INTR_ACTV
 */
_intr_set_spl:					! intr_thread_exit enters here
	!
	! Determine highest interrupt level active.  Several could be blocked
	! at higher levels than this one, so must convert flags to a PIL
	! Normally nothing will be blocked, so test this first.
	!
	brz,pt	%o5, 1f				! nothing active
	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
	set	_intr_flag_table, %o1
	tst	%o3				! see if any of the bits set
	ldub	[%o1 + %o3], %o3		! load bit number
	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
	add	%o3, 11-1, %o3			! delay - add bit number - 1

	sra	%o5, 6, %o3			! test bits 10-6
	tst	%o3
	ldub	[%o1 + %o3], %o3
	bnz,a,pn %xcc, 1f
	add	%o3, 6-1, %o3

	sra	%o5, 1, %o3			! test bits 5-1
	ldub	[%o1 + %o3], %o3

	!
	! highest interrupt level number active is in %o3
	!
1:
	retl
	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
	SET_SIZE(set_base_spl)
1902
1903/*
1904 * Table that finds the most significant bit set in a five bit field.
 * Each entry is the high-order bit number + 1 of its index in the table.
1906 * This read-only data is in the text segment.
1907 */
_intr_flag_table:
	! _intr_flag_table[i] = (position of highest set bit of i) + 1,
	! for 0 <= i < 32; entry 0 is 0 (no bits set).
	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
	.align	4
1912
1913#endif	/* lint */
1914
1915/*
1916 * int
1917 * intr_passivate(from, to)
1918 *	kthread_id_t	from;		interrupt thread
1919 *	kthread_id_t	to;		interrupted thread
1920 */
1921
1922#if defined(lint)
1923
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }
1928
1929#else	/* lint */
1930
	ENTRY_NP(intr_passivate)
	! %i0 = interrupt thread, %i1 = interrupted thread.
	! Copies the register save area from the base of the interrupt
	! thread's stack to the top of the interrupted thread's stack,
	! then returns the interrupt thread's saved PIL (t_pil).
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	flushw				! force register windows to stack
	!
	! restore registers from the base of the stack of the interrupt thread.
	!
	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
	ldn	[%i2 + (1*GREGSIZE)], %l1
	ldn	[%i2 + (2*GREGSIZE)], %l2
	ldn	[%i2 + (3*GREGSIZE)], %l3
	ldn	[%i2 + (4*GREGSIZE)], %l4
	ldn	[%i2 + (5*GREGSIZE)], %l5
	ldn	[%i2 + (6*GREGSIZE)], %l6
	ldn	[%i2 + (7*GREGSIZE)], %l7
	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
	ldn	[%i2 + (9*GREGSIZE)], %o1
	ldn	[%i2 + (10*GREGSIZE)], %o2
	ldn	[%i2 + (11*GREGSIZE)], %o3
	ldn	[%i2 + (12*GREGSIZE)], %o4
	ldn	[%i2 + (13*GREGSIZE)], %o5
	ldn	[%i2 + (14*GREGSIZE)], %i4
					! copy stack/pointer without using %sp
	ldn	[%i2 + (15*GREGSIZE)], %i5
	!
	! put registers into the save area at the top of the interrupted
	! thread's stack, pointed to by %l7 in the save area just loaded.
	!
	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
						! fp, %i7 copied using %i4
	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
						! clear fp in save area

	! load saved pil for return
	ldub	[%i0 + T_PIL], %i0
	ret
	restore
	SET_SIZE(intr_passivate)
1986
1987#endif	/* lint */
1988
1989#if defined(lint)
1990
1991/*
1992 * intr_get_time() is a resource for interrupt handlers to determine how
1993 * much time has been spent handling the current interrupt. Such a function
1994 * is needed because higher level interrupts can arrive during the
1995 * processing of an interrupt, thus making direct comparisons of %tick by
1996 * the handler inaccurate. intr_get_time() only returns time spent in the
1997 * current interrupt handler.
1998 *
1999 * The caller must be calling from an interrupt handler running at a pil
2000 * below or at lock level. Timings are not provided for high-level
2001 * interrupts.
2002 *
2003 * The first time intr_get_time() is called while handling an interrupt,
2004 * it returns the time since the interrupt handler was invoked. Subsequent
2005 * calls will return the time since the prior call to intr_get_time(). Time
2006 * is returned as ticks, adjusted for any clock divisor due to power
2007 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2008 * not be the same across CPUs.
2009 *
2010 * Theory Of Intrstat[][]:
2011 *
2012 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2013 * uint64_ts per pil.
2014 *
2015 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2016 * handling all interrupts at the specified pil on this CPU. It is
2017 * exported via kstats to the user.
2018 *
2019 * intrstat[pil][1] is always a count of ticks less than or equal to the
2020 * value in [0]. The difference between [1] and [0] is the value returned
2021 * by a call to intr_get_time(). At the start of interrupt processing,
2022 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2023 * time, [0] will increase, but [1] will remain the same. A call to
2024 * intr_get_time() will return the difference, then update [1] to be the
2025 * same as [0]. Future calls will return the time since the last call.
2026 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2027 *
2028 * Implementation:
2029 *
2030 * intr_get_time() works much like a higher level interrupt arriving. It
2031 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2032 * to include elapsed running time, and by setting t_intr_start to %tick.
2033 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2034 * and updates intrstat[pil][1] to be the same as the new value of
2035 * intrstat[pil][0].
2036 *
2037 * In the normal handling of interrupts, after an interrupt handler returns
2038 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2039 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2040 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2041 * is 0.
2042 *
2043 * Whenever interrupts arrive on a CPU which is handling a lower pil
2044 * interrupt, they update the lower pil's [0] to show time spent in the
2045 * handler that they've interrupted. This results in a growing discrepancy
2046 * between [0] and [1], which is returned the next time intr_get_time() is
2047 * called. Time spent in the higher-pil interrupt will not be returned in
2048 * the next intr_get_time() call from the original interrupt, because
2049 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2050 */
2051
/* Lint-only stub; the real implementation is the SPARC assembly version. */
/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
2056#else	/* lint */
2057
	!
	! uint64_t intr_get_time(void)
	!
	! Return the interrupt time accumulated for the current interrupt
	! thread's pil since the interrupt was entered or since the previous
	! call, whichever is later (see the block comment above for the
	! intrstat[pil][0]/[1] bookkeeping).  Must be called from an
	! interrupt thread (T_INTR_THREAD set, t_pil > 0) at or below
	! LOCK_LEVEL.  Return value is in %o0; %o1-%o5 are scratch.
	!
	ENTRY_NP(intr_get_time)
#ifdef DEBUG
	!
	! Lots of asserts, but just check panic_quiesce first.
	! Don't bother with lots of tests if we're just ignoring them.
	!
	sethi	%hi(panic_quiesce), %o0
	ld	[%o0 + %lo(panic_quiesce)], %o0
	brnz,pn	%o0, 2f			! panicking: skip all the asserts
	nop
	!
	! ASSERT(%pil <= LOCK_LEVEL)
	!
	rdpr	%pil, %o1
	cmp	%o1, LOCK_LEVEL
	ble,pt	%xcc, 0f
	sethi	%hi(intr_get_time_high_pil), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_high_pil), %o0
0:
	!
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f
	ldub	[THREAD_REG + T_PIL], %o1		! delay
	brnz,pt	%o1, 0f
	! The sethi below sits in the delay slot of the brnz above, so it
	! also executes (harmlessly clobbering %o0) on the taken path to 0f.
1:
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2		! shift bit 63 (%tick NPT) out ...
	srlx	%o2, 1, %o2		! ... leaving the 63-bit tick count
	stx	%o2, [THREAD_REG + T_INTR_START]
	sub	%o2, %o3, %o0		! %o0 = now - t_intr_start

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f		! annulled: mulx executes only if taken
	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
1:
	! Update intracct[]
	lduh	[%o5 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4		! mstate index * 8 (64-bit counters)
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o5 + %o4], %o2
	add	%o2, %o0, %o2		! cpu->intracct[mstate] += delta
	stx	%o2, [%o5 + %o4]

	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3		! pil * 16: two 8-byte counters per pil
	add	%o3, CPU_MCPU, %o3
	add	%o3, MCPU_INTRSTAT, %o3
	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2		! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl.
	retl
	wrpr	%g0, %o2, %pil		! delay: drop %pil on the way out
	SET_SIZE(intr_get_time)
2159
#ifdef DEBUG
	!
	! panic() message strings for the DEBUG asserts in intr_get_time().
	!
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
2168#endif  /* lint */
2169
2170
#if !defined(lint)

/*
 * Compile-time sanity check: interrupt-table offsets elsewhere in this
 * file are computed by shifting with INTR_VECTOR_SHIFT, which is only
 * valid while INTR_VECTOR_SIZE remains the matching power of two.
 */
#if INTR_VECTOR_SIZE != (1 << INTR_VECTOR_SHIFT)
#error "INTR_VECTOR_SIZE has changed"
#endif

#endif  /* lint */
2181