xref: /titanic_41/usr/src/uts/sun4/ml/interrupt.s (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29#if defined(lint)
30#include <sys/types.h>
31#include <sys/thread.h>
32#else	/* lint */
33#include "assym.h"
34#endif	/* lint */
35
36#include <sys/cmn_err.h>
37#include <sys/ftrace.h>
38#include <sys/asm_linkage.h>
39#include <sys/machthread.h>
40#include <sys/machcpuvar.h>
41#include <sys/intreg.h>
42
43#ifdef TRAPTRACE
44#include <sys/traptrace.h>
45#endif /* TRAPTRACE */
46
47
48
49#if defined(lint)
50
/* Lint prototype only; the real implementation is the SPARC assembly in the non-lint branch of this file. */
51/* ARGSUSED */
52void
53pil_interrupt(int level)
54{}
55
56#else	/* lint */
57
58
59/*
60 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
61 * 	Register passed from LEVEL_INTERRUPT(level)
62 *	%g4 - interrupt request level
63 */
!
! NOTE(review): overview -- runs at TL>0.  Dequeues the first intr_req
! for this PIL from the per-CPU intr_head[pil] list; if the queue
! drains, clears the matching CLEAR_SOFTINT bit; returns the intr_req
! to the per-CPU free list; clears iv_pending for the inum; then
! branches to sys_trap with intr_thread (pil <= LOCK_LEVEL) or
! current_thread (pil > LOCK_LEVEL) as the TL=0 handler.
!
64	ENTRY_NP(pil_interrupt)
65	!
66	! Register usage
67	!	%g1 - cpu
68	!	%g3 - intr_req
69	!	%g4 - pil
70	!	%g2, %g5, %g6 - temps
71	!
72	! grab the 1st intr_req off the list
73	! if the list is empty, clear %clear_softint
74	!
75	CPU_ADDR(%g1, %g5)
76	!
77	ALTENTRY(pil_interrupt_common)
78	sll	%g4, CPTRSHIFT, %g5
79	add	%g1, INTR_HEAD, %g6	! intr_head[0]
80	add	%g6, %g5, %g6		! intr_head[pil]
81	ldn	[%g6], %g3		! g3 = intr_req
82
83#ifndef DEBUG
84	brnz,pt	%g3, 5f
85	nop
#else
86#else
87	!
88	! Verify the address of intr_req; it should be within the
89	! address range of intr_pool and intr_head
90	! or the address range of intr_add_head and intr_add_tail.
91	! The range of intr_add_head and intr_add_tail is subdivided
92	! by cpu, but the subdivision is not verified here.
93	!
94	! Registers passed to sys_trap()
95	!	%g1 - no_intr_req
96	!	%g2 - intr_req
97	!	%g3 - %pil
98	!	%g4 - current pil
99	!
100	add	%g1, INTR_POOL, %g2
101	cmp	%g3, %g2
102	blu,pn	%xcc, 8f
103	nop
104	add	%g1, INTR_HEAD, %g2
105	cmp	%g2, %g3
106	bgeu,pt	%xcc, 5f
107	nop
1088:
109	sethi	%hi(intr_add_head), %g2
110	ldn	[%g2 + %lo(intr_add_head)], %g2
111	brz,pn	%g2, 4f			! intr_add_head can be NULL
112	cmp	%g3, %g2
113	blu,pn	%xcc, 4f
114	nop
115	sethi	%hi(intr_add_tail), %g2
116	ldn	[%g2 + %lo(intr_add_tail)], %g2
117	cmp	%g2, %g3
118	bgeu,pt	%xcc, 5f
119	nop
1204:
121#endif /* DEBUG */
! Bad intr_req pointer: record a 0xbad trap-trace entry (if enabled)
! and panic via ptl1_panic with PTL1_BAD_INTR_REQ in %g1.
122#ifdef TRAPTRACE
123	TRACE_PTR(%g5, %g2)
124	GET_TRACE_TICK(%g2)
125	stxa	%g2, [%g5 + TRAP_ENT_TICK]%asi
126	TRACE_SAVE_TL_GL_REGS(%g5, %g2)
127	mov	0xbad, %g2
128	stha	%g2, [%g5 + TRAP_ENT_TT]%asi
129	rdpr	%tpc, %g2
130	stna	%g2, [%g5 + TRAP_ENT_TPC]%asi
131	rdpr	%tstate, %g2
132	stxa	%g2, [%g5 + TRAP_ENT_TSTATE]%asi
133	stna	%g0, [%g5 + TRAP_ENT_SP]%asi
134	stna	%g1, [%g5 + TRAP_ENT_TR]%asi
135	rd	SOFTINT, %g2
136	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
137	stna	%g3, [%g5 + TRAP_ENT_F2]%asi
138	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
139	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
140	TRACE_NEXT(%g5, %g2, %g1)
141#endif /* TRAPTRACE */
142	ba	ptl1_panic
143	mov	PTL1_BAD_INTR_REQ, %g1
! 5: head pointer valid -- unlink the first intr_req.  The store in the
! delay slot updates intr_head[pil] with the 2nd entry (may be NULL).
1445:
145	ldn	[%g3 + INTR_NEXT], %g2	! 2nd entry
146	brnz,pn	%g2, 1f			! branch if list not empty
147	stn	%g2, [%g6]
! List drained: clear intr_tail[pil] (%g5 still holds the pil byte
! offset) and clear this pil's softint bit so it won't re-trap.
148	add	%g1, INTR_TAIL, %g6	! intr_tail[0]
149	stn	%g0, [%g5 + %g6]	! update intr_tail[pil]
150	mov	1, %g5
151	sll	%g5, %g4, %g5
152	wr	%g5, CLEAR_SOFTINT
1531:
154	!
155	! put intr_req on free list
156	!	%g2 - inumber
157	!
158	ldn	[%g1 + INTR_HEAD], %g5	! current head of free list
159	lduw	[%g3 + INTR_NUMBER], %g2
160	stn	%g3, [%g1 + INTR_HEAD]
161	stn	%g5, [%g3 + INTR_NEXT]
162#ifdef TRAPTRACE
163	TRACE_PTR(%g5, %g6)
164	GET_TRACE_TICK(%g6)
165	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
166	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
167	rdpr	%tt, %g6
168	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
169	rdpr	%tpc, %g6
170	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
171	rdpr	%tstate, %g6
172	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
173	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
174	stna	%g3, [%g5 + TRAP_ENT_TR]%asi
175	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
176	sll	%g4, CPTRSHIFT, %g3
177	add	%g1, INTR_HEAD, %g6
178	ldn	[%g6 + %g3], %g6		! intr_head[pil]
179	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
180	add	%g1, INTR_TAIL, %g6
181	ldn	[%g6 + %g3], %g6		! intr_tail[pil]
182	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
183	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
184	TRACE_NEXT(%g5, %g6, %g3)
185#endif /* TRAPTRACE */
186	!
187	! clear the iv_pending flag for this inum
188	!
189	set	intr_vector, %g5;
190	sll	%g2, INTR_VECTOR_SHIFT, %g6;
191	add	%g5, %g6, %g5;			! &intr_vector[inum]
192	sth	%g0, [%g5 + IV_PENDING]
193
194	!
195	! Prepare for sys_trap()
196	!
197	! Registers passed to sys_trap()
198	!	%g1 - interrupt handler at TL==0
199	!	%g2 - inumber
200	!	%g3 - pil
201	!	%g4 - initial pil for handler
202	!
203	! figure which handler to run and which %pil it starts at
204	! intr_thread starts at DISP_LEVEL to prevent preemption
205	! current_thread starts at PIL_MAX to protect cpu_intr_actv
206	!
207	mov	%g4, %g3
208	cmp	%g4, LOCK_LEVEL
209	bg,a,pt	%xcc, 4f		! branch if pil > LOCK_LEVEL
210	mov	PIL_MAX, %g4
211	sethi	%hi(intr_thread), %g1
212	mov	DISP_LEVEL, %g4
213	ba,pt	%xcc, sys_trap
214	or	%g1, %lo(intr_thread), %g1
2154:
216	sethi	%hi(current_thread), %g1
217	ba,pt	%xcc, sys_trap
218	or	%g1, %lo(current_thread), %g1
219	SET_SIZE(pil_interrupt_common)
220	SET_SIZE(pil_interrupt)
221
222#endif	/* lint */
223
224
225#ifndef	lint
! Message used by SERVE_INTR when a handler returns zero (DDI_INTR_UNCLAIMED).
226_spurious:
227	.asciz	"!interrupt 0x%x at level %d not serviced"
228
229/*
230 * SERVE_INTR_PRE is called once, just before the first invocation
231 * of SERVE_INTR.
232 *
233 * Registers on entry:
234 *
235 * inum, cpu, regs: may be out-registers
236 * ls1, ls2: local scratch registers
237 * os1, os2, os3: scratch registers, may be out
238 *
 * NOTE(review): computes ls1 = &intr_vector[inum] (SERVE_INTR reads
 * the handler, args and pil from it), emits an optional trap-trace
 * entry, and saves inum in ls2 for the spurious-interrupt message.
 * Comments are kept out of the macro body: an "!" comment would
 * swallow the line-continuation backslash.
 */
239
240#define SERVE_INTR_PRE(inum, cpu, ls1, ls2, os1, os2, os3, regs)	\
241	set	intr_vector, ls1;					\
242	sll	inum, INTR_VECTOR_SHIFT, os1;				\
243	add	ls1, os1, ls1;						\
244	SERVE_INTR_TRACE(inum, os1, os2, os3, regs);			\
245	mov	inum, ls2;
246
247/*
248 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
249 * SERVE_INTR_NEXT, without intervening code. No register values
250 * may be modified.
251 *
252 * After calling SERVE_INTR, the caller must check if os3 is set. If
253 * so, there is another interrupt to process. The caller must call
254 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
255 *
256 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
257 * and other actions which need to occur after invocation of an interrupt
258 * handler. However, the values of ls1 and os3 *must* be preserved and
259 * passed unmodified into SERVE_INTR_NEXT.
260 *
261 * Registers on return from SERVE_INTR:
262 *
263 * ls1 - the pil just processed
264 * ls2 - the inum just processed
265 * os3 - if set, another interrupt needs to be processed
266 * cpu, ls1, os3 - must be preserved if os3 is set
267 *
 * NOTE(review): invokes iv_handler with iv_arg in %o0 and
 * iv_softint_arg2 in %o1; iv_pil is loaded into ls1 in the call's
 * delay slot.  If the handler returns 0 in %o0, cmn_err(CE_WARN,
 * _spurious, inum, %pil) is issued.  Afterward the per-PIL
 * cpu_stats sys intr counter (indexed as pil*8 from
 * CPU_STATS_SYS_INTR - 8) is incremented, cpu is reloaded from
 * curthread->t_cpu, and os3 is loaded with intr_head[pil] so the
 * caller can tell whether more requests are queued at this level.
 */
268
269#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
270	ldn	[ls1 + IV_HANDLER], os2;				\
271	ldn	[ls1 + IV_ARG], %o0;					\
272	ldn	[ls1 + IV_SOFTINT_ARG2], %o1;					\
273	call	os2;							\
274	lduh	[ls1 + IV_PIL], ls1;					\
275	brnz,pt	%o0, 2f;						\
276	mov	CE_WARN, %o0;						\
277	set	_spurious, %o1;						\
278	mov	ls2, %o2;						\
279	call	cmn_err;						\
280	rdpr	%pil, %o3;						\
2812:	ldn	[THREAD_REG + T_CPU], cpu;				\
282	sll	ls1, 3, os1;						\
283	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
284	ldx	[cpu + os2], os3;					\
285	inc	os3;							\
286	stx	os3, [cpu + os2];					\
287	sll	ls1, CPTRSHIFT, os2;					\
288	add	cpu,  INTR_HEAD, os1;					\
289	add	os1, os2, os1;						\
290	ldn	[os1], os3;
291
292/*
293 * Registers on entry:
294 *
295 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
296 * ls1, os3		- preserved from prior call to SERVE_INTR
297 * ls2			- local scratch reg (not preserved)
298 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
299 *
 * NOTE(review): SERVE_INTR_NEXT dequeues the next intr_req (os3, from
 * the prior SERVE_INTR) off intr_head[pil].  It toggles PSTATE_IE via
 * wrpr (disabling interrupts, assuming they were enabled on entry) for
 * the list manipulation, clears intr_tail[pil] and this PIL's
 * CLEAR_SOFTINT bit when the queue drains, pushes the intr_req onto
 * the free list, clears iv_pending, restores %pstate, and leaves
 * ls1 = &intr_vector[inum] and ls2 = inum for the following SERVE_INTR.
 */
300#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
301	sll	ls1, CPTRSHIFT, os4;					\
302	add	cpu, INTR_HEAD, os1;					\
303	rdpr	%pstate, ls2;						\
304	wrpr	ls2, PSTATE_IE, %pstate;				\
305	ldn 	[os3 + INTR_NEXT], os2;					\
306	brnz,pn	os2, 4f;						\
307	stn	os2, [os1 + os4];					\
308	add	cpu, INTR_TAIL, os1;					\
309	stn	%g0, [os1 + os4];					\
310	mov	1, os1;							\
311	sll	os1, ls1, os1;						\
312	wr	os1, CLEAR_SOFTINT;					\
3134:	ldn	[cpu + INTR_HEAD], os1;					\
314	ld 	[os3 + INTR_NUMBER], os5;				\
315	stn	os3, [cpu + INTR_HEAD];					\
316	stn	os1, [os3 + INTR_NEXT];					\
317	set	intr_vector, ls1;					\
318	sll	os5, INTR_VECTOR_SHIFT, os1;				\
319	add	ls1, os1, ls1;						\
320	sth	%g0, [ls1 + IV_PENDING];				\
321	wrpr	%g0, ls2, %pstate;					\
322	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);			\
323	mov	os5, ls2;
324
325#ifdef TRAPTRACE
326/*
327 * inum - not modified, _spurious depends on it.
 *
 * NOTE(review): records a TT_SERVE_INTR|%pil trap-trace entry with
 * PSTATE_IE and PSTATE_AM cleared for the duration; TPC/TSTATE come
 * from the regs structure (os4), and inum goes in the TR slot.  The
 * entry %pstate is saved in os4 and restored at the end.  Expands to
 * nothing when TRAPTRACE is not defined.
 */
328#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
329	rdpr	%pstate, os3;						\
330	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
331	wrpr	%g0, os2, %pstate;					\
332	TRACE_PTR(os1, os2);						\
333	ldn	[os4 + PC_OFF], os2;					\
334	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
335	ldx	[os4 + TSTATE_OFF], os2;				\
336	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
337	mov	os3, os4;						\
338	GET_TRACE_TICK(os2); 						\
339	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
340	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
341	set	TT_SERVE_INTR, os2;					\
342	rdpr	%pil, os3;						\
343	or	os2, os3, os2;						\
344	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
345	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
346	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
347	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
348	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
349	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
350	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
351	TRACE_NEXT(os1, os2, os3);					\
352	wrpr	%g0, os4, %pstate
353#else	/* TRAPTRACE */
354#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
355#endif	/* TRAPTRACE */
357
358#ifdef TRAPTRACE
359/*
360 * inum - not modified, _spurious depends on it.
 *
 * NOTE(review): like SERVE_INTR_TRACE but used after SERVE_INTR_NEXT,
 * where no regs structure is available -- TPC and TSTATE are recorded
 * as zero.  Expands to nothing when TRAPTRACE is not defined.
 */
361#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
362	rdpr	%pstate, os3;						\
363	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
364	wrpr	%g0, os2, %pstate;					\
365	TRACE_PTR(os1, os2);						\
366	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
367	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
368	mov	os3, os4;						\
369	GET_TRACE_TICK(os2); 						\
370	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
371	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
372	set	TT_SERVE_INTR, os2;					\
373	rdpr	%pil, os3;						\
374	or	os2, os3, os2;						\
375	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
376	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
377	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
378	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
379	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
380	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
381	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
382	TRACE_NEXT(os1, os2, os3);					\
383	wrpr	%g0, os4, %pstate
384#else	/* TRAPTRACE */
385#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
386#endif	/* TRAPTRACE */
388
389#endif	/* lint */
390
391#if defined(lint)
392
/* Lint prototype only; the real implementation is the SPARC assembly in the non-lint branch of this file. */
393/*ARGSUSED*/
394void
395intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
396{}
397
398#else	/* lint */
399
! Max handlers run while pinning a thread before forcing a preempt()
! (see the unpin logic after the stats update below).
400#define	INTRCNT_LIMIT 16
401
402/*
403 * Handle an interrupt in a new thread.
404 *	Entry:
405 *		%o0       = pointer to regs structure
406 *		%o1       = inumber
407 *		%o2       = pil
408 *		%sp       = on current thread's kernel stack
409 *		%o7       = return linkage to trap code
410 *		%g7       = current thread
411 *		%pstate   = normal globals, interrupts enabled,
412 *		            privileged, fp disabled
413 *		%pil      = DISP_LEVEL
414 *
415 *	Register Usage
416 *		%l0       = return linkage
417 *		%l1       = pil
418 *		%l2 - %l3 = scratch
419 *		%l4 - %l7 = reserved for sys_trap
420 *		%o2       = cpu
421 *		%o3       = intr thread
422 *		%o0       = scratch
423 *		%o4 - %o5 = scratch
424 */
425	ENTRY_NP(intr_thread)
426	mov	%o7, %l0
427	mov	%o2, %l1
428	!
429	! See if we are interrupting another interrupt thread.
430	!
431	lduh	[THREAD_REG + T_FLAGS], %o3
432	andcc	%o3, T_INTR_THREAD, %g0
433	bz,pt	%xcc, 1f
434	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer
435
436	! We have interrupted an interrupt thread. Take a timestamp,
437	! compute its interval, and update its cumulative counter.
438	add	THREAD_REG, T_INTR_START, %o5
4390:
440	ldx	[%o5], %o3
441	brz,pn	%o3, 1f
442	! We came in on top of an interrupt thread that had no timestamp.
443	! This could happen if, for instance, an interrupt thread which had
444	! previously blocked is being set up to run again in resume(), but
445	! resume() hasn't yet stored a timestamp for it. Or, it could be in
446	! swtch() after its slice has been accounted for.
447	! Only account for the time slice if the starting timestamp is non-zero.
448	rdpr	%tick, %o4			! delay
449	sllx	%o4, 1, %o4			! shift off NPT bit
450	srlx	%o4, 1, %o4
451	sub	%o4, %o3, %o4			! o4 has interval
452
453	! A high-level interrupt in current_thread() interrupting here
454	! will account for the interrupted thread's time slice, but
455	! only if t_intr_start is non-zero. Since this code is going to account
456	! for the time slice, we want to "atomically" load the thread's
457	! starting timestamp, calculate the interval with %tick, and zero
458	! its starting timestamp.
459	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
460	! If it has changed since we loaded it above, we need to re-compute the
461	! interval, since a changed t_intr_start implies current_thread placed
462	! a new, later timestamp there after running a high-level interrupt,
463	! and the %tick val in %o4 had become stale.
464	mov	%g0, %l2
465	casx	[%o5], %o3, %l2
466
467	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
468	! changed between loading it (after label 0b) and computing the
469	! interval above.
470	cmp	%l2, %o3
471	bne,pn	%xcc, 0b
472
473	! Check for Energy Star mode
474	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
475	cmp	%l2, 1
476	bg,a,pn	%xcc, 2f
477	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
4782:
479	! We now know that a valid interval for the interrupted interrupt
480	! thread is in %o4. Update its cumulative counter.
481	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
482	sllx	%l3, 4, %l3		! convert PIL index to byte offset
483	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
484	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
485	ldx	[%o2 + %l3], %o5	! old counter in o5
486	add	%o5, %o4, %o5		! new counter in o5
487	stx	%o5, [%o2 + %l3]	! store new counter
488
4891:
490	!
491	! Get set to run interrupt thread.
492	! There should always be an interrupt thread since we allocate one
493	! for each level on the CPU.
494	!
495	! Note that the code in kcpc_overflow_intr -relies- on the ordering
496	! of events here -- in particular that t->t_lwp of the interrupt thread
497	! is set to the pinned thread *before* curthread is changed.
498	!
499	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
500	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
501	stn	%o4, [%o2 + CPU_INTR_THREAD]
502	!
503	! Set bit for this level in CPU's active interrupt bitmask.
504	!
505	ld	[%o2 + CPU_INTR_ACTV], %o5
506	mov	1, %o4
507	sll	%o4, %l1, %o4
508#ifdef DEBUG
509	!
510	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
511	!
512	andcc	%o5, %o4, %g0
513	bz,pt	%xcc, 0f
514	nop
515	! Do not call panic if a panic is already in progress.
516	sethi	%hi(panic_quiesce), %l2
517	ld	[%l2 + %lo(panic_quiesce)], %l2
518	brnz,pn	%l2, 0f
519	nop
520	sethi	%hi(intr_thread_actv_bit_set), %o0
521	call	panic
522	or	%o0, %lo(intr_thread_actv_bit_set), %o0
5230:
524#endif /* DEBUG */
525	or	%o5, %o4, %o5
526	st	%o5, [%o2 + CPU_INTR_ACTV]
527	!
528	! Consider the new thread part of the same LWP so that
529	! window overflow code can find the PCB.
530	!
531	ldn	[THREAD_REG + T_LWP], %o4
532	stn	%o4, [%o3 + T_LWP]
533	!
534	! Threads on the interrupt thread free list could have state already
535	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
536	! Could eliminate the next two instructions with a little work.
537	!
538	mov	TS_ONPROC, %o4
539	st	%o4, [%o3 + T_STATE]
540	!
541	! Push interrupted thread onto list from new thread.
542	! Set the new thread as the current one.
543	! Set interrupted thread's T_SP because if it is the idle thread,
544	! resume may use that stack between threads.
545	!
546	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
547	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
548	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
549	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
550	mov	%o3, THREAD_REG			! set global curthread register
551	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
552	sub	%o4, STACK_BIAS, %sp
553	!
554	! Initialize thread priority level from intr_pri
555	!
556	sethi	%hi(intr_pri), %o4
557	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
558	add	%l1, %o4, %o4		! convert level to dispatch priority
559	sth	%o4, [THREAD_REG + T_PRI]
560	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate
561
562	! Store starting timestamp in thread structure.
563	add	THREAD_REG, T_INTR_START, %o3
5641:
565	ldx	[%o3], %o5
566	rdpr	%tick, %o4
567	sllx	%o4, 1, %o4
568	srlx	%o4, 1, %o4			! shift off NPT bit
569	casx	[%o3], %o5, %o4
570	cmp	%o4, %o5
571	! If a high-level interrupt occurred while we were attempting to store
572	! the timestamp, try again.
573	bne,pn	%xcc, 1b
574	nop
575
576	wrpr	%g0, %l1, %pil			! lower %pil to new level
577	!
578	! Fast event tracing.
579	!
580	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
581	btst	FTRACE_ENABLED, %o4
582	be,pt	%icc, 1f			! skip if ftrace disabled
583	  mov	%l1, %o5
584	!
585	! Tracing is enabled - write the trace entry.
586	!
587	save	%sp, -SA(MINFRAME), %sp
588	set	ftrace_intr_thread_format_str, %o0
589	mov	%i0, %o1
590	mov	%i1, %o2
591	call	ftrace_3
592	mov	%i5, %o3
593	restore
5941:
595	!
596	! call the handler
597	!
598	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
599	!
600	! %o0 and %o1 are now available as scratch registers.
601	!
6020:
603	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
604	!
605	! If %o3 is set, we must call serve_intr_post, and both %l1 and %o3
606	! must be preserved. %l1 holds our pil, %l3 holds our inum.
607	!
608	! Note: %l1 is the pil level we're processing, but we may have a
609	! higher effective pil because a higher-level interrupt may have
610	! blocked.
611	!
612	wrpr	%g0, DISP_LEVEL, %pil
613	!
614	! Take timestamp, compute interval, update cumulative counter.
615	!
616	add	THREAD_REG, T_INTR_START, %o5
6171:
618	ldx	[%o5], %o0
619#ifdef DEBUG
620	brnz	%o0, 9f
621	nop
622	! Do not call panic if a panic is already in progress.
623	sethi	%hi(panic_quiesce), %o1
624	ld	[%o1 + %lo(panic_quiesce)], %o1
625	brnz,pn	%o1, 9f
626	nop
627	sethi	%hi(intr_thread_t_intr_start_zero), %o0
628	call	panic
629	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
6309:
631#endif /* DEBUG */
632	rdpr	%tick, %o1
633	sllx	%o1, 1, %o1
634	srlx	%o1, 1, %o1			! shift off NPT bit
635	sub	%o1, %o0, %l2			! l2 has interval
636	!
637	! The general outline of what the code here does is:
638	! 1. load t_intr_start, %tick, and calculate the delta
639	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
640	!
641	! The problem is that a high-level interrupt could arrive at any time.
642	! It will account for (%tick - t_intr_start) for us when it starts,
643	! unless we have set t_intr_start to zero, and then set t_intr_start
644	! to a new %tick when it finishes. To account for this, our first step
645	! is to load t_intr_start and the last is to use casx to store the new
646	! t_intr_start. This guarantees atomicity in reading t_intr_start,
647	! reading %tick, and updating t_intr_start.
648	!
649	movrz	%o3, %g0, %o1
650	casx	[%o5], %o0, %o1
651	cmp	%o0, %o1
652	bne,pn	%xcc, 1b
653	!
654	! Check for Energy Star mode
655	!
656	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
657	cmp	%o0, 1
658	bg,a,pn	%xcc, 2f
659	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
6602:
661	!
662	! Update cpu_intrstat. If o3 is set then we will be processing another
663	! interrupt. Above we have set t_intr_start to %tick, not 0. This
664	! means a high-level interrupt can arrive and update the same stats
665	! we're updating. Need to use casx.
666	!
667	sllx	%l1, 4, %o1			! delay - PIL as byte offset
668	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
669	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
670	add	%o1, %o2, %o1
6711:
672	ldx	[%o1], %o5			! old counter in o5
673	add	%o5, %l2, %o0			! new counter in o0
674 	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
675	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
676	cmp	%o5, %o0
677	bne,pn	%xcc, 1b
678	nop
679	!
680	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
681	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
682	! we've crossed the threshold and we should unpin the pinned threads
683	! by preempt()ing ourselves, which will bubble up the t_intr chain
684	! until hitting the non-interrupt thread, which will then in turn
685	! preempt itself allowing the interrupt processing to resume. Finally,
686	! the scheduler takes over and picks the next thread to run.
687	!
688	! If our CPU is quiesced, we cannot preempt because the idle thread
689	! won't ever re-enter the scheduler, and the interrupt will be forever
690	! blocked.
691	!
692	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
693	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
694	! This insures we enter the scheduler if a higher-priority thread
695	! has become runnable.
696	!
697	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
698	andcc	%o5, CPU_QUIESCED, %g0
699	bnz,pn	%xcc, 1f
700
701	ldn     [THREAD_REG + T_INTR], %o5      ! pinning anything?
702	brz,pn  %o5, 3f				! if not, don't inc intrcnt
703
704	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
705	inc	%o5
706	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
707	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
708	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
709	bg,pn	%xcc, 2f			! don't inc stats again
710	!
711	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
712	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
713	!
714	mov	1, %o4				! delay
715	stub	%o4, [%o2 + CPU_KPRUNRUN]
716	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
717	inc	%o4
718	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
719	ba	2f
720	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
7213:
722	! Code for t_intr == NULL
723	ldub	[%o2 + CPU_KPRUNRUN], %o5
724	brz,pt	%o5, 1f				! don't preempt unless kprunrun
7252:
726	! Time to call preempt
727	mov	%o2, %l3			! delay - save %o2
728	call	preempt
729	mov	%o3, %l2			! delay - save %o3.
730	mov	%l3, %o2			! restore %o2
731	mov	%l2, %o3			! restore %o3
732	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
7331:
734	!
735	! Do we need to call serve_intr_post and do this again?
736	!
737	brz,a,pt %o3, 0f
738	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
739	!
740	! Restore %pil before calling serve_intr() again. We must check
741	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
742	!
743	ld	[%o2 + CPU_BASE_SPL], %o4
744	cmp	%o4, %l1
745	movl	%xcc, %l1, %o4
746	wrpr	%g0, %o4, %pil
747	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
748	ba	0b				! compute new stats
749	nop
7500:
751	!
752	! Clear bit for this level in CPU's interrupt active bitmask.
753	!
754	mov	1, %o4
755	sll	%o4, %l1, %o4
756#ifdef DEBUG
757	!
758	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
759	!
760	andcc	%o4, %o5, %g0
761	bnz,pt	%xcc, 0f
762	nop
763	! Do not call panic if a panic is already in progress.
764	sethi	%hi(panic_quiesce), %l2
765	ld	[%l2 + %lo(panic_quiesce)], %l2
766	brnz,pn	%l2, 0f
767	nop
768	sethi	%hi(intr_thread_actv_bit_not_set), %o0
769	call	panic
770	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
7710:
772#endif /* DEBUG */
773	andn	%o5, %o4, %o5
774	st	%o5, [%o2 + CPU_INTR_ACTV]
775	!
776	! If there is still an interrupted thread underneath this one,
777	! then the interrupt was never blocked and the return is fairly
778	! simple.  Otherwise jump to intr_thread_exit.
779	!
780	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
781	brz,pn	%o4, intr_thread_exit		! branch if none
782	nop
783	!
784	! link the thread back onto the interrupt thread pool
785	!
786	ldn	[%o2 + CPU_INTR_THREAD], %o3
787	stn	%o3, [THREAD_REG + T_LINK]
788	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
789	!
790	! set the thread state to free so kernel debuggers don't see it
791	!
792	mov	TS_FREE, %o5
793	st	%o5, [THREAD_REG + T_STATE]
794	!
795	! Switch back to the interrupted thread and return
796	!
797	stn	%o4, [%o2 + CPU_THREAD]
798	mov	%o4, THREAD_REG
799
800	! If we pinned an interrupt thread, store its starting timestamp.
801	lduh	[THREAD_REG + T_FLAGS], %o5
802	andcc	%o5, T_INTR_THREAD, %g0
803	bz,pt	%xcc, 1f
804	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
805
806	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
8070:
808	ldx	[%o3], %o4			! o4 = t_intr_start before
809	rdpr	%tick, %o5
810	sllx	%o5, 1, %o5
811	srlx	%o5, 1, %o5			! shift off NPT bit
812	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
813	cmp	%o4, %o5
814	! If a high-level interrupt occurred while we were attempting to store
815	! the timestamp, try again.
816	bne,pn	%xcc, 0b
817	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
8181:
819	! If the thread being restarted isn't pinning anyone, and no interrupts
820	! are pending, zero out cpu_intrcnt
821	ldn	[THREAD_REG + T_INTR], %o4
822	brnz,pn	%o4, 2f
823	rd	SOFTINT, %o4			! delay
824	set	SOFTINT_MASK, %o5
825	andcc	%o4, %o5, %g0
826	bz,a,pt	%xcc, 2f
827	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
8282:
829	jmp	%l0 + 8
830	nop
831	SET_SIZE(intr_thread)
832	/* Not Reached */
833
834	!
835	! An interrupt returned on what was once (and still might be)
836	! an interrupt thread stack, but the interrupted process is no longer
837	! there.  This means the interrupt must have blocked.
838	!
839	! There is no longer a thread under this one, so put this thread back
840	! on the CPU's free list and resume the idle thread which will dispatch
841	! the next thread to run.
842	!
843	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
844	! is enabled.
845	!
	! NOTE(review): reached by branch from intr_thread when T_INTR is
	! NULL; %o2 (cpu pointer) and %l1 (pil) are presumed still live
	! from intr_thread -- confirm against any other callers.
846	ENTRY_NP(intr_thread_exit)
847#ifdef TRAPTRACE
848	rdpr	%pstate, %l2
849	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
850	wrpr	%g0, %o4, %pstate			! cpu to known state
851	TRACE_PTR(%o4, %o5)
852	GET_TRACE_TICK(%o5)
853	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
854	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
855	set	TT_INTR_EXIT, %o5
856	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
857	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
858	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
859	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
860	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
861	ld	[%o2 + CPU_BASE_SPL], %o5
862	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
863	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
864	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
865	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
866	TRACE_NEXT(%o4, %o5, %o0)
867	wrpr	%g0, %l2, %pstate
868#endif /* TRAPTRACE */
869	! cpu_stats.sys.intrblk++
870        ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
871        inc     %o4
872        stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
873	!
874	! Put thread back on the interrupt thread list.
875	!
876
877	!
878	! Set the CPU's base SPL level.
879	!
880#ifdef DEBUG
881	!
882	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
883	!
884	ld	[%o2 + CPU_INTR_ACTV], %o5
885	mov	1, %o4
886	sll	%o4, %l1, %o4
887	and	%o5, %o4, %o4
888	brz,pt	%o4, 0f
889	nop
890	! Do not call panic if a panic is already in progress.
891	sethi	%hi(panic_quiesce), %l2
892	ld	[%l2 + %lo(panic_quiesce)], %l2
893	brnz,pn	%l2, 0f
894	nop
895	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
896	call	panic
897	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
8980:
899#endif /* DEBUG */
900	call	_intr_set_spl			! set CPU's base SPL level
901	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
902	!
903	! set the thread state to free so kernel debuggers don't see it
904	!
905	mov	TS_FREE, %o4
906	st	%o4, [THREAD_REG + T_STATE]
907	!
908	! Put thread on either the interrupt pool or the free pool and
909	! call swtch() to resume another thread.
910	!
911	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
912	stn	%o5, [THREAD_REG + T_LINK]
913	call	swtch				! switch to best thread
914	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
915	ba,a,pt	%xcc, .				! swtch() shouldn't return
916	SET_SIZE(intr_thread_exit)
917
! ftrace format string and (under DEBUG) panic message strings
! referenced by intr_thread/intr_thread_exit above.
918	.global ftrace_intr_thread_format_str
919ftrace_intr_thread_format_str:
920	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
921#ifdef DEBUG
922intr_thread_actv_bit_set:
923	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
924intr_thread_actv_bit_not_set:
925	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
926intr_thread_exit_actv_bit_set:
927	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
928intr_thread_t_intr_start_zero:
929	.asciz	"intr_thread():	t_intr_start zero upon handler return"
930#endif /* DEBUG */
931#endif	/* lint */
932
933#if defined(lint)
934
935/*
936 * Handle an interrupt in the current thread
937 *	Entry:
938 *		%o0       = pointer to regs structure
939 *		%o1       = inumber
940 *		%o2       = pil
941 *		%sp       = on current thread's kernel stack
942 *		%o7       = return linkage to trap code
943 *		%g7       = current thread
944 *		%pstate   = normal globals, interrupts enabled,
945 *		            privileged, fp disabled
946 *		%pil      = PIL_MAX
947 *
948 *	Register Usage
949 *		%l0       = return linkage
950 *		%l1       = old stack
951 *		%l2 - %l3 = scratch
952 *		%l4 - %l7 = reserved for sys_trap
953 *		%o3       = cpu
954 *		%o0       = scratch
955 *		%o4 - %o5 = scratch
956 */
/* Lint prototype only; the real implementation is the SPARC assembly in the non-lint branch of this file. */
957/* ARGSUSED */
958void
959current_thread(struct regs *regs, uint_t inumber, uint_t pil)
960{}
961
962#else	/* lint */
963
	ENTRY_NP(current_thread)
	!
	! Entry: %o2 = pil, %o7 = return linkage (saved in %l0), and
	! THREAD_REG = current thread. High-level (> LOCK_LEVEL)
	! interrupts are handled here in the context of the current
	! thread, switching to the CPU's interrupt stack when no other
	! high-level interrupt is already active.
	!
	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:

	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1			! shake off NPT bit
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
        sub     %o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
        stx     %o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! branch if %o2 == 0 (done);
	rdpr	%pil, %o2			! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b				! nonzero %o2: more intrs await
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ld	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr 	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			!  cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
        sub     %o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx     [%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)
1358
1359
#ifdef DEBUG
	! Panic/cmn_err message strings referenced by the DEBUG
	! assertions in current_thread above.
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
1374#endif /* lint */
1375
1376/*
1377 * Return a thread's interrupt level.
1378 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1379 * must dig it out of the save area.
1380 *
1381 * Caller 'swears' that this really is an interrupt thread.
1382 *
1383 * int
1384 * intr_level(t)
1385 *	kthread_id_t	t;
1386 */
1387
1388#if defined(lint)
1389
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }
1394
1395#else	/* lint */
1396
	! Return the interrupt thread's saved PIL (t_pil).
	! %o0 = thread pointer on entry; t_pil on return.
	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! return saved pil
	SET_SIZE(intr_level)
1401
1402#endif	/* lint */
1403
1404#if defined(lint)
1405
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }
1410
1411#else	/* lint */
1412
	! Raise %pil to PIL_MAX, returning the previous %pil in %o0
	! so the caller can later restore it via enable_pil_intr().
	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)
1418
1419#endif	/* lint */
1420
1421#if defined(lint)
1422
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}
1427
1428#else	/* lint */
1429
	! Restore %pil to the value saved by disable_pil_intr().
	! %o0 = saved pil value.
	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)
1434
1435#endif	/* lint */
1436
1437#if defined(lint)
1438
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }
1443
1444#else	/* lint */
1445
	! Clear PSTATE.IE (vectored interrupts off), returning the prior
	! %pstate in %o0 for a later enable_vec_intr() to restore.
	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)
1452
1453#endif	/* lint */
1454
1455#if defined(lint)
1456
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}
1461
1462#else	/* lint */
1463
	! Restore %pstate (including PSTATE.IE) saved by disable_vec_intr().
	! %o0 = saved pstate value.
	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)
1468
1469#endif	/* lint */
1470
1471#if defined(lint)
1472
/* Lint-only stub; the real implementation is the assembly under #else. */
void
cbe_level14(void)
{}
1476
1477#else   /* lint */
1478
	! Level-14 cyclic backend handler: if the softint came from
	! TICK/STICK compare, fire the cyclic subsystem for this CPU.
	ENTRY_NP(cbe_level14)
	save    %sp, -SA(MINFRAME), %sp ! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0
2:
	ret
	restore	%g0, 1, %o0		! delay - return 1 in caller's %o0
	SET_SIZE(cbe_level14)
1497
1498#endif  /* lint */
1499
1500
1501#if defined(lint)
1502
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
void
setsoftint(uint_t inum)
{}
1507
1508#else	/* lint */
1509
	! Post a software interrupt for interrupt number %i0: queue an
	! intr_req at the vector's PIL and write SET_SOFTINT, with
	! PSTATE.IE disabled around the queue manipulation.
	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number.
	! Put the request on the cpu's softint list,
	! and set %set_softint.
	!
	! Register usage
	!	%i0 - inumber
	!	%l2 - requested pil
	!	%l3 - intr_req
	!	%l4 - *cpu
	!	%l1, %l6 - temps
	!
	! check if a softint is pending for this inum already
	! if one is pending, don't bother queuing another
	!
	set	intr_vector, %l1
	sll	%i0, INTR_VECTOR_SHIFT, %l6
	add	%l1, %l6, %l1			! %l1 = &intr_vector[inum]
	lduh	[%l1 + IV_PENDING], %l6
	brnz,pn	%l6, 4f				! branch, if pending
	or	%g0, 1, %l2			! delay - %l2 = 1 (always executed)
	sth	%l2, [%l1 + IV_PENDING]		! intr_vector[inum].pend = 1
	!
	! allocate an intr_req from the free list
	!
	CPU_ADDR(%l4, %l2)
	ldn	[%l4 + INTR_HEAD], %l3
	lduh	[%l1 + IV_PIL], %l2
	!
	! fixup free list
	!
	ldn	[%l3 + INTR_NEXT], %l6
	stn	%l6, [%l4 + INTR_HEAD]
	!
	! fill up intr_req
	!
	st	%i0, [%l3 + INTR_NUMBER]
	stn	%g0, [%l3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%l2, CPTRSHIFT, %l0
	add	%l4, INTR_TAIL, %l6
	ldn	[%l6 + %l0], %l1	! current tail
	brz,pt	%l1, 2f			! branch if list empty
	stn	%l3, [%l6 + %l0]	! make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%l3, [%l1 + INTR_NEXT]	! update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%l4, INTR_HEAD, %l6
	stn	%l3, [%l6 + %l0]
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1
	sll	%l1, %l2, %l1
	wr	%l1, SET_SOFTINT
4:
	wrpr	%g0, %l5, %pstate	! restore saved pstate (IE back on)
	ret
	restore
	SET_SIZE(setsoftint)
1586
1587#endif	/* lint */
1588
1589#if defined(lint)
1590
/* Lint-only stub; the real implementation is the assembly under #else. */
/*ARGSUSED*/
void
setsoftint_tl1(uint64_t inum, uint64_t dummy)
{}
1595
1596#else	/* lint */
1597
1598	!
1599	! Register usage
1600	!
1601	! Arguments:
1602	! %g1 - inumber
1603	!
1604	! Internal:
1605	! %g2 - requested pil
1606	! %g3 - intr_req
1607	! %g4 - cpu pointer
1608	! %g5,%g6,%g7 - temps
1609	!
	! TL>0 variant of setsoftint: validate inum (%g1), queue an
	! intr_req at the vector's PIL, write SET_SOFTINT, then retry.
	! Bad inum or iv_pil, or an empty free list, escalates through
	! sys_trap to no_ivintr()/no_intr_pool().
	ENTRY_NP(setsoftint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number. Put the request on the cpu's softint
	! list, and set %set_softint.
	!
	set	intr_vector, %g5
	sll	%g1, INTR_VECTOR_SHIFT, %g6
	add	%g5, %g6, %g5			! %g5 = &intr_vector[inum]

	!
	! allocate an intr_req from the free list
	!
	CPU_ADDR(%g4, %g2)
	ldn	[%g4 + INTR_HEAD], %g3

	! load the pil so it can be used by .no_intr_pool/.no_ivintr
	lduh	[%g5 + IV_PIL], %g2

	! Verify that the free list is not exhausted.
	brz,pn	%g3, .no_intr_pool
	nop

	! Verify the intr_vector[] entry according to the inumber.
	! The iv_pil field should not be zero.  This used to be
	! guarded by DEBUG but broken drivers can cause spurious
	! tick interrupts when the softint register is programmed
	! with 1 << 0 at the end of this routine.  Now we always
	! check for an invalid pil.
	brz,pn	%g2, .no_ivintr
	nop

	!
	! fixup free list
	!
	ldn	[%g3 + INTR_NEXT], %g6
	stn	%g6, [%g4 + INTR_HEAD]

	!
	! fill in intr_req
	!
	st	%g1, [%g3 + INTR_NUMBER]
	stn	%g0, [%g3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%g2, CPTRSHIFT, %g7
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g5	! current tail
	brz,pt	%g5, 2f			! branch if list empty
	stn	%g3, [%g6 + %g7]	! make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%g3, [%g5 + INTR_NEXT]	! update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%g4, INTR_HEAD, %g6
	stn	%g3, [%g6 + %g7]
3:
#ifdef TRAPTRACE
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g1 + TRAP_ENT_SP]%asi
	ld	[%g3 + INTR_NUMBER], %g6
	stna	%g6, [%g1 + TRAP_ENT_TR]%asi
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6		! intr_head[pil]
	stna	%g6, [%g1 + TRAP_ENT_F1]%asi
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6		! intr_tail[pil]
	stna	%g6, [%g1 + TRAP_ENT_F2]%asi
	stna	%g2, [%g1 + TRAP_ENT_F3]%asi	! pil
	stna	%g3, [%g1 + TRAP_ENT_F4]%asi	! intr_req
	TRACE_NEXT(%g1, %g6, %g5)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5
	sll	%g5, %g2, %g5
	wr	%g5, SET_SOFTINT
4:
	retry

.no_intr_pool:
	! no_intr_pool: rp, inum (%g1), pil (%g2)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_intr_pool, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
.no_ivintr:
	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_ivintr, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	SET_SIZE(setsoftint_tl1)
1730
1731#endif	/* lint */
1732
1733#if defined(lint)
1734
/* Lint-only stub; the real implementation is the assembly under #else. */
/*ARGSUSED*/
void
wr_clr_softint(uint_t value)
{}
1739
1740#else
1741
	! Write %o0 to the CLEAR_SOFTINT register, clearing the
	! corresponding pending soft interrupt bits.
	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT
	SET_SIZE(wr_clr_softint)
1746
1747#endif /* lint */
1748
1749#if defined(lint)
1750
/* Lint-only stub; the real implementation is the assembly under #else. */
/*ARGSUSED*/
void
intr_enqueue_req(uint_t pil, uint32_t inum)
{}
1755
1756#else   /* lint */
1757
1758/*
1759 * intr_enqueue_req
1760 *
1761 * %o0 - pil
1762 * %o1 - inum
1763 * %o5 - preserved
1764 * %g5 - preserved
1765 */
	! Queue an intr_req for inum (%o1) on this CPU's pil (%o0) list.
	! Unlike setsoftint, this neither disables interrupts nor writes
	! SET_SOFTINT; callers (e.g. current_thread) handle both.
	! NOTE(review): assumes vector interrupts are already disabled
	! by the caller while the lists are manipulated - confirm.
	ENTRY_NP(intr_enqueue_req)
	! get intr_req free list
	CPU_ADDR(%g4, %g1)
	ldn	[%g4 + INTR_HEAD], %g3

	! take intr_req from free list
	ldn	[%g3 + INTR_NEXT], %g6
	stn	%g6, [%g4 + INTR_HEAD]

	! fill up intr_req
	st	%o1, [%g3 + INTR_NUMBER]
	stn	%g0, [%g3 + INTR_NEXT]

	! add intr_req to proper pil list
	sll	%o0, CPTRSHIFT, %o0	! %o0 now pil list byte offset
	add	%g4, INTR_TAIL, %g6
	ldn	[%o0 + %g6], %g1	! current tail
	brz,pt	%g1, 2f			! branch if list is empty
	stn	%g3, [%g6 + %o0]	! make intr_req the new tail

	! an intr_req was already queued so update old tail
	ba,pt	%xcc, 3f
	stn	%g3, [%g1 + INTR_NEXT]
2:
	! no intr_req's queued so make intr_req the new head
	add	%g4, INTR_HEAD, %g6
	stn	%g3, [%g6 + %o0]
3:
	retl
	nop
	SET_SIZE(intr_enqueue_req)
1797
1798#endif  /* lint */
1799
1800/*
1801 * Set CPU's base SPL level, based on which interrupt levels are active.
1802 * 	Called at spl7 or above.
1803 */
1804
1805#if defined(lint)
1806
/* Lint-only stub; the real implementation is the assembly under #else. */
void
set_base_spl(void)
{}
1810
1811#else	/* lint */
1812
	ENTRY_NP(set_base_spl)
	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask

/*
 * WARNING: non-standard calling sequence; do not call from C
 *	%o2 = pointer to CPU
 *	%o5 = updated CPU_INTR_ACTV
 */
_intr_set_spl:					! intr_thread_exit enters here
	!
	! Determine highest interrupt level active.  Several could be blocked
	! at higher levels than this one, so must convert flags to a PIL
	! Normally nothing will be blocked, so test this first.
	!
	brz,pt	%o5, 1f				! nothing active
	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
	set	_intr_flag_table, %o1
	tst	%o3				! see if any of the bits set
	ldub	[%o1 + %o3], %o3		! load bit number
	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
	add	%o3, 11-1, %o3			! delay - add bit number - 1

	sra	%o5, 6, %o3			! test bits 10-6
	tst	%o3
	ldub	[%o1 + %o3], %o3
	bnz,a,pn %xcc, 1f
	add	%o3, 6-1, %o3

	sra	%o5, 1, %o3			! test bits 5-1
	ldub	[%o1 + %o3], %o3

	!
	! highest interrupt level number active is in %o3
	!
1:
	retl
	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
	SET_SIZE(set_base_spl)
1852
1853/*
1854 * Table that finds the most significant bit set in a five bit field.
1855 * Each entry is the high-order bit number + 1 of it's index in the table.
1856 * This read-only data is in the text segment.
1857 */
1858_intr_flag_table:
1859	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
1860	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
1861	.align	4
1862
1863#endif	/* lint */
1864
1865/*
1866 * int
1867 * intr_passivate(from, to)
1868 *	kthread_id_t	from;		interrupt thread
1869 *	kthread_id_t	to;		interrupted thread
1870 */
1871
1872#if defined(lint)
1873
/* Lint-only stub; the real implementation is the assembly under #else. */
/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }
1878
1879#else	/* lint */
1880
	! Move the register save area from the base of the interrupt
	! thread's stack (%i0, t_stack) to the top of the interrupted
	! thread's stack (%i1, t_sp), then return the interrupt
	! thread's saved t_pil.
	ENTRY_NP(intr_passivate)
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	flushw				! force register windows to stack
	!
	! restore registers from the base of the stack of the interrupt thread.
	!
	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
	ldn	[%i2 + (1*GREGSIZE)], %l1
	ldn	[%i2 + (2*GREGSIZE)], %l2
	ldn	[%i2 + (3*GREGSIZE)], %l3
	ldn	[%i2 + (4*GREGSIZE)], %l4
	ldn	[%i2 + (5*GREGSIZE)], %l5
	ldn	[%i2 + (6*GREGSIZE)], %l6
	ldn	[%i2 + (7*GREGSIZE)], %l7
	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
	ldn	[%i2 + (9*GREGSIZE)], %o1
	ldn	[%i2 + (10*GREGSIZE)], %o2
	ldn	[%i2 + (11*GREGSIZE)], %o3
	ldn	[%i2 + (12*GREGSIZE)], %o4
	ldn	[%i2 + (13*GREGSIZE)], %o5
	ldn	[%i2 + (14*GREGSIZE)], %i4
					! copy stack/pointer without using %sp
	ldn	[%i2 + (15*GREGSIZE)], %i5
	!
	! put registers into the save area at the top of the interrupted
	! thread's stack, pointed to by %l7 in the save area just loaded.
	!
	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
						! fp, %i7 copied using %i4
	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
						! clear fp in save area

	! load saved pil for return
	ldub	[%i0 + T_PIL], %i0
	ret
	restore
	SET_SIZE(intr_passivate)
1936
1937#endif	/* lint */
1938
1939#if defined(lint)
1940
1941/*
1942 * intr_get_time() is a resource for interrupt handlers to determine how
1943 * much time has been spent handling the current interrupt. Such a function
1944 * is needed because higher level interrupts can arrive during the
1945 * processing of an interrupt, thus making direct comparisons of %tick by
1946 * the handler inaccurate. intr_get_time() only returns time spent in the
1947 * current interrupt handler.
1948 *
1949 * The caller must be calling from an interrupt handler running at a pil
1950 * below or at lock level. Timings are not provided for high-level
1951 * interrupts.
1952 *
1953 * The first time intr_get_time() is called while handling an interrupt,
1954 * it returns the time since the interrupt handler was invoked. Subsequent
1955 * calls will return the time since the prior call to intr_get_time(). Time
1956 * is returned as ticks, adjusted for any clock divisor due to power
1957 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
1958 * not be the same across CPUs.
1959 *
1960 * Theory Of Intrstat[][]:
1961 *
1962 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1963 * uint64_ts per pil.
1964 *
1965 * intrstat[pil][0] is a cumulative count of the number of ticks spent
1966 * handling all interrupts at the specified pil on this CPU. It is
1967 * exported via kstats to the user.
1968 *
1969 * intrstat[pil][1] is always a count of ticks less than or equal to the
1970 * value in [0]. The difference between [1] and [0] is the value returned
1971 * by a call to intr_get_time(). At the start of interrupt processing,
1972 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1973 * time, [0] will increase, but [1] will remain the same. A call to
1974 * intr_get_time() will return the difference, then update [1] to be the
1975 * same as [0]. Future calls will return the time since the last call.
1976 * Finally, when the interrupt completes, [1] is updated to the same as [0].
1977 *
1978 * Implementation:
1979 *
1980 * intr_get_time() works much like a higher level interrupt arriving. It
1981 * "checkpoints" the timing information by incrementing intrstat[pil][0]
1982 * to include elapsed running time, and by setting t_intr_start to %tick.
1983 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1984 * and updates intrstat[pil][1] to be the same as the new value of
1985 * intrstat[pil][0].
1986 *
1987 * In the normal handling of interrupts, after an interrupt handler returns
1988 * and the code in intr_thread() updates intrstat[pil][0], it then sets
1989 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1990 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1991 * is 0.
1992 *
1993 * Whenever interrupts arrive on a CPU which is handling a lower pil
1994 * interrupt, they update the lower pil's [0] to show time spent in the
1995 * handler that they've interrupted. This results in a growing discrepancy
1996 * between [0] and [1], which is returned the next time intr_get_time() is
1997 * called. Time spent in the higher-pil interrupt will not be returned in
1998 * the next intr_get_time() call from the original interrupt, because
1999 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2000 */
2001
/* Lint-only stub; the real implementation is the assembly under #else. */
/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
2006#else	/* lint */
2007
	! Return (in %o0) the ticks spent in the current interrupt
	! handler since handler entry or the last intr_get_time() call.
	! See the block comment in the lint section above for the
	! intrstat[pil][0..1] bookkeeping scheme this implements.
	ENTRY_NP(intr_get_time)
#ifdef DEBUG
	!
	! Lots of asserts, but just check panic_quiesce first.
	! Don't bother with lots of tests if we're just ignoring them.
	!
	sethi	%hi(panic_quiesce), %o0
	ld	[%o0 + %lo(panic_quiesce)], %o0
	brnz,pn	%o0, 2f
	nop
	!
	! ASSERT(%pil <= LOCK_LEVEL)
	!
	rdpr	%pil, %o1
	cmp	%o1, LOCK_LEVEL
	ble,pt	%xcc, 0f
	sethi	%hi(intr_get_time_high_pil), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_high_pil), %o0
0:
	!
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f
	ldub	[THREAD_REG + T_PIL], %o1		! delay
	brnz,pt	%o1, 0f
1:
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2
	srlx	%o2, 1, %o2		! shake off NPT bit
	stx	%o2, [THREAD_REG + T_INTR_START]
	sub	%o2, %o3, %o0

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f
	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
1:
	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3
	add	%o3, CPU_MCPU, %o3
	add	%o3, MCPU_INTRSTAT, %o3
	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2		! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl.
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(intr_get_time)
2101
#ifdef DEBUG
	! Panic message strings referenced by the DEBUG assertions
	! in intr_get_time above.
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
2110#endif  /* lint */
2111
2112
2113#if !defined(lint)
2114
2115/*
2116 * Check shift value used for computing array offsets
2117 */
2118#if INTR_VECTOR_SIZE != (1 << INTR_VECTOR_SHIFT)
2119#error "INTR_VECTOR_SIZE has changed"
2120#endif
2121
2122#endif  /* lint */
2123