xref: /titanic_44/usr/src/uts/sun4/ml/interrupt.s (revision aad98a6d8e89f8f5a62a1793da807d4bc5e5b159)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29#if defined(lint)
30#include <sys/types.h>
31#include <sys/thread.h>
32#else	/* lint */
33#include "assym.h"
34#endif	/* lint */
35
36#include <sys/cmn_err.h>
37#include <sys/ftrace.h>
38#include <sys/asm_linkage.h>
39#include <sys/machthread.h>
40#include <sys/machcpuvar.h>
41#include <sys/intreg.h>
42
43#ifdef TRAPTRACE
44#include <sys/traptrace.h>
45#endif /* TRAPTRACE */
46
47
48
49#if defined(lint)
50
/* ARGSUSED */
/*
 * Lint-only stub.  The real pil_interrupt is the SPARC trap-table
 * handler in the assembly section below; this exists solely so lint
 * sees a C prototype.  Never executed.
 */
void
pil_interrupt(int level)
{}
55
56#else	/* lint */
57
58
/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * 	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 *
 * Dequeues the first intr_req for this PIL from the per-CPU pending
 * list, returns the intr_req to the free list, clears the iv_pending
 * flag, then tail-calls sys_trap() to run either intr_thread (low
 * level) or current_thread (above LOCK_LEVEL) at TL==0.
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g3 - intr_req
	!	%g4 - pil
	!	%g2, %g5, %g6 - temps
	!
	! grab the 1st intr_req off the list
	! if the list is empty, clear %clear_softint
	!
	CPU_ADDR(%g1, %g5)
	!
	! Alternate entry for callers that already computed the CPU
	! pointer into %g1.
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = pil as pointer-array offset
	add	%g1, INTR_HEAD, %g6	! intr_head[0]
	add	%g6, %g5, %g6		! intr_head[pil]
	ldn	[%g6], %g3		! g3 = intr_req

#ifndef DEBUG
	brnz,pt	%g3, 5f			! non-DEBUG: trust the list head
	nop
#else
	!
	! Verify the address of intr_req; it should be within the
	! address range of intr_pool and intr_head
	! or the address range of intr_add_head and intr_add_tail.
	! The range of intr_add_head and intr_add_tail is subdivided
	! by cpu, but the subdivision is not verified here.
	!
	! Registers passed to sys_trap()
	!	%g1 - no_intr_req
	!	%g2 - intr_req
	!	%g3 - %pil
	!	%g4 - current pil
	!
	add	%g1, INTR_POOL, %g2
	cmp	%g3, %g2
	blu,pn	%xcc, 8f
	nop
	add	%g1, INTR_HEAD, %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f
	nop
8:
	sethi	%hi(intr_add_head), %g2
	ldn	[%g2 + %lo(intr_add_head)], %g2
	brz,pn	%g2, 4f			! intr_add_head can be NULL
	cmp	%g3, %g2
	blu,pn	%xcc, 4f
	nop
	sethi	%hi(intr_add_tail), %g2
	ldn	[%g2 + %lo(intr_add_tail)], %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f
	nop
4:
#endif /* DEBUG */
#ifdef TRAPTRACE
	! Record a 0xbad trap-trace entry before panicking on the
	! out-of-range intr_req pointer.
	TRACE_PTR(%g5, %g2)
	GET_TRACE_TICK(%g2)
	stxa	%g2, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g2)
	mov	0xbad, %g2
	stha	%g2, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g2
	stna	%g2, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g2
	stxa	%g2, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%g0, [%g5 + TRAP_ENT_SP]%asi
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi
	rd	SOFTINT, %g2
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	stna	%g3, [%g5 + TRAP_ENT_F2]%asi
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g2, %g1)
#endif /* TRAPTRACE */
	ba	ptl1_panic		! bad intr_req pointer: fatal
	mov	PTL1_BAD_INTR_REQ, %g1
	! Have a valid intr_req: unlink it from intr_head[pil].
5:
	ldn	[%g3 + INTR_NEXT], %g2	! 2nd entry
	brnz,pn	%g2, 1f			! branch if list not empty
	stn	%g2, [%g6]
	add	%g1, INTR_TAIL, %g6	! intr_tail[0]
	stn	%g0, [%g5 + %g6]	! update intr_tail[pil]
	mov	1, %g5
	sll	%g5, %g4, %g5
	wr	%g5, CLEAR_SOFTINT	! list now empty: clear softint bit for this pil
1:
	!
	! put intr_req on free list
	!	%g2 - inumber
	!
	ldn	[%g1 + INTR_HEAD], %g5	! current head of free list
	lduw	[%g3 + INTR_NUMBER], %g2
	stn	%g3, [%g1 + INTR_HEAD]
	stn	%g5, [%g3 + INTR_NEXT]
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6		! intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6		! intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this inum
	!
	set	intr_vector, %g5;
	sll	%g2, INTR_VECTOR_SHIFT, %g6;
	add	%g5, %g6, %g5;			! &intr_vector[inum]
	sth	%g0, [%g5 + IV_PENDING]

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - inumber
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
	mov	%g4, %g3
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 4f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! annulled delay: only when branch taken
	sethi	%hi(intr_thread), %g1
	mov	DISP_LEVEL, %g4
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
4:
	sethi	%hi(current_thread), %g1
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)
221
222#endif	/* lint */
223
224
225#ifndef	lint
	! cmn_err(CE_WARN) format string used by SERVE_INTR when an
	! interrupt handler returns 0 (interrupt not serviced).  The
	! leading '!' suppresses output to the console (syslog only).
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"
228
/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Computes ls1 = &intr_vector[inum] and ls2 = inum, which are the
 * inputs SERVE_INTR expects, and (under TRAPTRACE) logs a trace
 * entry via SERVE_INTR_TRACE.
 *
 * Registers on entry:
 *
 * inum, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define SERVE_INTR_PRE(inum, cpu, ls1, ls2, os1, os2, os3, regs)	\
	set	intr_vector, ls1;					\
	sll	inum, INTR_VECTOR_SHIFT, os1;				\
	add	ls1, os1, ls1;						\
	SERVE_INTR_TRACE(inum, os1, os2, os3, regs);			\
	mov	inum, ls2;
246
/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * Loads the handler and its two args from the intr_vector entry
 * (ls1), calls it with %o0/%o1, and warns via cmn_err/_spurious if
 * the handler returns 0.  It then bumps the per-CPU
 * cpu_stats.sys.intr[pil] counter and reloads intr_head[pil] into
 * os3 so the caller can tell whether more requests are queued.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the inum just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */

#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	ldn	[ls1 + IV_HANDLER], os2;				\
	ldn	[ls1 + IV_ARG], %o0;					\
	ldn	[ls1 + IV_SOFTINT_ARG2], %o1;					\
	call	os2;							\
	lduh	[ls1 + IV_PIL], ls1;					\
	brnz,pt	%o0, 2f;						\
	mov	CE_WARN, %o0;						\
	set	_spurious, %o1;						\
	mov	ls2, %o2;						\
	call	cmn_err;						\
	rdpr	%pil, %o3;						\
2:	ldn	[THREAD_REG + T_CPU], cpu;				\
	sll	ls1, 3, os1;						\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;			\
	ldx	[cpu + os2], os3;					\
	inc	os3;							\
	stx	os3, [cpu + os2];					\
	sll	ls1, CPTRSHIFT, os2;					\
	add	cpu,  INTR_HEAD, os1;					\
	add	os1, os2, os1;						\
	ldn	[os1], os3;
291
/*
 * SERVE_INTR_NEXT dequeues the next pending intr_req (os3, loaded by
 * SERVE_INTR) for this pil with interrupts disabled (PSTATE_IE
 * cleared around the list manipulation), clears the softint bit if
 * the queue drains, returns the intr_req to the free list, clears
 * iv_pending, and leaves ls1 = &intr_vector[inum] and ls2 = inum
 * ready for the next SERVE_INTR.
 *
 * Registers on entry:
 *
 * cpu			- cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3		- preserved from prior call to SERVE_INTR
 * ls2			- local scratch reg (not preserved)
 * os1, os2, os4, os5	- scratch reg, can be out (not preserved)
 */
#define SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)		\
	sll	ls1, CPTRSHIFT, os4;					\
	add	cpu, INTR_HEAD, os1;					\
	rdpr	%pstate, ls2;						\
	wrpr	ls2, PSTATE_IE, %pstate;				\
	ldn 	[os3 + INTR_NEXT], os2;					\
	brnz,pn	os2, 4f;						\
	stn	os2, [os1 + os4];					\
	add	cpu, INTR_TAIL, os1;					\
	stn	%g0, [os1 + os4];					\
	mov	1, os1;							\
	sll	os1, ls1, os1;						\
	wr	os1, CLEAR_SOFTINT;					\
4:	ldn	[cpu + INTR_HEAD], os1;					\
	ld 	[os3 + INTR_NUMBER], os5;				\
	stn	os3, [cpu + INTR_HEAD];					\
	stn	os1, [os3 + INTR_NEXT];					\
	set	intr_vector, ls1;					\
	sll	os5, INTR_VECTOR_SHIFT, os1;				\
	add	ls1, os1, ls1;						\
	sth	%g0, [ls1 + IV_PENDING];				\
	wrpr	%g0, ls2, %pstate;					\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);			\
	mov	os5, ls2;
324
#ifdef TRAPTRACE
/*
 * Log a TT_SERVE_INTR trap-trace entry (pc/tstate taken from the
 * regs structure in os4) with interrupts and address masking
 * disabled around the trace buffer update; %pstate is restored on
 * exit.  Compiles to nothing without TRAPTRACE.
 *
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	ldn	[os4 + PC_OFF], os2;					\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;				\
	ldx	[os4 + TSTATE_OFF], os2;				\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2); 						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */
357
#ifdef TRAPTRACE
/*
 * Same as SERVE_INTR_TRACE but used from SERVE_INTR_NEXT, where no
 * regs structure is available: TPC/TSTATE are logged as zero.
 * Compiles to nothing without TRAPTRACE.
 *
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)			\
	rdpr	%pstate, os3;						\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;			\
	wrpr	%g0, os2, %pstate;					\
	TRACE_PTR(os1, os2);						\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;				\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;			\
	mov	os3, os4;						\
	GET_TRACE_TICK(os2); 						\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;				\
	TRACE_SAVE_TL_GL_REGS(os1, os2);				\
	set	TT_SERVE_INTR, os2;					\
	rdpr	%pil, os3;						\
	or	os2, os3, os2;						\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;				\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;				\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;				\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;				\
	TRACE_NEXT(os1, os2, os3);					\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */
388
389#endif	/* lint */
390
391#if defined(lint)
392
/*ARGSUSED*/
/*
 * Lint-only stub.  The real intr_thread is the SPARC assembly
 * handler below, reached from sys_trap() for interrupts at or
 * below LOCK_LEVEL.  Never executed.
 */
void
intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
{}
397
398#else	/* lint */
399
/*
 * Number of interrupt handlers this thread may run while pinning
 * another thread before forcing a preempt() to unpin it.
 */
#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = inumber
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is non-zero.
	rdpr	%tick, %o4			! delay
	sllx	%o4, 1, %o4			! shift off NPT bit
	srlx	%o4, 1, %o4
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to account
	! for the time slice, we want to "atomically" load the thread's
	! starting timestamp, calculate the interval with %tick, and zero
	! its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0 to it.
	! If it has changed since we loaded it above, we need to re-compute the
	! interval, since a changed t_intr_start implies current_thread placed
	! a new, later timestamp there after running a high-level interrupt,
	! and the %tick val in %o4 had become stale.
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting timestamp
	! changed between loading it (after label 0b) and computing the
	! interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	! casx retry loop: races with high-level interrupts updating
	! the same intracct[] slot.
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt thread
	! is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shift off NPT bit
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil			! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f			! skip if ftrace disabled
	  mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	call	ftrace_3
	mov	%i5, %o3
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_post, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %o1
	sllx	%o1, 1, %o1
	srlx	%o1, 1, %o1			! shift off NPT bit
	sub	%o1, %o0, %l2			! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1			! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1		! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1		! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5			! old counter in o5
	add	%o5, %l2, %o0			! new counter in o0
 	stx	%o0, [%o1 + 8]			! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0			! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This insures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
	lduh	[%o2 + CPU_FLAGS], %o5		! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn     [THREAD_REG + T_INTR], %o5      ! pinning anything?
	brz,pn  %o5, 3f				! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT		! have we hit the limit?
	bl,a,pt	%xcc, 1f			! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f			! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4				! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f				! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3			! delay - save %o2
	call	preempt
	mov	%o3, %l2			! delay - save %o3.
	mov	%l3, %o2			! restore %o2
	mov	%l2, %o3			! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil		! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_post and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b				! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple.  Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_star
0:
	ldx	[%o3], %o4			! o4 = t_intr_start before
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5			! shift off NPT bit
	casx	[%o3], %o4, %o5			! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no interrupts
	! are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4			! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8				! return via saved trap-code linkage
	nop
	SET_SIZE(intr_thread)
859	/* Not Reached */
860
	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there.  This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will dispatch
	! the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo interrupt
	! is enabled.
	!
	! Entered from intr_thread with %o2 = cpu, %l1 = pil.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	! Log a TT_INTR_EXIT trace entry with interrupts/address
	! masking disabled; %pstate restored afterwards.
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate			! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
        ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
        inc     %o4
        stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch				! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .				! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)
944
	! Format string passed to ftrace_3() from intr_thread's fast
	! event tracing path.
	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
	! panic() messages for the DEBUG-only assertions in
	! intr_thread and intr_thread_exit above.
intr_thread_actv_bit_set:
	.asciz	"intr_thread():	cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread():	cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread():	t_intr_start zero upon handler return"
#endif /* DEBUG */
958#endif	/* lint */
959
960#if defined(lint)
961
962/*
963 * Handle an interrupt in the current thread
964 *	Entry:
965 *		%o0       = pointer to regs structure
966 *		%o1       = inumber
967 *		%o2       = pil
968 *		%sp       = on current thread's kernel stack
969 *		%o7       = return linkage to trap code
970 *		%g7       = current thread
971 *		%pstate   = normal globals, interrupts enabled,
972 *		            privileged, fp disabled
973 *		%pil      = PIL_MAX
974 *
975 *	Register Usage
976 *		%l0       = return linkage
977 *		%l1       = old stack
978 *		%l2 - %l3 = scratch
979 *		%l4 - %l7 = reserved for sys_trap
980 *		%o3       = cpu
981 *		%o0       = scratch
982 *		%o4 - %o5 = scratch
983 */
/* ARGSUSED */
/*
 * Lint-only stub.  The real current_thread is the SPARC assembly
 * handler below, reached from sys_trap() for interrupts above
 * LOCK_LEVEL (see the register contract in the comment above).
 * Never executed.
 */
void
current_thread(struct regs *regs, uint_t inumber, uint_t pil)
{}
988
989#else	/* lint */
990
	ENTRY_NP(current_thread)
	!
	! Handle a high-level (> LOCK_LEVEL) interrupt in the current thread,
	! on the CPU's dedicated interrupt stack.  Entry conditions are
	! documented in the prototype comment above: %o0 = regs, %o1 = inumber,
	! %o2 = pil, %o7 = return linkage, %g7 = current thread, %pil = PIL_MAX.
	! The return linkage is saved in %l0 for the jmp at the end.
	!

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4			! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:

	! o2 = PIL of this interrupt
	! o5 = high PIL bits of INTR_ACTV (not including this PIL)
	! l1 = bitmask used to find other active high-level PIL
	! o4 = index of bit set in l1
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3		! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1			! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1			! shake off NPT bit
	sub	%l1, %l3, %l3			! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1		! old counter in l1
	add	%l1, %l3, %l1			! new counter in l1
	stx	%l1, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]		! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its interval
	! and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	sub	%o4, %o5, %o5			! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %l2		! old counter in l2
	add	%l2, %o5, %l2			! new counter in l2
	stx	%l2, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	! The old stack pointer is preserved in %l1 and restored at
	! current_thread_complete once no high-level interrupts remain active.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err				! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
        sub     %o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
        stx     %o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil			! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f			! branch if %o2 == 0: done;
						! nonzero means more intrs await
	rdpr	%pil, %o2			! delay - only done if branch
						! taken (annulled otherwise)
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ld	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr 	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	call	intr_enqueue_req		! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate		! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt			!  cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
        sub     %o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx    %o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
        rdpr    %tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx     [%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp			! if not, restore saved %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil			! enable interrupts
	SET_SIZE(current_thread)
1408
1409
1410#ifdef DEBUG
! panic()/cmn_err() format strings used by the DEBUG assertions in
! current_thread() above.
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
1423#endif /* DEBUG */
1424#endif /* lint */
1425
1426/*
1427 * Return a thread's interrupt level.
1428 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
1429 * must dig it out of the save area.
1430 *
1431 * Caller 'swears' that this really is an interrupt thread.
1432 *
1433 * int
1434 * intr_level(t)
1435 *	kthread_id_t	t;
1436 */
1437
1438#if defined(lint)
1439
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }
1444
1445#else	/* lint */
1446
	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! delay - return t->t_pil
	SET_SIZE(intr_level)
1451
1452#endif	/* lint */
1453
1454#if defined(lint)
1455
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }
1460
1461#else	/* lint */
1462
	!
	! Raise %pil to PIL_MAX, blocking level 1-15 interrupts.
	! Returns the previous %pil in %o0 for a later enable_pil_intr().
	!
	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)
1468
1469#endif	/* lint */
1470
1471#if defined(lint)
1472
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}
1477
1478#else	/* lint */
1479
	!
	! Restore %pil from %o0 (a value previously returned by
	! disable_pil_intr()).
	!
	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil			! delay - set %pil = pil_save
	SET_SIZE(enable_pil_intr)
1484
1485#endif	/* lint */
1486
1487#if defined(lint)
1488
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }
1493
1494#else	/* lint */
1495
	!
	! Clear PSTATE.IE to block vectored interrupts.  Returns the previous
	! %pstate in %o0 for a later enable_vec_intr().
	!
	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! disable interrupt
	SET_SIZE(disable_vec_intr)
1502
1503#endif	/* lint */
1504
1505#if defined(lint)
1506
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}
1511
1512#else	/* lint */
1513
	!
	! Restore %pstate from %o0 (a value previously returned by
	! disable_vec_intr()).
	!
	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate		! delay - restore %pstate
	SET_SIZE(enable_vec_intr)
1518
1519#endif	/* lint */
1520
1521#if defined(lint)
1522
/* Lint stub only; the real implementation is the assembly version below. */
void
cbe_level14(void)
{}
1526
1527#else   /* lint */
1528
	!
	! Level-14 cyclic backend handler: if the pending softint came from
	! TICK_COMPARE/STICK_COMPARE, call cyclic_fire(CPU).
	! Always returns 1 in %o0 (see the restore below).
	!
	ENTRY_NP(cbe_level14)
	save    %sp, -SA(MINFRAME), %sp ! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0		! delay - arg0 = CPU pointer
2:
	ret
	restore	%g0, 1, %o0		! delay - return value 1
	SET_SIZE(cbe_level14)
1547
1548#endif  /* lint */
1549
1550
1551#if defined(lint)
1552
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
void
setsoftint(uint_t inum)
{}
1557
1558#else	/* lint */
1559
	!
	! Queue a software interrupt request for intr_vector[inum] on this
	! CPU's per-PIL softint list and post it via %set_softint.  If a
	! request for this inum is already pending (iv_pending set), the
	! routine does nothing.  Runs with PSTATE.IE cleared for the duration.
	!
	! NOTE(review): unlike setsoftint_tl1 below, there is no check here
	! for an exhausted intr_req free list (cf. .no_intr_pool) --
	! presumably the pool sizing guarantees availability; confirm.
	!
	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number.
	! Put the request on the cpu's softint list,
	! and set %set_softint.
	!
	! Register usage
	!	%i0 - inumber
	!	%l2 - requested pil
	!	%l3 - intr_req
	!	%l4 - *cpu
	!	%l1, %l6 - temps
	!
	! check if a softint is pending for this inum already
	! if one is pending, don't bother queuing another
	!
	set	intr_vector, %l1
	sll	%i0, INTR_VECTOR_SHIFT, %l6
	add	%l1, %l6, %l1			! %l1 = &intr_vector[inum]
	lduh	[%l1 + IV_PENDING], %l6
	brnz,pn	%l6, 4f				! branch, if pending
	or	%g0, 1, %l2
	sth	%l2, [%l1 + IV_PENDING]		! intr_vector[inum].pend = 1
	!
	! allocate an intr_req from the free list
	!
	CPU_ADDR(%l4, %l2)
	ldn	[%l4 + INTR_HEAD], %l3
	lduh	[%l1 + IV_PIL], %l2
	!
	! fixup free list
	!
	ldn	[%l3 + INTR_NEXT], %l6
	stn	%l6, [%l4 + INTR_HEAD]
	!
	! fill up intr_req
	!
	st	%i0, [%l3 + INTR_NUMBER]
	stn	%g0, [%l3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%l2, CPTRSHIFT, %l0
	add	%l4, INTR_TAIL, %l6
	ldn	[%l6 + %l0], %l1	! current tail
	brz,pt	%l1, 2f			! branch if list empty
	stn	%l3, [%l6 + %l0]	! make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%l3, [%l1 + INTR_NEXT]	! update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%l4, INTR_HEAD, %l6
	stn	%l3, [%l6 + %l0]
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1
	sll	%l1, %l2, %l1
	wr	%l1, SET_SOFTINT
4:
	wrpr	%g0, %l5, %pstate	! re-enable interrupts and return
	ret
	restore
	SET_SIZE(setsoftint)
1636
1637#endif	/* lint */
1638
1639#if defined(lint)
1640
/* Lint stub only; the real implementation is the assembly version below. */
/*ARGSUSED*/
void
setsoftint_tl1(uint64_t inum, uint64_t dummy)
{}
1645
1646#else	/* lint */
1647
1648	!
1649	! Register usage
1650	!
1651	! Arguments:
1652	! %g1 - inumber
1653	!
1654	! Internal:
1655	! %g2 - requested pil
1656	! %g3 - intr_req
1657	! %g4 - cpu pointer
1658	! %g5,%g6,%g7 - temps
1659	!
1660	ENTRY_NP(setsoftint_tl1)
1661	!
1662	! Verify the inumber received (should be inum < MAXIVNUM).
1663	!
1664	set	MAXIVNUM, %g2
1665	cmp	%g1, %g2
1666	bgeu,pn	%xcc, .no_ivintr
1667	clr	%g2			! expected in .no_ivintr
1668	!
1669	! Fetch data from intr_vector[] table according to the inum.
1670	!
1671	! We have an interrupt number. Put the request on the cpu's softint
1672	! list, and set %set_softint.
1673	!
1674	set	intr_vector, %g5
1675	sll	%g1, INTR_VECTOR_SHIFT, %g6
1676	add	%g5, %g6, %g5			! %g5 = &intr_vector[inum]
1677
1678	!
1679	! allocate an intr_req from the free list
1680	!
1681	CPU_ADDR(%g4, %g2)
1682	ldn	[%g4 + INTR_HEAD], %g3
1683
1684	! load the pil so it can be used by .no_intr_pool/.no_ivintr
1685	lduh	[%g5 + IV_PIL], %g2
1686
1687	! Verify that the free list is not exhausted.
1688	brz,pn	%g3, .no_intr_pool
1689	nop
1690
1691	! Verify the intr_vector[] entry according to the inumber.
1692	! The iv_pil field should not be zero.  This used to be
1693	! guarded by DEBUG but broken drivers can cause spurious
1694	! tick interrupts when the softint register is programmed
1695	! with 1 << 0 at the end of this routine.  Now we always
1696	! check for an invalid pil.
1697	brz,pn	%g2, .no_ivintr
1698	nop
1699
1700	!
1701	! fixup free list
1702	!
1703	ldn	[%g3 + INTR_NEXT], %g6
1704	stn	%g6, [%g4 + INTR_HEAD]
1705
1706	!
1707	! fill in intr_req
1708	!
1709	st	%g1, [%g3 + INTR_NUMBER]
1710	stn	%g0, [%g3 + INTR_NEXT]
1711	!
1712	! move intr_req to appropriate list
1713	!
1714	sll	%g2, CPTRSHIFT, %g7
1715	add	%g4, INTR_TAIL, %g6
1716	ldn	[%g6 + %g7], %g5	! current tail
1717	brz,pt	%g5, 2f			! branch if list empty
1718	stn	%g3, [%g6 + %g7]	! make intr_req new tail
1719	!
1720	! there's pending intr_req already
1721	!
1722	ba,pt	%xcc, 3f
1723	stn	%g3, [%g5 + INTR_NEXT]	! update old tail
17242:
1725	!
1726	! no pending intr_req; make intr_req new head
1727	!
1728	add	%g4, INTR_HEAD, %g6
1729	stn	%g3, [%g6 + %g7]
17303:
1731#ifdef TRAPTRACE
1732	TRACE_PTR(%g1, %g6)
1733	GET_TRACE_TICK(%g6)
1734	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
1735	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
1736	rdpr	%tt, %g6
1737	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
1738	rdpr	%tpc, %g6
1739	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
1740	rdpr	%tstate, %g6
1741	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
1742	stna	%sp, [%g1 + TRAP_ENT_SP]%asi
1743	ld	[%g3 + INTR_NUMBER], %g6
1744	stna	%g6, [%g1 + TRAP_ENT_TR]%asi
1745	add	%g4, INTR_HEAD, %g6
1746	ldn	[%g6 + %g7], %g6		! intr_head[pil]
1747	stna	%g6, [%g1 + TRAP_ENT_F1]%asi
1748	add	%g4, INTR_TAIL, %g6
1749	ldn	[%g6 + %g7], %g6		! intr_tail[pil]
1750	stna	%g6, [%g1 + TRAP_ENT_F2]%asi
1751	stna	%g2, [%g1 + TRAP_ENT_F3]%asi	! pil
1752	stna	%g3, [%g1 + TRAP_ENT_F4]%asi	! intr_req
1753	TRACE_NEXT(%g1, %g6, %g5)
1754#endif /* TRAPTRACE */
1755	!
1756	! Write %set_softint with (1<<pil) to cause a "pil" level trap
1757	!
1758	mov	1, %g5
1759	sll	%g5, %g2, %g5
1760	wr	%g5, SET_SOFTINT
17614:
1762	retry
1763
1764.no_intr_pool:
1765	! no_intr_pool: rp, inum (%g1), pil (%g2)
1766	mov	%g2, %g3
1767	mov	%g1, %g2
1768	set	no_intr_pool, %g1
1769	ba,pt	%xcc, sys_trap
1770	mov	PIL_15, %g4
1771
1772.no_ivintr:
1773	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
1774	mov	%g2, %g3
1775	mov	%g1, %g2
1776	set	no_ivintr, %g1
1777	ba,pt	%xcc, sys_trap
1778	mov	PIL_15, %g4
1779	SET_SIZE(setsoftint_tl1)
1780
1781#endif	/* lint */
1782
1783#if defined(lint)
1784
/* Lint stub only; the real implementation is the assembly version below. */
/*ARGSUSED*/
void
wr_clr_softint(uint_t value)
{}
1789
1790#else
1791
	!
	! Write %o0 to the CLEAR_SOFTINT register, clearing the
	! corresponding soft interrupt bits.
	!
	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT		! delay
	SET_SIZE(wr_clr_softint)
1796
1797#endif /* lint */
1798
1799#if defined(lint)
1800
/* Lint stub only; the real implementation is the assembly version below. */
/*ARGSUSED*/
void
intr_enqueue_req(uint_t pil, uint32_t inum)
{}
1805
1806#else   /* lint */
1807
1808/*
1809 * intr_enqueue_req
1810 *
1811 * %o0 - pil
1812 * %o1 - inum
1813 * %o5 - preserved
1814 * %g5 - preserved
1815 */
1816	ENTRY_NP(intr_enqueue_req)
1817	! get intr_req free list
1818	CPU_ADDR(%g4, %g1)
1819	ldn	[%g4 + INTR_HEAD], %g3
1820
1821	! take intr_req from free list
1822	ldn	[%g3 + INTR_NEXT], %g6
1823	stn	%g6, [%g4 + INTR_HEAD]
1824
1825	! fill up intr_req
1826	st	%o1, [%g3 + INTR_NUMBER]
1827	stn	%g0, [%g3 + INTR_NEXT]
1828
1829	! add intr_req to proper pil list
1830	sll	%o0, CPTRSHIFT, %o0
1831	add	%g4, INTR_TAIL, %g6
1832	ldn	[%o0 + %g6], %g1	! current tail
1833	brz,pt	%g1, 2f			! branch if list is empty
1834	stn	%g3, [%g6 + %o0]	! make intr_req the new tail
1835
1836	! an intr_req was already queued so update old tail
1837	ba,pt	%xcc, 3f
1838	stn	%g3, [%g1 + INTR_NEXT]
18392:
1840	! no intr_req's queued so make intr_req the new head
1841	add	%g4, INTR_HEAD, %g6
1842	stn	%g3, [%g6 + %o0]
18433:
1844	retl
1845	nop
1846	SET_SIZE(intr_enqueue_req)
1847
1848#endif  /* lint */
1849
1850/*
1851 * Set CPU's base SPL level, based on which interrupt levels are active.
1852 * 	Called at spl7 or above.
1853 */
1854
1855#if defined(lint)
1856
/* Lint stub only; the real implementation is the assembly version below. */
void
set_base_spl(void)
{}
1860
1861#else	/* lint */
1862
	!
	! Recompute cpu_base_spl: the highest PIL still marked active in
	! cpu_intr_actv.  Uses _intr_flag_table to find the most significant
	! set bit, testing the mask in three 5-bit groups (15-11, 10-6, 5-1).
	!
	ENTRY_NP(set_base_spl)
	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask

/*
 * WARNING: non-standard calling sequence; do not call from C
 *	%o2 = pointer to CPU
 *	%o5 = updated CPU_INTR_ACTV
 */
_intr_set_spl:					! intr_thread_exit enters here
	!
	! Determine highest interrupt level active.  Several could be blocked
	! at higher levels than this one, so must convert flags to a PIL
	! Normally nothing will be blocked, so test this first.
	!
	brz,pt	%o5, 1f				! nothing active
	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
	set	_intr_flag_table, %o1
	tst	%o3				! see if any of the bits set
	ldub	[%o1 + %o3], %o3		! load bit number
	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
	add	%o3, 11-1, %o3			! delay - add bit number - 1

	sra	%o5, 6, %o3			! test bits 10-6
	tst	%o3
	ldub	[%o1 + %o3], %o3
	bnz,a,pn %xcc, 1f
	add	%o3, 6-1, %o3

	sra	%o5, 1, %o3			! test bits 5-1
	ldub	[%o1 + %o3], %o3

	!
	! highest interrupt level number active is in %o3
	! (zero if nothing was active)
	!
1:
	retl
	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
	SET_SIZE(set_base_spl)
1902
1903/*
1904 * Table that finds the most significant bit set in a five bit field.
1905 * Each entry is the high-order bit number + 1 of it's index in the table.
1906 * This read-only data is in the text segment.
1907 */
! _intr_flag_table[i] = (position of highest set bit in i) + 1; 0 for i == 0.
_intr_flag_table:
	.byte	0, 1, 2, 2,	3, 3, 3, 3,	4, 4, 4, 4,	4, 4, 4, 4
	.byte	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5,	5, 5, 5, 5
	.align	4
1912
1913#endif	/* lint */
1914
1915/*
1916 * int
1917 * intr_passivate(from, to)
1918 *	kthread_id_t	from;		interrupt thread
1919 *	kthread_id_t	to;		interrupted thread
1920 */
1921
1922#if defined(lint)
1923
/* Lint stub only; the real implementation is the assembly version below. */
/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }
1928
1929#else	/* lint */
1930
	!
	! Move the register-window save area from the base of the interrupt
	! thread's stack (from->t_stack) to the save area at the top of the
	! interrupted thread's stack (to->t_sp), then return the interrupt
	! thread's saved PIL (from->t_pil).
	!
	ENTRY_NP(intr_passivate)
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	flushw				! force register windows to stack
	!
	! restore registers from the base of the stack of the interrupt thread.
	!
	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
	ldn	[%i2 + (1*GREGSIZE)], %l1
	ldn	[%i2 + (2*GREGSIZE)], %l2
	ldn	[%i2 + (3*GREGSIZE)], %l3
	ldn	[%i2 + (4*GREGSIZE)], %l4
	ldn	[%i2 + (5*GREGSIZE)], %l5
	ldn	[%i2 + (6*GREGSIZE)], %l6
	ldn	[%i2 + (7*GREGSIZE)], %l7
	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
	ldn	[%i2 + (9*GREGSIZE)], %o1
	ldn	[%i2 + (10*GREGSIZE)], %o2
	ldn	[%i2 + (11*GREGSIZE)], %o3
	ldn	[%i2 + (12*GREGSIZE)], %o4
	ldn	[%i2 + (13*GREGSIZE)], %o5
	ldn	[%i2 + (14*GREGSIZE)], %i4
					! copy stack/pointer without using %sp
	ldn	[%i2 + (15*GREGSIZE)], %i5
	!
	! put registers into the save area at the top of the interrupted
	! thread's stack, pointed to by %l7 in the save area just loaded.
	!
	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
						! fp, %i7 copied using %i4
	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]
						! clear fp in save area

	! load saved pil for return
	ldub	[%i0 + T_PIL], %i0
	ret
	restore
	SET_SIZE(intr_passivate)
1986
1987#endif	/* lint */
1988
1989#if defined(lint)
1990
1991/*
1992 * intr_get_time() is a resource for interrupt handlers to determine how
1993 * much time has been spent handling the current interrupt. Such a function
1994 * is needed because higher level interrupts can arrive during the
1995 * processing of an interrupt, thus making direct comparisons of %tick by
1996 * the handler inaccurate. intr_get_time() only returns time spent in the
1997 * current interrupt handler.
1998 *
1999 * The caller must be calling from an interrupt handler running at a pil
2000 * below or at lock level. Timings are not provided for high-level
2001 * interrupts.
2002 *
2003 * The first time intr_get_time() is called while handling an interrupt,
2004 * it returns the time since the interrupt handler was invoked. Subsequent
2005 * calls will return the time since the prior call to intr_get_time(). Time
2006 * is returned as ticks, adjusted for any clock divisor due to power
2007 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
2008 * not be the same across CPUs.
2009 *
2010 * Theory Of Intrstat[][]:
2011 *
2012 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
2013 * uint64_ts per pil.
2014 *
2015 * intrstat[pil][0] is a cumulative count of the number of ticks spent
2016 * handling all interrupts at the specified pil on this CPU. It is
2017 * exported via kstats to the user.
2018 *
2019 * intrstat[pil][1] is always a count of ticks less than or equal to the
2020 * value in [0]. The difference between [1] and [0] is the value returned
2021 * by a call to intr_get_time(). At the start of interrupt processing,
2022 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
2023 * time, [0] will increase, but [1] will remain the same. A call to
2024 * intr_get_time() will return the difference, then update [1] to be the
2025 * same as [0]. Future calls will return the time since the last call.
2026 * Finally, when the interrupt completes, [1] is updated to the same as [0].
2027 *
2028 * Implementation:
2029 *
2030 * intr_get_time() works much like a higher level interrupt arriving. It
2031 * "checkpoints" the timing information by incrementing intrstat[pil][0]
2032 * to include elapsed running time, and by setting t_intr_start to %tick.
2033 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
2034 * and updates intrstat[pil][1] to be the same as the new value of
2035 * intrstat[pil][0].
2036 *
2037 * In the normal handling of interrupts, after an interrupt handler returns
2038 * and the code in intr_thread() updates intrstat[pil][0], it then sets
2039 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
2040 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
2041 * is 0.
2042 *
2043 * Whenever interrupts arrive on a CPU which is handling a lower pil
2044 * interrupt, they update the lower pil's [0] to show time spent in the
2045 * handler that they've interrupted. This results in a growing discrepancy
2046 * between [0] and [1], which is returned the next time intr_get_time() is
2047 * called. Time spent in the higher-pil interrupt will not be returned in
2048 * the next intr_get_time() call from the original interrupt, because
2049 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
2050 */
2051
/* Lint stub only; the real implementation is the assembly version below. */
/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
2056#else	/* lint */
2057
/*
 * uint64_t intr_get_time(void)
 *
 * Return (in %o0) the interrupt time accrued at the calling interrupt
 * thread's PIL since interrupt entry or the previous intr_get_time()
 * call (see the block comment above for the full [0]/[1] contract).
 * Must be called from an interrupt thread (asserted under DEBUG).
 * Runs briefly at PIL_MAX to block normal interrupts; on return %pil
 * is set to max(t_pil, cpu_base_spl).  Side effects: updates
 * t_intr_start, cpu_intracct[cpu_mstate], and cpu_m.intrstat[pil][0..1].
 */
	ENTRY_NP(intr_get_time)
#ifdef DEBUG
	!
	! Lots of asserts, but just check panic_quiesce first.
	! Don't bother with lots of tests if we're just ignoring them.
	!
	sethi	%hi(panic_quiesce), %o0
	ld	[%o0 + %lo(panic_quiesce)], %o0
	brnz,pn	%o0, 2f			! already panicking: skip all asserts
	nop
	!
	! ASSERT(%pil <= LOCK_LEVEL)
	!
	rdpr	%pil, %o1
	cmp	%o1, LOCK_LEVEL
	ble,pt	%xcc, 0f
	sethi	%hi(intr_get_time_high_pil), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_high_pil), %o0
0:
	!
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f		! not an interrupt thread -> panic
	ldub	[THREAD_REG + T_PIL], %o1		! delay
	brnz,pt	%o1, 0f			! t_pil != 0 -> both asserts hold
1:					! (sethi below is brnz's delay slot;
					! it only clobbers %o0, dead if taken)
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3 ! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2		! shift bit 63 (TICK.NPT) out ...
	srlx	%o2, 1, %o2		! ... and back, leaving the counter
	stx	%o2, [THREAD_REG + T_INTR_START]	! t_intr_start = now
	sub	%o2, %o3, %o0		! %o0 = now - old t_intr_start

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f		! annulled: mulx only when divisor > 1
	mulx	%o0, %o4, %o0	! multiply interval by clock divisor iff > 1
1:
	! Update intracct[]
	lduh	[%o5 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4		! mstate * 8 (8-byte counters)
	add	%o4, CPU_INTRACCT, %o4	! offset of cpu_intracct[mstate]
	ldx	[%o5 + %o4], %o2
	add	%o2, %o0, %o2		! cpu_intracct[mstate] += delta
	stx	%o2, [%o5 + %o4]

	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3		! pil * 16: two 8-byte counters per pil
	add	%o3, CPU_MCPU, %o3
	add	%o3, MCPU_INTRSTAT, %o3
	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2		! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl (movl: if
						! %o2 < %o1, %o2 = %o1)
	retl
	wrpr	%g0, %o2, %pil		! delay: drop from PIL_MAX on return
	SET_SIZE(intr_get_time)
2159
#ifdef DEBUG
/*
 * Panic format strings for the DEBUG-only assertions in intr_get_time().
 */
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
2168#endif  /* lint */
2169
2170
#if !defined(lint)

/*
 * Compile-time consistency check: code in this file computes interrupt
 * vector table offsets as (index << INTR_VECTOR_SHIFT), which is only
 * valid while INTR_VECTOR_SIZE == (1 << INTR_VECTOR_SHIFT).  Fail the
 * build rather than silently mis-index the table if the entry size
 * ever changes.
 */
#if INTR_VECTOR_SIZE != (1 << INTR_VECTOR_SHIFT)
#error "INTR_VECTOR_SIZE has changed"
#endif

#endif  /* lint */
2181