xref: /titanic_53/usr/src/uts/i86pc/ml/interrupt.s (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate/*
2*7c478bd9Sstevel@tonic-gate * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate *
4*7c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate * with the License.
8*7c478bd9Sstevel@tonic-gate *
9*7c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate *
14*7c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate *
20*7c478bd9Sstevel@tonic-gate * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate */
22*7c478bd9Sstevel@tonic-gate/*
23*7c478bd9Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate */
26*7c478bd9Sstevel@tonic-gate
27*7c478bd9Sstevel@tonic-gate/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28*7c478bd9Sstevel@tonic-gate/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
29*7c478bd9Sstevel@tonic-gate/*	  All Rights Reserved					*/
30*7c478bd9Sstevel@tonic-gate
31*7c478bd9Sstevel@tonic-gate/*	Copyright (c) 1987, 1988 Microsoft Corporation		*/
32*7c478bd9Sstevel@tonic-gate/*	  All Rights Reserved					*/
33*7c478bd9Sstevel@tonic-gate
34*7c478bd9Sstevel@tonic-gate#pragma ident	"%Z%%M%	%I%	%E% SMI"
35*7c478bd9Sstevel@tonic-gate
36*7c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h>
37*7c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h>
38*7c478bd9Sstevel@tonic-gate#include <sys/regset.h>
39*7c478bd9Sstevel@tonic-gate#include <sys/psw.h>
40*7c478bd9Sstevel@tonic-gate#include <sys/x86_archext.h>
41*7c478bd9Sstevel@tonic-gate
42*7c478bd9Sstevel@tonic-gate#if defined(__lint)
43*7c478bd9Sstevel@tonic-gate
44*7c478bd9Sstevel@tonic-gate#include <sys/types.h>
45*7c478bd9Sstevel@tonic-gate#include <sys/thread.h>
46*7c478bd9Sstevel@tonic-gate#include <sys/systm.h>
47*7c478bd9Sstevel@tonic-gate
48*7c478bd9Sstevel@tonic-gate#else   /* __lint */
49*7c478bd9Sstevel@tonic-gate
50*7c478bd9Sstevel@tonic-gate#include <sys/segments.h>
51*7c478bd9Sstevel@tonic-gate#include <sys/pcb.h>
52*7c478bd9Sstevel@tonic-gate#include <sys/trap.h>
53*7c478bd9Sstevel@tonic-gate#include <sys/ftrace.h>
54*7c478bd9Sstevel@tonic-gate#include <sys/traptrace.h>
55*7c478bd9Sstevel@tonic-gate#include <sys/clock.h>
56*7c478bd9Sstevel@tonic-gate#include <sys/panic.h>
57*7c478bd9Sstevel@tonic-gate#include "assym.h"
58*7c478bd9Sstevel@tonic-gate
/*
 * Format string handed to ftrace_3_notick() by the intr_thread path
 * below (amd64); the three arguments are &regs, vec, and newipl.
 */
 59*7c478bd9Sstevel@tonic-gate_ftrace_intr_thread_fmt:
 60*7c478bd9Sstevel@tonic-gate	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"
61*7c478bd9Sstevel@tonic-gate
62*7c478bd9Sstevel@tonic-gate#endif	/* lint */
63*7c478bd9Sstevel@tonic-gate
64*7c478bd9Sstevel@tonic-gate#if defined(__i386)
65*7c478bd9Sstevel@tonic-gate
66*7c478bd9Sstevel@tonic-gate#if defined(__lint)
67*7c478bd9Sstevel@tonic-gate
/* lint-only C prototype for the assembly patch_tsc() below; never compiled. */
 68*7c478bd9Sstevel@tonic-gatevoid
 69*7c478bd9Sstevel@tonic-gatepatch_tsc(void)
 70*7c478bd9Sstevel@tonic-gate{}
71*7c478bd9Sstevel@tonic-gate
72*7c478bd9Sstevel@tonic-gate#else	/* __lint */
73*7c478bd9Sstevel@tonic-gate
74*7c478bd9Sstevel@tonic-gate/*
75*7c478bd9Sstevel@tonic-gate * To cope with processors that do not implement the rdtsc instruction,
76*7c478bd9Sstevel@tonic-gate * we patch the kernel to use rdtsc if that feature is detected on the CPU.
77*7c478bd9Sstevel@tonic-gate * On an unpatched kernel, all locations requiring rdtsc are nop's.
78*7c478bd9Sstevel@tonic-gate *
79*7c478bd9Sstevel@tonic-gate * This function patches the nop's to rdtsc.
80*7c478bd9Sstevel@tonic-gate */
/*
 * patch_tsc(): copy the 2-byte rdtsc opcode stored at _rdtsc_insn over
 * each 2-byte "nop; nop" patch site (_tsc_patch1 .. _tsc_patch16) found
 * elsewhere in this file.  Clobbers %cx only.  Note this writes into
 * the text segment, so it must run before the patch sites are executed
 * concurrently (single-threaded early boot -- TODO confirm caller context).
 */
 81*7c478bd9Sstevel@tonic-gate	ENTRY_NP(patch_tsc)
 82*7c478bd9Sstevel@tonic-gate	movw	_rdtsc_insn, %cx	/* %cx = the two rdtsc opcode bytes */
 83*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch1
 84*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch2
 85*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch3
 86*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch4
 87*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch5
 88*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch6
 89*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch7
 90*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch8
 91*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch9
 92*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch10
 93*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch11
 94*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch12
 95*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch13
 96*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch14
 97*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch15
 98*7c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch16
 99*7c478bd9Sstevel@tonic-gate	ret
100*7c478bd9Sstevel@tonic-gate_rdtsc_insn:
	/* reached only as data: the bytes of this rdtsc are what gets copied */
101*7c478bd9Sstevel@tonic-gate	rdtsc
102*7c478bd9Sstevel@tonic-gate	SET_SIZE(patch_tsc)
103*7c478bd9Sstevel@tonic-gate
104*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
105*7c478bd9Sstevel@tonic-gate
106*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
107*7c478bd9Sstevel@tonic-gate
108*7c478bd9Sstevel@tonic-gate
109*7c478bd9Sstevel@tonic-gate#if defined(__lint)
110*7c478bd9Sstevel@tonic-gate
/* lint-only C prototype for the assembly _interrupt() below; never compiled. */
111*7c478bd9Sstevel@tonic-gatevoid
112*7c478bd9Sstevel@tonic-gate_interrupt(void)
113*7c478bd9Sstevel@tonic-gate{}
114*7c478bd9Sstevel@tonic-gate
115*7c478bd9Sstevel@tonic-gate#else	/* __lint */
116*7c478bd9Sstevel@tonic-gate
117*7c478bd9Sstevel@tonic-gate#if defined(__amd64)
118*7c478bd9Sstevel@tonic-gate
119*7c478bd9Sstevel@tonic-gate	/*
120*7c478bd9Sstevel@tonic-gate	 * Common register usage:
121*7c478bd9Sstevel@tonic-gate	 *
122*7c478bd9Sstevel@tonic-gate	 * %rbx		cpu pointer
123*7c478bd9Sstevel@tonic-gate	 * %r12		trap trace pointer -and- stash of
124*7c478bd9Sstevel@tonic-gate	 *		vec across intr_thread dispatch.
125*7c478bd9Sstevel@tonic-gate	 * %r13d	ipl of isr
126*7c478bd9Sstevel@tonic-gate	 * %r14d	old ipl (ipl level we entered on)
127*7c478bd9Sstevel@tonic-gate	 * %r15		interrupted thread stack pointer
128*7c478bd9Sstevel@tonic-gate	 */
129*7c478bd9Sstevel@tonic-gate	ENTRY_NP2(cmnint, _interrupt)
130*7c478bd9Sstevel@tonic-gate
131*7c478bd9Sstevel@tonic-gate	INTR_PUSH
132*7c478bd9Sstevel@tonic-gate
133*7c478bd9Sstevel@tonic-gate	/*
134*7c478bd9Sstevel@tonic-gate	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
135*7c478bd9Sstevel@tonic-gate	 */
136*7c478bd9Sstevel@tonic-gate	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
137*7c478bd9Sstevel@tonic-gate						/* Uses labels 8 and 9 */
138*7c478bd9Sstevel@tonic-gate	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
139*7c478bd9Sstevel@tonic-gate	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */
140*7c478bd9Sstevel@tonic-gate
141*7c478bd9Sstevel@tonic-gate	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */
142*7c478bd9Sstevel@tonic-gate
143*7c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp	/* %rbp = &regs for the rest of the handler */
144*7c478bd9Sstevel@tonic-gate
145*7c478bd9Sstevel@tonic-gate	TRACE_STACK(%r12)
146*7c478bd9Sstevel@tonic-gate
147*7c478bd9Sstevel@tonic-gate	LOADCPU(%rbx)				/* &cpu */
148*7c478bd9Sstevel@tonic-gate	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
149*7c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
150*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx	/* pending softint bits */
151*7c478bd9Sstevel@tonic-gate
152*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
153*7c478bd9Sstevel@tonic-gate	movl	$255, TTR_IPL(%r12)
154*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edi
155*7c478bd9Sstevel@tonic-gate	movb	%dil, TTR_PRI(%r12)
156*7c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%rbx), %edi
157*7c478bd9Sstevel@tonic-gate	movb	%dil, TTR_SPL(%r12)
158*7c478bd9Sstevel@tonic-gate	movb	$255, TTR_VECTOR(%r12)
159*7c478bd9Sstevel@tonic-gate#endif
160*7c478bd9Sstevel@tonic-gate
161*7c478bd9Sstevel@tonic-gate	/*
162*7c478bd9Sstevel@tonic-gate	 * Check to see if the trap number is T_SOFTINT; if it is,
163*7c478bd9Sstevel@tonic-gate	 * jump straight to dosoftint now.
164*7c478bd9Sstevel@tonic-gate	 */
165*7c478bd9Sstevel@tonic-gate	cmpq	$T_SOFTINT, (%rsi)
166*7c478bd9Sstevel@tonic-gate	je	dosoftint
167*7c478bd9Sstevel@tonic-gate
168*7c478bd9Sstevel@tonic-gate	/*
169*7c478bd9Sstevel@tonic-gate	 * Raise the interrupt priority level, returns newpil.
170*7c478bd9Sstevel@tonic-gate	 * (The vector address is in %rsi so setlvl can update it.)
171*7c478bd9Sstevel@tonic-gate	 */
172*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edi			/* old ipl */
173*7c478bd9Sstevel@tonic-gate						/* &vector */
174*7c478bd9Sstevel@tonic-gate	call	*setlvl(%rip)
175*7c478bd9Sstevel@tonic-gate
176*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
177*7c478bd9Sstevel@tonic-gate	movb	%al, TTR_IPL(%r12)
178*7c478bd9Sstevel@tonic-gate#endif
179*7c478bd9Sstevel@tonic-gate	/*
180*7c478bd9Sstevel@tonic-gate	 * check for spurious interrupt (setlvl returned -1):
181*7c478bd9Sstevel@tonic-gate	 * nothing to dispatch, just return from the interrupt.
182*7c478bd9Sstevel@tonic-gate	 */
183*7c478bd9Sstevel@tonic-gate	cmpl	$-1, %eax
184*7c478bd9Sstevel@tonic-gate	je	_sys_rtt
185*7c478bd9Sstevel@tonic-gate
186*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
187*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edx
188*7c478bd9Sstevel@tonic-gate	movb	%dl, TTR_PRI(%r12)
189*7c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%rbx), %edx
190*7c478bd9Sstevel@tonic-gate	movb	%dl, TTR_SPL(%r12)
191*7c478bd9Sstevel@tonic-gate#endif
192*7c478bd9Sstevel@tonic-gate	movl	%eax, CPU_PRI(%rbx)		/* update ipl */
193*7c478bd9Sstevel@tonic-gate
194*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
195*7c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %edx
196*7c478bd9Sstevel@tonic-gate	movb	%dl, TTR_VECTOR(%r12)
197*7c478bd9Sstevel@tonic-gate#endif
198*7c478bd9Sstevel@tonic-gate	movl	%eax, %r13d			/* ipl of isr */
199*7c478bd9Sstevel@tonic-gate
200*7c478bd9Sstevel@tonic-gate	/*
201*7c478bd9Sstevel@tonic-gate	 * At this point we can take one of two paths.
202*7c478bd9Sstevel@tonic-gate	 * If the new level is at or below lock level, we will
203*7c478bd9Sstevel@tonic-gate	 * run this interrupt in a separate thread.
204*7c478bd9Sstevel@tonic-gate	 * Otherwise (high-level interrupt) we stay on this path
205*7c478bd9Sstevel@tonic-gate	 * and run the handlers on the cpu's interrupt stack.
206*7c478bd9Sstevel@tonic-gate	 */
204*7c478bd9Sstevel@tonic-gate	cmpl	$LOCK_LEVEL, %eax
205*7c478bd9Sstevel@tonic-gate	jbe	intr_thread
206*7c478bd9Sstevel@tonic-gate
207*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi		/* &cpu */
208*7c478bd9Sstevel@tonic-gate	movl	%r13d, %esi		/* ipl */
209*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edx		/* old ipl */
210*7c478bd9Sstevel@tonic-gate	movq	%rbp, %rcx		/* &regs */
211*7c478bd9Sstevel@tonic-gate	call	hilevel_intr_prolog
212*7c478bd9Sstevel@tonic-gate	orl	%eax, %eax		/* zero if need to switch stack */
213*7c478bd9Sstevel@tonic-gate	jnz	1f			/* non-zero: already on intr stack */
214*7c478bd9Sstevel@tonic-gate
215*7c478bd9Sstevel@tonic-gate	/*
216*7c478bd9Sstevel@tonic-gate	 * Save the thread stack and get on the cpu's interrupt stack
217*7c478bd9Sstevel@tonic-gate	 */
218*7c478bd9Sstevel@tonic-gate	movq	%rsp, %r15
219*7c478bd9Sstevel@tonic-gate	movq	CPU_INTR_STACK(%rbx), %rsp
220*7c478bd9Sstevel@tonic-gate1:
221*7c478bd9Sstevel@tonic-gate
222*7c478bd9Sstevel@tonic-gate	sti		/* dispatch handlers with interrupts enabled */
223*7c478bd9Sstevel@tonic-gate
224*7c478bd9Sstevel@tonic-gate	/*
225*7c478bd9Sstevel@tonic-gate	 * Walk the list of handlers for this vector, calling
226*7c478bd9Sstevel@tonic-gate	 * them as we go until no more interrupts are claimed.
227*7c478bd9Sstevel@tonic-gate	 */
228*7c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %edi
229*7c478bd9Sstevel@tonic-gate	call	av_dispatch_autovect
230*7c478bd9Sstevel@tonic-gate
231*7c478bd9Sstevel@tonic-gate	cli		/* mask interrupts again for the epilog */
232*7c478bd9Sstevel@tonic-gate
233*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi			/* &cpu */
234*7c478bd9Sstevel@tonic-gate	movl	%r13d, %esi			/* ipl */
235*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edx			/* oldipl */
236*7c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
237*7c478bd9Sstevel@tonic-gate	call	hilevel_intr_epilog
238*7c478bd9Sstevel@tonic-gate	orl	%eax, %eax		/* zero if need to switch stack */
239*7c478bd9Sstevel@tonic-gate	jnz	2f
240*7c478bd9Sstevel@tonic-gate	movq	%r15, %rsp	/* back to the interrupted thread's stack */
241*7c478bd9Sstevel@tonic-gate2:	/*
242*7c478bd9Sstevel@tonic-gate	 * Check for, and execute, softints before we iret.
243*7c478bd9Sstevel@tonic-gate	 *
244*7c478bd9Sstevel@tonic-gate	 * (dosoftint expects oldipl in %r14d (which is where it is)
245*7c478bd9Sstevel@tonic-gate	 * the cpu pointer in %rbx (which is where it is) and the
246*7c478bd9Sstevel@tonic-gate	 * softinfo in %edx (which is where we'll put it right now))
247*7c478bd9Sstevel@tonic-gate	 */
248*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
249*7c478bd9Sstevel@tonic-gate	orl	%edx, %edx
250*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
251*7c478bd9Sstevel@tonic-gate	jmp	dosoftint
252*7c478bd9Sstevel@tonic-gate	/*NOTREACHED*/
253*7c478bd9Sstevel@tonic-gate
254*7c478bd9Sstevel@tonic-gate	SET_SIZE(cmnint)
255*7c478bd9Sstevel@tonic-gate	SET_SIZE(_interrupt)
256*7c478bd9Sstevel@tonic-gate
257*7c478bd9Sstevel@tonic-gate/*
258*7c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread
259*7c478bd9Sstevel@tonic-gate *
260*7c478bd9Sstevel@tonic-gate * As we branch here, interrupts are still masked,
261*7c478bd9Sstevel@tonic-gate * %rbx still contains the cpu pointer,
262*7c478bd9Sstevel@tonic-gate * %r14d contains the old ipl that we came in on, and
263*7c478bd9Sstevel@tonic-gate * %eax contains the new ipl that we got from the setlvl routine
264*7c478bd9Sstevel@tonic-gate */
265*7c478bd9Sstevel@tonic-gate
266*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_thread)
267*7c478bd9Sstevel@tonic-gate
268*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi	/* &cpu */
269*7c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
270*7c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
271*7c478bd9Sstevel@tonic-gate	movl	%eax, %edx	/* new pil from setlvlx() */
272*7c478bd9Sstevel@tonic-gate	call	intr_thread_prolog
273*7c478bd9Sstevel@tonic-gate	movq	%rsp, %r15	/* save interrupted thread's stack pointer */
274*7c478bd9Sstevel@tonic-gate	movq	%rax, %rsp	/* t_stk from interrupt thread */
275*7c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
276*7c478bd9Sstevel@tonic-gate
277*7c478bd9Sstevel@tonic-gate	sti		/* handler runs with interrupts enabled */
278*7c478bd9Sstevel@tonic-gate
279*7c478bd9Sstevel@tonic-gate	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
280*7c478bd9Sstevel@tonic-gate	jz	1f
281*7c478bd9Sstevel@tonic-gate	/*
282*7c478bd9Sstevel@tonic-gate	 * ftracing support. do we need this on x86?
283*7c478bd9Sstevel@tonic-gate	 */
284*7c478bd9Sstevel@tonic-gate	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
285*7c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi			/* &regs */
286*7c478bd9Sstevel@tonic-gate	movl	%r12d, %edx			/* vec */
287*7c478bd9Sstevel@tonic-gate	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
288*7c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%r11), %ecx		/* newipl */
289*7c478bd9Sstevel@tonic-gate	call	ftrace_3_notick
290*7c478bd9Sstevel@tonic-gate1:
291*7c478bd9Sstevel@tonic-gate	movl	%r12d, %edi			/* vec */
292*7c478bd9Sstevel@tonic-gate	call	av_dispatch_autovect
293*7c478bd9Sstevel@tonic-gate
294*7c478bd9Sstevel@tonic-gate	cli		/* mask interrupts again for the epilog */
295*7c478bd9Sstevel@tonic-gate
296*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi			/* &cpu */
297*7c478bd9Sstevel@tonic-gate	movl	%r12d, %esi			/* vec */
298*7c478bd9Sstevel@tonic-gate	movl	%r14d, %edx			/* oldpil */
299*7c478bd9Sstevel@tonic-gate	call	intr_thread_epilog
300*7c478bd9Sstevel@tonic-gate	/*
301*7c478bd9Sstevel@tonic-gate	 * If we return from here (we might not if the interrupted thread
302*7c478bd9Sstevel@tonic-gate	 * has exited or blocked, in which case we'll have quietly swtch()ed
303*7c478bd9Sstevel@tonic-gate	 * away) then we need to switch back to our old %rsp
304*7c478bd9Sstevel@tonic-gate	 */
305*7c478bd9Sstevel@tonic-gate	movq	%r15, %rsp
306*7c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
307*7c478bd9Sstevel@tonic-gate	/*
308*7c478bd9Sstevel@tonic-gate	 * Check for, and execute, softints before we iret.
309*7c478bd9Sstevel@tonic-gate	 *
310*7c478bd9Sstevel@tonic-gate	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
311*7c478bd9Sstevel@tonic-gate	 * the mcpu_softinfo.st_pending field in %edx.
312*7c478bd9Sstevel@tonic-gate	 */
313*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
314*7c478bd9Sstevel@tonic-gate	orl	%edx, %edx
315*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
316*7c478bd9Sstevel@tonic-gate	/*FALLTHROUGH*/
317*7c478bd9Sstevel@tonic-gate
318*7c478bd9Sstevel@tonic-gate/*
319*7c478bd9Sstevel@tonic-gate * Process soft interrupts.
320*7c478bd9Sstevel@tonic-gate * Interrupts are masked, and we have a minimal frame on the stack.
321*7c478bd9Sstevel@tonic-gate * %edx should contain the mcpu_softinfo.st_pending field
322*7c478bd9Sstevel@tonic-gate */
323*7c478bd9Sstevel@tonic-gate
324*7c478bd9Sstevel@tonic-gate	ALTENTRY(dosoftint)
325*7c478bd9Sstevel@tonic-gate
326*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi	/* &cpu */
327*7c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
328*7c478bd9Sstevel@tonic-gate				/* cpu->cpu_m.mcpu_softinfo.st_pending */
329*7c478bd9Sstevel@tonic-gate	movl	%r14d, %ecx	/* oldipl */
330*7c478bd9Sstevel@tonic-gate	call	dosoftint_prolog
331*7c478bd9Sstevel@tonic-gate	/*
332*7c478bd9Sstevel@tonic-gate	 * dosoftint_prolog() usually returns a stack pointer for the
333*7c478bd9Sstevel@tonic-gate	 * interrupt thread that we must switch to.  However, if the
334*7c478bd9Sstevel@tonic-gate	 * returned stack pointer is NULL, then the software interrupt was
335*7c478bd9Sstevel@tonic-gate	 * too low in priority to run now; we'll catch it another time.
336*7c478bd9Sstevel@tonic-gate	 */
337*7c478bd9Sstevel@tonic-gate	orq	%rax, %rax
338*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
339*7c478bd9Sstevel@tonic-gate	movq	%rsp, %r15	/* save interrupted stack pointer */
340*7c478bd9Sstevel@tonic-gate	movq	%rax, %rsp	/* t_stk from interrupt thread */
341*7c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
342*7c478bd9Sstevel@tonic-gate
343*7c478bd9Sstevel@tonic-gate	sti
344*7c478bd9Sstevel@tonic-gate
345*7c478bd9Sstevel@tonic-gate	/*
346*7c478bd9Sstevel@tonic-gate	 * Enabling interrupts (above) could raise the current ipl
347*7c478bd9Sstevel@tonic-gate	 * and base spl.  But, we continue processing the current soft
348*7c478bd9Sstevel@tonic-gate	 * interrupt and we will check the base spl next time around
349*7c478bd9Sstevel@tonic-gate	 * so that blocked interrupt threads get a chance to run.
350*7c478bd9Sstevel@tonic-gate	 */
351*7c478bd9Sstevel@tonic-gate	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
352*7c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%r11), %edi	/* pil of the soft interrupt */
353*7c478bd9Sstevel@tonic-gate	call	av_dispatch_softvect
354*7c478bd9Sstevel@tonic-gate
355*7c478bd9Sstevel@tonic-gate	cli
356*7c478bd9Sstevel@tonic-gate
357*7c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi		/* &cpu */
358*7c478bd9Sstevel@tonic-gate	movl	%r14d, %esi		/* oldpil */
359*7c478bd9Sstevel@tonic-gate	call	dosoftint_epilog
360*7c478bd9Sstevel@tonic-gate	movq	%r15, %rsp		/* back on old stack pointer */
361*7c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
362*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
363*7c478bd9Sstevel@tonic-gate	orl	%edx, %edx
364*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
365*7c478bd9Sstevel@tonic-gate	jmp	dosoftint	/* more softints still pending */
366*7c478bd9Sstevel@tonic-gate
367*7c478bd9Sstevel@tonic-gate	SET_SIZE(dosoftint)
368*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_thread)
369*7c478bd9Sstevel@tonic-gate
370*7c478bd9Sstevel@tonic-gate#elif defined(__i386)
371*7c478bd9Sstevel@tonic-gate
372*7c478bd9Sstevel@tonic-gate/*
373*7c478bd9Sstevel@tonic-gate * One day, this should just invoke the C routines that know how to
374*7c478bd9Sstevel@tonic-gate * do all the interrupt bookkeeping.  In the meantime, try
375*7c478bd9Sstevel@tonic-gate * and make the assembler a little more comprehensible.
376*7c478bd9Sstevel@tonic-gate */
377*7c478bd9Sstevel@tonic-gate
/*
 * 64-bit increment of the memory doubleword pair at basereg+offset,
 * done as a 32-bit add plus add-with-carry (this is 32-bit kernel code).
 */
378*7c478bd9Sstevel@tonic-gate#define	INC64(basereg, offset)			\
379*7c478bd9Sstevel@tonic-gate	addl	$1, offset(basereg);		\
380*7c478bd9Sstevel@tonic-gate	adcl	$0, offset + 4(basereg)
381*7c478bd9Sstevel@tonic-gate
/* Zero the 64-bit location at basereg+offset. */
382*7c478bd9Sstevel@tonic-gate#define	TSC_CLR(basereg, offset)		\
383*7c478bd9Sstevel@tonic-gate	movl	$0, offset(basereg);		\
384*7c478bd9Sstevel@tonic-gate	movl	$0, offset + 4(basereg)
385*7c478bd9Sstevel@tonic-gate
386*7c478bd9Sstevel@tonic-gate/*
387*7c478bd9Sstevel@tonic-gate * The following macros assume the time value is in %edx:%eax
388*7c478bd9Sstevel@tonic-gate * e.g. from a rdtsc instruction.
389*7c478bd9Sstevel@tonic-gate */
390*7c478bd9Sstevel@tonic-gate#define	TSC_MOV(reg, offset)		\
391*7c478bd9Sstevel@tonic-gate	movl	%eax, offset(reg);	\
392*7c478bd9Sstevel@tonic-gate	movl	%edx, offset + 4(reg)
393*7c478bd9Sstevel@tonic-gate
394*7c478bd9Sstevel@tonic-gate#define	TSC_ADD_TO(reg, offset)		\
395*7c478bd9Sstevel@tonic-gate	addl	%eax, offset(reg);	\
396*7c478bd9Sstevel@tonic-gate	adcl	%edx, offset + 4(reg)
397*7c478bd9Sstevel@tonic-gate
398*7c478bd9Sstevel@tonic-gate#define	TSC_SUB_FROM(reg, offset)	\
399*7c478bd9Sstevel@tonic-gate	subl	offset(reg), %eax;	\
400*7c478bd9Sstevel@tonic-gate	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */
401*7c478bd9Sstevel@tonic-gate
402*7c478bd9Sstevel@tonic-gate/*
403*7c478bd9Sstevel@tonic-gate * basereg   - pointer to cpu struct
404*7c478bd9Sstevel@tonic-gate * pilreg    - pil or converted pil (pil - (LOCK_LEVEL + 1))
405*7c478bd9Sstevel@tonic-gate * pilreg_32 - 32-bit version of pilreg
406*7c478bd9Sstevel@tonic-gate *
407*7c478bd9Sstevel@tonic-gate * Returns (base + pil * 8) in pilreg; pilreg is overwritten.
408*7c478bd9Sstevel@tonic-gate */
409*7c478bd9Sstevel@tonic-gate#define	PILBASE(basereg, pilreg)	\
410*7c478bd9Sstevel@tonic-gate	lea	(basereg, pilreg, 8), pilreg
411*7c478bd9Sstevel@tonic-gate
412*7c478bd9Sstevel@tonic-gate/*
413*7c478bd9Sstevel@tonic-gate * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg.
414*7c478bd9Sstevel@tonic-gate * pilreg_32 must be the 32-bit alias of pilreg; the subl rebases
415*7c478bd9Sstevel@tonic-gate * it in place before the scaled lea.  Clobbers flags.
416*7c478bd9Sstevel@tonic-gate */
415*7c478bd9Sstevel@tonic-gate#define	HIGHPILBASE(basereg, pilreg, pilreg_32)		\
416*7c478bd9Sstevel@tonic-gate	subl	$LOCK_LEVEL + 1, pilreg_32;		\
417*7c478bd9Sstevel@tonic-gate	PILBASE(basereg, pilreg)
418*7c478bd9Sstevel@tonic-gate
419*7c478bd9Sstevel@tonic-gate/*
420*7c478bd9Sstevel@tonic-gate * cpu_stats.sys.intr[PIL]++
421*7c478bd9Sstevel@tonic-gate * (the "- 8" bias maps PIL values starting at 1 onto array slot pil-1)
422*7c478bd9Sstevel@tonic-gate */
422*7c478bd9Sstevel@tonic-gate#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
423*7c478bd9Sstevel@tonic-gate	movl	pilreg, tmpreg_32;				\
424*7c478bd9Sstevel@tonic-gate	PILBASE(basereg, tmpreg);				\
425*7c478bd9Sstevel@tonic-gate	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))
426*7c478bd9Sstevel@tonic-gate
427*7c478bd9Sstevel@tonic-gate/*
428*7c478bd9Sstevel@tonic-gate * Unlink thread from CPU's list (pop the head of the singly-linked
429*7c478bd9Sstevel@tonic-gate * list threaded through t_link).
430*7c478bd9Sstevel@tonic-gate */
430*7c478bd9Sstevel@tonic-gate#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
431*7c478bd9Sstevel@tonic-gate	mov	CPU_INTR_THREAD(cpureg), ithread;	\
432*7c478bd9Sstevel@tonic-gate	mov	T_LINK(ithread), tmpreg;		\
433*7c478bd9Sstevel@tonic-gate	mov	tmpreg, CPU_INTR_THREAD(cpureg)
434*7c478bd9Sstevel@tonic-gate
435*7c478bd9Sstevel@tonic-gate/*
436*7c478bd9Sstevel@tonic-gate * Link a thread into CPU's list (push onto the head, via t_link).
437*7c478bd9Sstevel@tonic-gate */
438*7c478bd9Sstevel@tonic-gate#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
439*7c478bd9Sstevel@tonic-gate	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
440*7c478bd9Sstevel@tonic-gate	mov	tmpreg, T_LINK(ithread);		\
441*7c478bd9Sstevel@tonic-gate	mov	ithread, CPU_INTR_THREAD(cpureg)
442*7c478bd9Sstevel@tonic-gate
443*7c478bd9Sstevel@tonic-gate#if defined(DEBUG)
444*7c478bd9Sstevel@tonic-gate
445*7c478bd9Sstevel@tonic-gate/*
446*7c478bd9Sstevel@tonic-gate * Do not call panic, if panic is already in progress.
447*7c478bd9Sstevel@tonic-gate */
448*7c478bd9Sstevel@tonic-gate#define	__PANIC(msg, label)		\
449*7c478bd9Sstevel@tonic-gate	cmpl	$0, panic_quiesce;		\
450*7c478bd9Sstevel@tonic-gate	jne	label;				\
451*7c478bd9Sstevel@tonic-gate	pushl	$msg;				\
452*7c478bd9Sstevel@tonic-gate	call	panic
453*7c478bd9Sstevel@tonic-gate
/* Jump to label if the 64-bit value at basereg+offset is non-zero. */
454*7c478bd9Sstevel@tonic-gate#define	__CMP64_JNE(basereg, offset, label)	\
455*7c478bd9Sstevel@tonic-gate	cmpl	$0, offset(basereg);		\
456*7c478bd9Sstevel@tonic-gate	jne	label;				\
457*7c478bd9Sstevel@tonic-gate	cmpl	$0, offset + 4(basereg);	\
458*7c478bd9Sstevel@tonic-gate	jne	label
459*7c478bd9Sstevel@tonic-gate
460*7c478bd9Sstevel@tonic-gate/*
461*7c478bd9Sstevel@tonic-gate * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
462*7c478bd9Sstevel@tonic-gate */
463*7c478bd9Sstevel@tonic-gate#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
464*7c478bd9Sstevel@tonic-gate	btl	pilreg, CPU_INTR_ACTV(basereg);		\
465*7c478bd9Sstevel@tonic-gate	jnc	4f;					\
466*7c478bd9Sstevel@tonic-gate	__PANIC(msg, 4f);				\
467*7c478bd9Sstevel@tonic-gate4:
468*7c478bd9Sstevel@tonic-gate
469*7c478bd9Sstevel@tonic-gate/*
470*7c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
471*7c478bd9Sstevel@tonic-gate */
472*7c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
473*7c478bd9Sstevel@tonic-gate	btl	pilreg, CPU_INTR_ACTV(basereg);		\
474*7c478bd9Sstevel@tonic-gate	jc	5f;					\
475*7c478bd9Sstevel@tonic-gate	__PANIC(msg, 5f);				\
476*7c478bd9Sstevel@tonic-gate5:
477*7c478bd9Sstevel@tonic-gate
478*7c478bd9Sstevel@tonic-gate/*
479*7c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_pil_high_start != 0)
480*7c478bd9Sstevel@tonic-gate */
481*7c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)			\
482*7c478bd9Sstevel@tonic-gate	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);		\
483*7c478bd9Sstevel@tonic-gate	__PANIC(_interrupt_timestamp_zero, 6f);		\
484*7c478bd9Sstevel@tonic-gate6:
485*7c478bd9Sstevel@tonic-gate
486*7c478bd9Sstevel@tonic-gate/*
487*7c478bd9Sstevel@tonic-gate * ASSERT(t->t_intr_start != 0)
488*7c478bd9Sstevel@tonic-gate */
489*7c478bd9Sstevel@tonic-gate#define	ASSERT_T_INTR_START_NZ(basereg)				\
490*7c478bd9Sstevel@tonic-gate	__CMP64_JNE(basereg, T_INTR_START, 7f);			\
491*7c478bd9Sstevel@tonic-gate	__PANIC(_intr_thread_t_intr_start_zero, 7f);	\
492*7c478bd9Sstevel@tonic-gate7:
493*7c478bd9Sstevel@tonic-gate
/* panic message strings referenced by the __PANIC invocations above */
494*7c478bd9Sstevel@tonic-gate_interrupt_actv_bit_set:
495*7c478bd9Sstevel@tonic-gate	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
496*7c478bd9Sstevel@tonic-gate_interrupt_actv_bit_not_set:
497*7c478bd9Sstevel@tonic-gate	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
498*7c478bd9Sstevel@tonic-gate_interrupt_timestamp_zero:
499*7c478bd9Sstevel@tonic-gate	.string "_interrupt(): timestamp zero upon handler return"
500*7c478bd9Sstevel@tonic-gate_intr_thread_actv_bit_not_set:
501*7c478bd9Sstevel@tonic-gate	.string	"intr_thread():	cpu_intr_actv bit not set for PIL"
502*7c478bd9Sstevel@tonic-gate_intr_thread_t_intr_start_zero:
503*7c478bd9Sstevel@tonic-gate	.string	"intr_thread():	t_intr_start zero upon handler return"
504*7c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_set:
505*7c478bd9Sstevel@tonic-gate	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
506*7c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_not_set:
507*7c478bd9Sstevel@tonic-gate	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"
508*7c478bd9Sstevel@tonic-gate
/* DEBUG-only global; presumably counts intr_thread dispatches -- TODO confirm users */
509*7c478bd9Sstevel@tonic-gate	DGDEF(intr_thread_cnt)
510*7c478bd9Sstevel@tonic-gate
/* Non-DEBUG kernels: the assertion macros expand to nothing. */
511*7c478bd9Sstevel@tonic-gate#else
512*7c478bd9Sstevel@tonic-gate#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
513*7c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
514*7c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
515*7c478bd9Sstevel@tonic-gate#define	ASSERT_T_INTR_START_NZ(basereg)
516*7c478bd9Sstevel@tonic-gate#endif
517*7c478bd9Sstevel@tonic-gate
518*7c478bd9Sstevel@tonic-gate	ENTRY_NP2(cmnint, _interrupt)
519*7c478bd9Sstevel@tonic-gate
520*7c478bd9Sstevel@tonic-gate	INTR_PUSH
521*7c478bd9Sstevel@tonic-gate
522*7c478bd9Sstevel@tonic-gate	/*
523*7c478bd9Sstevel@tonic-gate	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
524*7c478bd9Sstevel@tonic-gate	 */
525*7c478bd9Sstevel@tonic-gate	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
526*7c478bd9Sstevel@tonic-gate						/* Uses labels 8 and 9 */
527*7c478bd9Sstevel@tonic-gate	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
528*7c478bd9Sstevel@tonic-gate	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */
529*7c478bd9Sstevel@tonic-gate
530*7c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
531*7c478bd9Sstevel@tonic-gate	DISABLE_INTR_FLAGS
532*7c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
533*7c478bd9Sstevel@tonic-gate	leal    REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
534*7c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %edi		/* get ipl */
535*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx
536*7c478bd9Sstevel@tonic-gate
537*7c478bd9Sstevel@tonic-gate	/
538*7c478bd9Sstevel@tonic-gate	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
539*7c478bd9Sstevel@tonic-gate	/ jump straight to dosoftint now.
540*7c478bd9Sstevel@tonic-gate	/
541*7c478bd9Sstevel@tonic-gate	cmpl	$T_SOFTINT, (%ecx)
542*7c478bd9Sstevel@tonic-gate	je	dosoftint
543*7c478bd9Sstevel@tonic-gate
544*7c478bd9Sstevel@tonic-gate	/ raise interrupt priority level
545*7c478bd9Sstevel@tonic-gate	/ oldipl is in %edi, vectorp is in %ecx
546*7c478bd9Sstevel@tonic-gate	/ newipl is returned in %eax
547*7c478bd9Sstevel@tonic-gate	pushl	%ecx
548*7c478bd9Sstevel@tonic-gate	pushl	%edi
549*7c478bd9Sstevel@tonic-gate	call    *setlvl
550*7c478bd9Sstevel@tonic-gate	popl	%edi			/* save oldpil in %edi */
551*7c478bd9Sstevel@tonic-gate	popl	%ecx
552*7c478bd9Sstevel@tonic-gate
553*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
554*7c478bd9Sstevel@tonic-gate	movb	%al, TTR_IPL(%esi)
555*7c478bd9Sstevel@tonic-gate#endif
556*7c478bd9Sstevel@tonic-gate
557*7c478bd9Sstevel@tonic-gate	/ check for spurious interrupt
558*7c478bd9Sstevel@tonic-gate	cmp	$-1, %eax
559*7c478bd9Sstevel@tonic-gate	je	_sys_rtt
560*7c478bd9Sstevel@tonic-gate
561*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
562*7c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %edx
563*7c478bd9Sstevel@tonic-gate	movb	%dl, TTR_PRI(%esi)
564*7c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%ebx), %edx
565*7c478bd9Sstevel@tonic-gate	movb	%dl, TTR_SPL(%esi)
566*7c478bd9Sstevel@tonic-gate#endif
567*7c478bd9Sstevel@tonic-gate
568*7c478bd9Sstevel@tonic-gate	movl	%eax, CPU_PRI(%ebx) /* update ipl */
569*7c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */
570*7c478bd9Sstevel@tonic-gate
571*7c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
572*7c478bd9Sstevel@tonic-gate	movb	%cl, TTR_VECTOR(%esi)
573*7c478bd9Sstevel@tonic-gate#endif
574*7c478bd9Sstevel@tonic-gate
575*7c478bd9Sstevel@tonic-gate	/ At this point we can take one of two paths.  If the new priority
576*7c478bd9Sstevel@tonic-gate	/ level is less than or equal to LOCK LEVEL then we jump to code that
577*7c478bd9Sstevel@tonic-gate	/ will run this interrupt as a separate thread.  Otherwise the
578*7c478bd9Sstevel@tonic-gate	/ interrupt is NOT run as a separate thread.
579*7c478bd9Sstevel@tonic-gate
580*7c478bd9Sstevel@tonic-gate	/ %edi - old priority level
581*7c478bd9Sstevel@tonic-gate	/ %ebp - pointer to REGS
582*7c478bd9Sstevel@tonic-gate	/ %ecx - translated vector
583*7c478bd9Sstevel@tonic-gate	/ %eax - ipl of isr
584*7c478bd9Sstevel@tonic-gate	/ %ebx - cpu pointer
585*7c478bd9Sstevel@tonic-gate
586*7c478bd9Sstevel@tonic-gate	cmpl 	$LOCK_LEVEL, %eax	/* compare to highest thread level */
587*7c478bd9Sstevel@tonic-gate	jbe	intr_thread		/* process as a separate thread */
588*7c478bd9Sstevel@tonic-gate
589*7c478bd9Sstevel@tonic-gate	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
590*7c478bd9Sstevel@tonic-gate	jne	2f
591*7c478bd9Sstevel@tonic-gate
592*7c478bd9Sstevel@tonic-gate	movl	REGOFF_PC(%ebp), %esi
593*7c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
594*7c478bd9Sstevel@tonic-gate	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
595*7c478bd9Sstevel@tonic-gate	jz	1f
596*7c478bd9Sstevel@tonic-gate	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
597*7c478bd9Sstevel@tonic-gate	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
598*7c478bd9Sstevel@tonic-gate	jmp	2f
599*7c478bd9Sstevel@tonic-gate
600*7c478bd9Sstevel@tonic-gate1:
601*7c478bd9Sstevel@tonic-gate	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
602*7c478bd9Sstevel@tonic-gate	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */
603*7c478bd9Sstevel@tonic-gate
604*7c478bd9Sstevel@tonic-gate2:
605*7c478bd9Sstevel@tonic-gate	pushl	%ecx				/* vec */
606*7c478bd9Sstevel@tonic-gate	pushl	%eax				/* newpil */
607*7c478bd9Sstevel@tonic-gate
608*7c478bd9Sstevel@tonic-gate	/
609*7c478bd9Sstevel@tonic-gate	/ See if we are interrupting another high-level interrupt.
610*7c478bd9Sstevel@tonic-gate	/
611*7c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %eax
612*7c478bd9Sstevel@tonic-gate	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
613*7c478bd9Sstevel@tonic-gate	jz	0f
614*7c478bd9Sstevel@tonic-gate	/
615*7c478bd9Sstevel@tonic-gate	/ We have interrupted another high-level interrupt.
616*7c478bd9Sstevel@tonic-gate	/ Load starting timestamp, compute interval, update cumulative counter.
617*7c478bd9Sstevel@tonic-gate	/
618*7c478bd9Sstevel@tonic-gate	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
619*7c478bd9Sstevel@tonic-gate	HIGHPILBASE(%ebx, %ecx, %ecx)
620*7c478bd9Sstevel@tonic-gate_tsc_patch1:
621*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
622*7c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
623*7c478bd9Sstevel@tonic-gate	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %ecx	/* offset PILs 0-10 */
624*7c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
625*7c478bd9Sstevel@tonic-gate	/
626*7c478bd9Sstevel@tonic-gate	/ Another high-level interrupt is active below this one, so
627*7c478bd9Sstevel@tonic-gate	/ there is no need to check for an interrupt thread. That will be
628*7c478bd9Sstevel@tonic-gate	/ done by the lowest priority high-level interrupt active.
629*7c478bd9Sstevel@tonic-gate	/
630*7c478bd9Sstevel@tonic-gate	jmp	1f
631*7c478bd9Sstevel@tonic-gate0:
632*7c478bd9Sstevel@tonic-gate	/
633*7c478bd9Sstevel@tonic-gate	/ See if we are interrupting a low-level interrupt thread.
634*7c478bd9Sstevel@tonic-gate	/
635*7c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi
636*7c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%esi)
637*7c478bd9Sstevel@tonic-gate	jz	1f
638*7c478bd9Sstevel@tonic-gate	/
639*7c478bd9Sstevel@tonic-gate	/ We have interrupted an interrupt thread. Account for its time slice
640*7c478bd9Sstevel@tonic-gate	/ only if its time stamp is non-zero.
641*7c478bd9Sstevel@tonic-gate	/
642*7c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR_START+4(%esi)
643*7c478bd9Sstevel@tonic-gate	jne	0f
644*7c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR_START(%esi)
645*7c478bd9Sstevel@tonic-gate	je	1f
646*7c478bd9Sstevel@tonic-gate0:
647*7c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */
648*7c478bd9Sstevel@tonic-gate	PILBASE(%ebx, %ecx)
649*7c478bd9Sstevel@tonic-gate_tsc_patch2:
650*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
651*7c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, T_INTR_START)
652*7c478bd9Sstevel@tonic-gate	TSC_CLR(%esi, T_INTR_START)
653*7c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
654*7c478bd9Sstevel@tonic-gate1:
655*7c478bd9Sstevel@tonic-gate	/ Store starting timestamp in CPU structure for this PIL.
656*7c478bd9Sstevel@tonic-gate	popl	%ecx			/* restore new PIL */
657*7c478bd9Sstevel@tonic-gate	pushl	%ecx
658*7c478bd9Sstevel@tonic-gate	HIGHPILBASE(%ebx, %ecx, %ecx)
659*7c478bd9Sstevel@tonic-gate_tsc_patch3:
660*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
661*7c478bd9Sstevel@tonic-gate	TSC_MOV(%ecx, CPU_PIL_HIGH_START)
662*7c478bd9Sstevel@tonic-gate
663*7c478bd9Sstevel@tonic-gate	popl	%eax			/* restore new pil */
664*7c478bd9Sstevel@tonic-gate	popl	%ecx			/* vec */
665*7c478bd9Sstevel@tonic-gate	/
666*7c478bd9Sstevel@tonic-gate	/ Set bit for this PIL in CPU's interrupt active bitmask.
667*7c478bd9Sstevel@tonic-gate	/
668*7c478bd9Sstevel@tonic-gate
669*7c478bd9Sstevel@tonic-gate	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
670*7c478bd9Sstevel@tonic-gate
671*7c478bd9Sstevel@tonic-gate	/ Save old CPU_INTR_ACTV
672*7c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %esi
673*7c478bd9Sstevel@tonic-gate
674*7c478bd9Sstevel@tonic-gate	cmpl	$15, %eax
675*7c478bd9Sstevel@tonic-gate	jne	0f
676*7c478bd9Sstevel@tonic-gate	/ PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv
677*7c478bd9Sstevel@tonic-gate	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
678*7c478bd9Sstevel@tonic-gate0:
679*7c478bd9Sstevel@tonic-gate	btsl	%eax, CPU_INTR_ACTV(%ebx)
680*7c478bd9Sstevel@tonic-gate	/
681*7c478bd9Sstevel@tonic-gate	/ Handle high-level nested interrupt on separate interrupt stack
682*7c478bd9Sstevel@tonic-gate	/
683*7c478bd9Sstevel@tonic-gate	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
684*7c478bd9Sstevel@tonic-gate	jnz	onstack			/* already on interrupt stack */
685*7c478bd9Sstevel@tonic-gate	movl	%esp, %eax
686*7c478bd9Sstevel@tonic-gate	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
687*7c478bd9Sstevel@tonic-gate	pushl	%eax			/* save the thread stack pointer */
688*7c478bd9Sstevel@tonic-gateonstack:
689*7c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* get autovect structure before */
690*7c478bd9Sstevel@tonic-gate					/* sti to save on AGI later */
691*7c478bd9Sstevel@tonic-gate	sti				/* enable interrupts */
692*7c478bd9Sstevel@tonic-gate	pushl	%ecx			/* save interrupt vector */
693*7c478bd9Sstevel@tonic-gate	/
694*7c478bd9Sstevel@tonic-gate	/ Get handler address
695*7c478bd9Sstevel@tonic-gate	/
696*7c478bd9Sstevel@tonic-gatepre_loop1:
697*7c478bd9Sstevel@tonic-gate	movl	AVH_LINK(%esi, %ecx, 8), %esi
698*7c478bd9Sstevel@tonic-gate	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
699*7c478bd9Sstevel@tonic-gate				/* bl is DDI_INTR_CLAIMED status of chain */
700*7c478bd9Sstevel@tonic-gate	testl	%esi, %esi		/* if pointer is null */
701*7c478bd9Sstevel@tonic-gate	jz	.intr_ret		/* then skip */
702*7c478bd9Sstevel@tonic-gateloop1:
703*7c478bd9Sstevel@tonic-gate	incb	%bh
704*7c478bd9Sstevel@tonic-gate	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
705*7c478bd9Sstevel@tonic-gate	testl	%edx, %edx		/* if func is null */
706*7c478bd9Sstevel@tonic-gate	jz	.intr_ret		/* then skip */
707*7c478bd9Sstevel@tonic-gate	pushl	$0
708*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)
709*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)
710*7c478bd9Sstevel@tonic-gate	pushl	AV_VECTOR(%esi)
711*7c478bd9Sstevel@tonic-gate	pushl	AV_DIP(%esi)
712*7c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__start
713*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
714*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
715*7c478bd9Sstevel@tonic-gate	call	*%edx			/* call interrupt routine with arg */
716*7c478bd9Sstevel@tonic-gate	addl	$8, %esp
717*7c478bd9Sstevel@tonic-gate	movl	%eax, 16(%esp)
718*7c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__complete
719*7c478bd9Sstevel@tonic-gate	addl	$20, %esp
720*7c478bd9Sstevel@tonic-gate	orb	%al, %bl		/* see if anyone claims intpt. */
721*7c478bd9Sstevel@tonic-gate	movl	AV_LINK(%esi), %esi	/* get next routine on list */
722*7c478bd9Sstevel@tonic-gate	testl	%esi, %esi		/* if pointer is non-null */
723*7c478bd9Sstevel@tonic-gate	jnz	loop1			/* then continue */
724*7c478bd9Sstevel@tonic-gate
725*7c478bd9Sstevel@tonic-gate.intr_ret:
726*7c478bd9Sstevel@tonic-gate	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
727*7c478bd9Sstevel@tonic-gate	je	.intr_ret1
728*7c478bd9Sstevel@tonic-gate	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
729*7c478bd9Sstevel@tonic-gate	jz	.intr_ret1
730*7c478bd9Sstevel@tonic-gate	movl	(%esp), %ecx		/* else restore intr vector */
731*7c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* get autovect structure */
732*7c478bd9Sstevel@tonic-gate	jmp	pre_loop1		/* and try again. */
733*7c478bd9Sstevel@tonic-gate
734*7c478bd9Sstevel@tonic-gate.intr_ret1:
735*7c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)			/* get pointer to cpu struct */
736*7c478bd9Sstevel@tonic-gate
737*7c478bd9Sstevel@tonic-gate	cli
738*7c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %esi
739*7c478bd9Sstevel@tonic-gate
740*7c478bd9Sstevel@tonic-gate	/ cpu_stats.sys.intr[PIL]++
741*7c478bd9Sstevel@tonic-gate	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)
742*7c478bd9Sstevel@tonic-gate
743*7c478bd9Sstevel@tonic-gate	/
744*7c478bd9Sstevel@tonic-gate	/ Clear bit for this PIL in CPU's interrupt active bitmask.
745*7c478bd9Sstevel@tonic-gate	/
746*7c478bd9Sstevel@tonic-gate
747*7c478bd9Sstevel@tonic-gate	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)
748*7c478bd9Sstevel@tonic-gate
749*7c478bd9Sstevel@tonic-gate	cmpl	$15, %esi
750*7c478bd9Sstevel@tonic-gate	jne	0f
751*7c478bd9Sstevel@tonic-gate	/ Only clear bit if reference count is now zero.
752*7c478bd9Sstevel@tonic-gate	decw	CPU_INTR_ACTV_REF(%ebx)
753*7c478bd9Sstevel@tonic-gate	jnz	1f
754*7c478bd9Sstevel@tonic-gate0:
755*7c478bd9Sstevel@tonic-gate	btrl	%esi, CPU_INTR_ACTV(%ebx)
756*7c478bd9Sstevel@tonic-gate1:
757*7c478bd9Sstevel@tonic-gate	/
758*7c478bd9Sstevel@tonic-gate	/ Take timestamp, compute interval, update cumulative counter.
759*7c478bd9Sstevel@tonic-gate	/ esi = PIL
760*7c478bd9Sstevel@tonic-gate_tsc_patch4:
761*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
762*7c478bd9Sstevel@tonic-gate	HIGHPILBASE(%ebx, %esi, %esi)
763*7c478bd9Sstevel@tonic-gate
764*7c478bd9Sstevel@tonic-gate	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)
765*7c478bd9Sstevel@tonic-gate
766*7c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
767*7c478bd9Sstevel@tonic-gate	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %esi	/* offset PILs 0-10 */
768*7c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%esi, CPU_INTRSTAT)
769*7c478bd9Sstevel@tonic-gate	/
770*7c478bd9Sstevel@tonic-gate	/ Check for lower-PIL nested high-level interrupt beneath current one
771*7c478bd9Sstevel@tonic-gate	/ If so, place a starting timestamp in its pil_high_start entry.
772*7c478bd9Sstevel@tonic-gate	/
773*7c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %eax
774*7c478bd9Sstevel@tonic-gate	movl	%eax, %esi
775*7c478bd9Sstevel@tonic-gate	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
776*7c478bd9Sstevel@tonic-gate	jz	0f
777*7c478bd9Sstevel@tonic-gate	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
778*7c478bd9Sstevel@tonic-gate	HIGHPILBASE(%ebx, %ecx, %ecx)
779*7c478bd9Sstevel@tonic-gate_tsc_patch5:
780*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
781*7c478bd9Sstevel@tonic-gate	TSC_MOV(%ecx, CPU_PIL_HIGH_START)
782*7c478bd9Sstevel@tonic-gate	/
783*7c478bd9Sstevel@tonic-gate	/ Another high-level interrupt is active below this one, so
784*7c478bd9Sstevel@tonic-gate	/ there is no need to check for an interrupt thread. That will be
785*7c478bd9Sstevel@tonic-gate	/ done by the lowest priority high-level interrupt active.
786*7c478bd9Sstevel@tonic-gate	/
787*7c478bd9Sstevel@tonic-gate	jmp	1f
788*7c478bd9Sstevel@tonic-gate0:
789*7c478bd9Sstevel@tonic-gate	/ Check to see if there is a low-level interrupt active. If so,
790*7c478bd9Sstevel@tonic-gate	/ place a starting timestamp in the thread structure.
791*7c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi
792*7c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%esi)
793*7c478bd9Sstevel@tonic-gate	jz	1f
794*7c478bd9Sstevel@tonic-gate_tsc_patch6:
795*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
796*7c478bd9Sstevel@tonic-gate	TSC_MOV(%esi, T_INTR_START)
797*7c478bd9Sstevel@tonic-gate1:
798*7c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PRI(%ebx)
799*7c478bd9Sstevel@tonic-gate				/* interrupt vector already on stack */
800*7c478bd9Sstevel@tonic-gate	pushl	%edi			/* old ipl */
801*7c478bd9Sstevel@tonic-gate	call	*setlvlx
802*7c478bd9Sstevel@tonic-gate	addl	$8, %esp		/* eax contains the current ipl */
803*7c478bd9Sstevel@tonic-gate
804*7c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
805*7c478bd9Sstevel@tonic-gate	shrl	$LOCK_LEVEL + 1, %esi	/* HI PRI intrs. */
806*7c478bd9Sstevel@tonic-gate	jnz	.intr_ret2
807*7c478bd9Sstevel@tonic-gate	popl	%esp			/* restore the thread stack pointer */
808*7c478bd9Sstevel@tonic-gate.intr_ret2:
809*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
810*7c478bd9Sstevel@tonic-gate	orl	%edx, %edx
811*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
812*7c478bd9Sstevel@tonic-gate	jmp	dosoftint	/* check for softints before we return. */
813*7c478bd9Sstevel@tonic-gate	SET_SIZE(cmnint)
814*7c478bd9Sstevel@tonic-gate	SET_SIZE(_interrupt)
815*7c478bd9Sstevel@tonic-gate
816*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
817*7c478bd9Sstevel@tonic-gate
818*7c478bd9Sstevel@tonic-gate/*
819*7c478bd9Sstevel@tonic-gate * Declare a uintptr_t which has the size of _interrupt to enable stack
820*7c478bd9Sstevel@tonic-gate * traceback code to know when a regs structure is on the stack.
821*7c478bd9Sstevel@tonic-gate */
822*7c478bd9Sstevel@tonic-gate	.globl	_interrupt_size
823*7c478bd9Sstevel@tonic-gate	.align	CLONGSIZE
824*7c478bd9Sstevel@tonic-gate_interrupt_size:
825*7c478bd9Sstevel@tonic-gate	.NWORD	. - _interrupt
826*7c478bd9Sstevel@tonic-gate	.type	_interrupt_size, @object
827*7c478bd9Sstevel@tonic-gate
828*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
829*7c478bd9Sstevel@tonic-gate
830*7c478bd9Sstevel@tonic-gate#if defined(__i386)
831*7c478bd9Sstevel@tonic-gate
832*7c478bd9Sstevel@tonic-gate/*
833*7c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread.
834*7c478bd9Sstevel@tonic-gate *	Entry:  traps disabled.
835*7c478bd9Sstevel@tonic-gate *		%edi - old priority level
836*7c478bd9Sstevel@tonic-gate *		%ebp - pointer to REGS
837*7c478bd9Sstevel@tonic-gate *		%ecx - translated vector
838*7c478bd9Sstevel@tonic-gate *		%eax - ipl of isr.
839*7c478bd9Sstevel@tonic-gate *		%ebx - pointer to CPU struct
840*7c478bd9Sstevel@tonic-gate *	Uses:
841*7c478bd9Sstevel@tonic-gate */
842*7c478bd9Sstevel@tonic-gate
843*7c478bd9Sstevel@tonic-gate#if !defined(__lint)
844*7c478bd9Sstevel@tonic-gate
845*7c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_thread)
846*7c478bd9Sstevel@tonic-gate	/
847*7c478bd9Sstevel@tonic-gate	/ Set bit for this PIL in CPU's interrupt active bitmask.
848*7c478bd9Sstevel@tonic-gate	/
849*7c478bd9Sstevel@tonic-gate
850*7c478bd9Sstevel@tonic-gate	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
851*7c478bd9Sstevel@tonic-gate
852*7c478bd9Sstevel@tonic-gate	btsl	%eax, CPU_INTR_ACTV(%ebx)
853*7c478bd9Sstevel@tonic-gate
854*7c478bd9Sstevel@tonic-gate	/ Get set to run interrupt thread.
855*7c478bd9Sstevel@tonic-gate	/ There should always be an interrupt thread since we allocate one
856*7c478bd9Sstevel@tonic-gate	/ for each level on the CPU.
857*7c478bd9Sstevel@tonic-gate	/
858*7c478bd9Sstevel@tonic-gate	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
859*7c478bd9Sstevel@tonic-gate	/ of events here - in particular that t->t_lwp of the interrupt
860*7c478bd9Sstevel@tonic-gate	/ thread is set to the pinned thread *before* curthread is changed
861*7c478bd9Sstevel@tonic-gate	/
862*7c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */
863*7c478bd9Sstevel@tonic-gate
864*7c478bd9Sstevel@tonic-gate	/
865*7c478bd9Sstevel@tonic-gate	/ Are we interrupting an interrupt thread? If so, account for it.
866*7c478bd9Sstevel@tonic-gate	/
867*7c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%edx)
868*7c478bd9Sstevel@tonic-gate	jz	0f
869*7c478bd9Sstevel@tonic-gate	pushl	%ecx
870*7c478bd9Sstevel@tonic-gate	pushl	%eax
871*7c478bd9Sstevel@tonic-gate	movl	%edx, %esi
872*7c478bd9Sstevel@tonic-gate_tsc_patch7:
873*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
874*7c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, T_INTR_START)
875*7c478bd9Sstevel@tonic-gate	TSC_CLR(%esi, T_INTR_START)
876*7c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%esi), %ecx
877*7c478bd9Sstevel@tonic-gate	PILBASE(%ebx, %ecx)
878*7c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
879*7c478bd9Sstevel@tonic-gate	movl	%esi, %edx
880*7c478bd9Sstevel@tonic-gate	popl	%eax
881*7c478bd9Sstevel@tonic-gate	popl	%ecx
882*7c478bd9Sstevel@tonic-gate0:
883*7c478bd9Sstevel@tonic-gate	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
884*7c478bd9Sstevel@tonic-gate	pushl	%edi			/* get a temporary register */
885*7c478bd9Sstevel@tonic-gate	UNLINK_INTR_THREAD(%ebx, %esi, %edi)
886*7c478bd9Sstevel@tonic-gate
887*7c478bd9Sstevel@tonic-gate	movl	T_LWP(%edx), %edi
888*7c478bd9Sstevel@tonic-gate	movl	%edx, T_INTR(%esi)		/* push old thread */
889*7c478bd9Sstevel@tonic-gate	movl	%edi, T_LWP(%esi)
890*7c478bd9Sstevel@tonic-gate	/
891*7c478bd9Sstevel@tonic-gate	/ Threads on the interrupt thread free list could have state already
892*7c478bd9Sstevel@tonic-gate	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
893*7c478bd9Sstevel@tonic-gate	/
894*7c478bd9Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%esi)
895*7c478bd9Sstevel@tonic-gate	/
896*7c478bd9Sstevel@tonic-gate	/ chain the interrupted thread onto list from the interrupt thread.
897*7c478bd9Sstevel@tonic-gate	/ Set the new interrupt thread as the current one.
898*7c478bd9Sstevel@tonic-gate	/
899*7c478bd9Sstevel@tonic-gate	popl	%edi			/* Don't need a temp reg anymore */
900*7c478bd9Sstevel@tonic-gate	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
901*7c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
902*7c478bd9Sstevel@tonic-gate	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
903*7c478bd9Sstevel@tonic-gate	pushl	%eax				/* save the ipl */
904*7c478bd9Sstevel@tonic-gate	/
905*7c478bd9Sstevel@tonic-gate	/ Initialize thread priority level from intr_pri
906*7c478bd9Sstevel@tonic-gate	/
907*7c478bd9Sstevel@tonic-gate	movb	%al, T_PIL(%esi)	/* store pil */
908*7c478bd9Sstevel@tonic-gate	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
909*7c478bd9Sstevel@tonic-gate					/* is loaded on some other cpu. */
910*7c478bd9Sstevel@tonic-gate	addl	%ebx, %eax		/* convert level to dispatch priority */
911*7c478bd9Sstevel@tonic-gate	movw	%ax, T_PRI(%esi)
912*7c478bd9Sstevel@tonic-gate
913*7c478bd9Sstevel@tonic-gate	/
914*7c478bd9Sstevel@tonic-gate	/ Take timestamp and store it in the thread structure.
915*7c478bd9Sstevel@tonic-gate	/
916*7c478bd9Sstevel@tonic-gate	movl	%eax, %ebx		/* save priority over rdtsc */
917*7c478bd9Sstevel@tonic-gate_tsc_patch8:
918*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
919*7c478bd9Sstevel@tonic-gate	TSC_MOV(%esi, T_INTR_START)
920*7c478bd9Sstevel@tonic-gate	movl	%ebx, %eax		/* restore priority */
921*7c478bd9Sstevel@tonic-gate
922*7c478bd9Sstevel@tonic-gate	/ The following 3 instructions need not be in cli.
923*7c478bd9Sstevel@tonic-gate	/ Putting them here only to avoid the AGI penalty on Pentiums.
924*7c478bd9Sstevel@tonic-gate
925*7c478bd9Sstevel@tonic-gate	pushl	%ecx			/* save interrupt vector. */
926*7c478bd9Sstevel@tonic-gate	pushl	%esi			/* save interrupt thread */
927*7c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* get autovect structure */
928*7c478bd9Sstevel@tonic-gate	sti				/* enable interrupts */
929*7c478bd9Sstevel@tonic-gate
930*7c478bd9Sstevel@tonic-gate	/ Fast event tracing.
931*7c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)
932*7c478bd9Sstevel@tonic-gate	movl	CPU_FTRACE_STATE(%ebx), %ebx
933*7c478bd9Sstevel@tonic-gate	testl	$FTRACE_ENABLED, %ebx
934*7c478bd9Sstevel@tonic-gate	jz	1f
935*7c478bd9Sstevel@tonic-gate
936*7c478bd9Sstevel@tonic-gate	movl	8(%esp), %ebx
937*7c478bd9Sstevel@tonic-gate	pushl	%ebx			/* ipl */
938*7c478bd9Sstevel@tonic-gate	pushl	%ecx			/* int vector */
939*7c478bd9Sstevel@tonic-gate	movl	T_SP(%edx), %ebx
940*7c478bd9Sstevel@tonic-gate	pushl	%ebx			/* &regs */
941*7c478bd9Sstevel@tonic-gate	pushl	$_ftrace_intr_thread_fmt
942*7c478bd9Sstevel@tonic-gate	call	ftrace_3_notick
943*7c478bd9Sstevel@tonic-gate	addl	$8, %esp
944*7c478bd9Sstevel@tonic-gate	popl	%ecx			/* restore int vector */
945*7c478bd9Sstevel@tonic-gate	addl	$4, %esp
946*7c478bd9Sstevel@tonic-gate1:
947*7c478bd9Sstevel@tonic-gatepre_loop2:
948*7c478bd9Sstevel@tonic-gate	movl	AVH_LINK(%esi, %ecx, 8), %esi
949*7c478bd9Sstevel@tonic-gate	xorl	%ebx, %ebx	/* bh is cno. of intpts in chain */
950*7c478bd9Sstevel@tonic-gate				/* bl is DDI_INTR_CLAIMED status of * chain */
951*7c478bd9Sstevel@tonic-gate	testl	%esi, %esi	/* if pointer is null */
952*7c478bd9Sstevel@tonic-gate	jz	loop_done2	/* we're done */
953*7c478bd9Sstevel@tonic-gateloop2:
954*7c478bd9Sstevel@tonic-gate	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
955*7c478bd9Sstevel@tonic-gate	testl	%edx, %edx		/* if pointer is null */
956*7c478bd9Sstevel@tonic-gate	jz	loop_done2		/* we're done */
957*7c478bd9Sstevel@tonic-gate	incb	%bh
958*7c478bd9Sstevel@tonic-gate	pushl	$0
959*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)
960*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)
961*7c478bd9Sstevel@tonic-gate	pushl	AV_VECTOR(%esi)
962*7c478bd9Sstevel@tonic-gate	pushl	AV_DIP(%esi)
963*7c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__start
964*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
965*7c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
966*7c478bd9Sstevel@tonic-gate	call	*%edx			/* call interrupt routine with arg */
967*7c478bd9Sstevel@tonic-gate	addl	$8, %esp
968*7c478bd9Sstevel@tonic-gate	movl	%eax, 16(%esp)
969*7c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__complete
970*7c478bd9Sstevel@tonic-gate	addl	$20, %esp
971*7c478bd9Sstevel@tonic-gate	orb	%al, %bl		/* see if anyone claims intpt. */
972*7c478bd9Sstevel@tonic-gate	movl	AV_LINK(%esi), %esi	/* get next routine on list */
973*7c478bd9Sstevel@tonic-gate	testl	%esi, %esi		/* if pointer is non-null */
974*7c478bd9Sstevel@tonic-gate	jnz	loop2			/* continue */
975*7c478bd9Sstevel@tonic-gateloop_done2:
976*7c478bd9Sstevel@tonic-gate	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
977*7c478bd9Sstevel@tonic-gate	je	.loop_done2_1
978*7c478bd9Sstevel@tonic-gate	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
979*7c478bd9Sstevel@tonic-gate	jz	.loop_done2_1
980*7c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* else get autovect structure */
981*7c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx		/* restore intr vector */
982*7c478bd9Sstevel@tonic-gate	jmp	pre_loop2		/* and try again. */
983*7c478bd9Sstevel@tonic-gate.loop_done2_1:
984*7c478bd9Sstevel@tonic-gate	popl	%esi			/* restore intr thread pointer */
985*7c478bd9Sstevel@tonic-gate
986*7c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)
987*7c478bd9Sstevel@tonic-gate
988*7c478bd9Sstevel@tonic-gate	cli		/* protect interrupt thread pool and intr_actv */
989*7c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%esi), %eax
990*7c478bd9Sstevel@tonic-gate
991*7c478bd9Sstevel@tonic-gate	/ Save value in regs
992*7c478bd9Sstevel@tonic-gate	pushl	%eax			/* current pil */
993*7c478bd9Sstevel@tonic-gate	pushl	%edx			/* (huh?) */
994*7c478bd9Sstevel@tonic-gate	pushl	%edi			/* old pil */
995*7c478bd9Sstevel@tonic-gate
996*7c478bd9Sstevel@tonic-gate	/ cpu_stats.sys.intr[PIL]++
997*7c478bd9Sstevel@tonic-gate	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)
998*7c478bd9Sstevel@tonic-gate
999*7c478bd9Sstevel@tonic-gate	/
1000*7c478bd9Sstevel@tonic-gate	/ Take timestamp, compute interval, and update cumulative counter.
1001*7c478bd9Sstevel@tonic-gate	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
1002*7c478bd9Sstevel@tonic-gate	/
1003*7c478bd9Sstevel@tonic-gate	movl	%eax, %edi
1004*7c478bd9Sstevel@tonic-gate
1005*7c478bd9Sstevel@tonic-gate	ASSERT_T_INTR_START_NZ(%esi)
1006*7c478bd9Sstevel@tonic-gate
1007*7c478bd9Sstevel@tonic-gate_tsc_patch9:
1008*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
1009*7c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, T_INTR_START)
1010*7c478bd9Sstevel@tonic-gate	PILBASE(%ebx, %edi)
1011*7c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%edi, CPU_INTRSTAT)
1012*7c478bd9Sstevel@tonic-gate	popl	%edi
1013*7c478bd9Sstevel@tonic-gate	popl	%edx
1014*7c478bd9Sstevel@tonic-gate	popl	%eax
1015*7c478bd9Sstevel@tonic-gate
1016*7c478bd9Sstevel@tonic-gate	/
1017*7c478bd9Sstevel@tonic-gate	/ Clear bit for this PIL in CPU's interrupt active bitmask.
1018*7c478bd9Sstevel@tonic-gate	/
1019*7c478bd9Sstevel@tonic-gate
1020*7c478bd9Sstevel@tonic-gate	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)
1021*7c478bd9Sstevel@tonic-gate
1022*7c478bd9Sstevel@tonic-gate	btrl	%eax, CPU_INTR_ACTV(%ebx)
1023*7c478bd9Sstevel@tonic-gate
1024*7c478bd9Sstevel@tonic-gate	/ if there is still an interrupted thread underneath this one
1025*7c478bd9Sstevel@tonic-gate	/ then the interrupt was never blocked and the return is fairly
1026*7c478bd9Sstevel@tonic-gate	/ simple.  Otherwise jump to intr_thread_exit
1027*7c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR(%esi)
1028*7c478bd9Sstevel@tonic-gate	je	intr_thread_exit
1029*7c478bd9Sstevel@tonic-gate
1030*7c478bd9Sstevel@tonic-gate	/
1031*7c478bd9Sstevel@tonic-gate	/ link the thread back onto the interrupt thread pool
1032*7c478bd9Sstevel@tonic-gate	LINK_INTR_THREAD(%ebx, %esi, %edx)
1033*7c478bd9Sstevel@tonic-gate
1034*7c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
1035*7c478bd9Sstevel@tonic-gate	/ set the thread state to free so kmdb doesn't see it
1036*7c478bd9Sstevel@tonic-gate	movl	$FREE_THREAD, T_STATE(%esi)
1037*7c478bd9Sstevel@tonic-gate
1038*7c478bd9Sstevel@tonic-gate	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
1039*7c478bd9Sstevel@tonic-gate	jae	intr_restore_ipl	/* then use oldipl */
1040*7c478bd9Sstevel@tonic-gate	movl	%eax, %edi		/* else use basespl */
1041*7c478bd9Sstevel@tonic-gateintr_restore_ipl:
1042*7c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PRI(%ebx)
1043*7c478bd9Sstevel@tonic-gate					/* intr vector already on stack */
1044*7c478bd9Sstevel@tonic-gate	pushl	%edi			/* old ipl */
1045*7c478bd9Sstevel@tonic-gate	call	*setlvlx		/* eax contains the current ipl */
1046*7c478bd9Sstevel@tonic-gate	/
1047*7c478bd9Sstevel@tonic-gate	/ Switch back to the interrupted thread
1048*7c478bd9Sstevel@tonic-gate	movl	T_INTR(%esi), %ecx
1049*7c478bd9Sstevel@tonic-gate
1050*7c478bd9Sstevel@tonic-gate	/ Place starting timestamp in interrupted thread's thread structure.
1051*7c478bd9Sstevel@tonic-gate_tsc_patch10:
1052*7c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
1053*7c478bd9Sstevel@tonic-gate	TSC_MOV(%ecx, T_INTR_START)
1054*7c478bd9Sstevel@tonic-gate
1055*7c478bd9Sstevel@tonic-gate	movl	T_SP(%ecx), %esp	/* restore stack pointer */
1056*7c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
1057*7c478bd9Sstevel@tonic-gate	movl	%ecx, CPU_THREAD(%ebx)
1058*7c478bd9Sstevel@tonic-gate
1059*7c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
1060*7c478bd9Sstevel@tonic-gate	orl	%edx, %edx
1061*7c478bd9Sstevel@tonic-gate	jz	_sys_rtt
1062*7c478bd9Sstevel@tonic-gate	jmp	dosoftint	/* check for softints before we return. */
1063*7c478bd9Sstevel@tonic-gate
1064*7c478bd9Sstevel@tonic-gate	/
1065*7c478bd9Sstevel@tonic-gate	/ An interrupt returned on what was once (and still might be)
1066*7c478bd9Sstevel@tonic-gate	/ an interrupt thread stack, but the interrupted process is no longer
1067*7c478bd9Sstevel@tonic-gate	/ there.  This means the interrupt must have blocked.
1068*7c478bd9Sstevel@tonic-gate	/
1069*7c478bd9Sstevel@tonic-gate	/ There is no longer a thread under this one, so put this thread back
1070*7c478bd9Sstevel@tonic-gate	/ on the CPU's free list and resume the idle thread which will dispatch
1071*7c478bd9Sstevel@tonic-gate	/ the next thread to run.
1072*7c478bd9Sstevel@tonic-gate	/
1073*7c478bd9Sstevel@tonic-gate	/ All interrupts are disabled here
1074*7c478bd9Sstevel@tonic-gate	/
1075*7c478bd9Sstevel@tonic-gate
1076*7c478bd9Sstevel@tonic-gateintr_thread_exit:
1077*7c478bd9Sstevel@tonic-gate#ifdef DEBUG
1078*7c478bd9Sstevel@tonic-gate	incl	intr_thread_cnt
1079*7c478bd9Sstevel@tonic-gate#endif
1080*7c478bd9Sstevel@tonic-gate	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
1081*7c478bd9Sstevel@tonic-gate	/
1082*7c478bd9Sstevel@tonic-gate	/ Put thread back on the interrupt thread list.
1083*7c478bd9Sstevel@tonic-gate	/ As a reminder, the regs at this point are
1084*7c478bd9Sstevel@tonic-gate	/	esi	interrupt thread
1085*7c478bd9Sstevel@tonic-gate	/	edi	old ipl
1086*7c478bd9Sstevel@tonic-gate	/	ebx	ptr to CPU struct
1087*7c478bd9Sstevel@tonic-gate
1088*7c478bd9Sstevel@tonic-gate	/ Set CPU's base SPL level based on active interrupts bitmask
1089*7c478bd9Sstevel@tonic-gate	call	set_base_spl
1090*7c478bd9Sstevel@tonic-gate
1091*7c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%ebx), %edi
1092*7c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PRI(%ebx)
1093*7c478bd9Sstevel@tonic-gate					/* interrupt vector already on stack */
1094*7c478bd9Sstevel@tonic-gate	pushl	%edi
1095*7c478bd9Sstevel@tonic-gate	call	*setlvlx
1096*7c478bd9Sstevel@tonic-gate	addl	$8, %esp		/* XXX - don't need to pop since */
1097*7c478bd9Sstevel@tonic-gate					/* we are ready to switch */
1098*7c478bd9Sstevel@tonic-gate	call	splhigh			/* block all intrs below lock level */
1099*7c478bd9Sstevel@tonic-gate	/
1100*7c478bd9Sstevel@tonic-gate	/ Set the thread state to free so kmdb doesn't see it
1101*7c478bd9Sstevel@tonic-gate	/
1102*7c478bd9Sstevel@tonic-gate	movl	$FREE_THREAD, T_STATE(%esi)
1103*7c478bd9Sstevel@tonic-gate	/
1104*7c478bd9Sstevel@tonic-gate	/ Put thread on either the interrupt pool or the free pool and
1105*7c478bd9Sstevel@tonic-gate	/ call swtch() to resume another thread.
1106*7c478bd9Sstevel@tonic-gate	/
1107*7c478bd9Sstevel@tonic-gate	LINK_INTR_THREAD(%ebx, %esi, %edx)
1108*7c478bd9Sstevel@tonic-gate	call 	swtch
1109*7c478bd9Sstevel@tonic-gate	/ swtch() shouldn't return
1110*7c478bd9Sstevel@tonic-gate
1111*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_thread)
1112*7c478bd9Sstevel@tonic-gate
1113*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
1114*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
1115*7c478bd9Sstevel@tonic-gate
1116*7c478bd9Sstevel@tonic-gate/*
1117*7c478bd9Sstevel@tonic-gate * Set Cpu's base SPL level, base on which interrupt levels are active
1118*7c478bd9Sstevel@tonic-gate *	Called at spl7 or above.
1119*7c478bd9Sstevel@tonic-gate */
1120*7c478bd9Sstevel@tonic-gate
1121*7c478bd9Sstevel@tonic-gate#if defined(__lint)
1122*7c478bd9Sstevel@tonic-gate
1123*7c478bd9Sstevel@tonic-gatevoid
1124*7c478bd9Sstevel@tonic-gateset_base_spl(void)
1125*7c478bd9Sstevel@tonic-gate{}
1126*7c478bd9Sstevel@tonic-gate
1127*7c478bd9Sstevel@tonic-gate#else	/* __lint */
1128*7c478bd9Sstevel@tonic-gate
1129*7c478bd9Sstevel@tonic-gate	ENTRY_NP(set_base_spl)
1130*7c478bd9Sstevel@tonic-gate	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
1131*7c478bd9Sstevel@tonic-gate	testl	%eax, %eax		/* is it zero? */
1132*7c478bd9Sstevel@tonic-gate	jz	setbase
1133*7c478bd9Sstevel@tonic-gate	testl	$0xff00, %eax
1134*7c478bd9Sstevel@tonic-gate	jnz	ah_set
1135*7c478bd9Sstevel@tonic-gate	shl	$24, %eax		/* shift 'em over so we can find */
1136*7c478bd9Sstevel@tonic-gate					/* the 1st bit faster */
1137*7c478bd9Sstevel@tonic-gate	bsrl	%eax, %eax
1138*7c478bd9Sstevel@tonic-gate	subl	$24, %eax
1139*7c478bd9Sstevel@tonic-gatesetbase:
1140*7c478bd9Sstevel@tonic-gate	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
1141*7c478bd9Sstevel@tonic-gate	ret
1142*7c478bd9Sstevel@tonic-gateah_set:
1143*7c478bd9Sstevel@tonic-gate	shl	$16, %eax
1144*7c478bd9Sstevel@tonic-gate	bsrl	%eax, %eax
1145*7c478bd9Sstevel@tonic-gate	subl	$16, %eax
1146*7c478bd9Sstevel@tonic-gate	jmp	setbase
1147*7c478bd9Sstevel@tonic-gate	SET_SIZE(set_base_spl)
1148*7c478bd9Sstevel@tonic-gate
1149*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
1150*7c478bd9Sstevel@tonic-gate
1151*7c478bd9Sstevel@tonic-gate#if defined(__i386)
1152*7c478bd9Sstevel@tonic-gate
1153*7c478bd9Sstevel@tonic-gate/*
1154*7c478bd9Sstevel@tonic-gate * int
1155*7c478bd9Sstevel@tonic-gate * intr_passivate(from, to)
1156*7c478bd9Sstevel@tonic-gate *      thread_id_t     from;           interrupt thread
1157*7c478bd9Sstevel@tonic-gate *      thread_id_t     to;             interrupted thread
1158*7c478bd9Sstevel@tonic-gate *
 *	intr_passivate(from, to) makes the interrupted thread ("to") runnable.
 *
 *	Since the thread's t_sp has already been saved, its t_pc is all that
 *	needs to be set in this function.
1163*7c478bd9Sstevel@tonic-gate *
1164*7c478bd9Sstevel@tonic-gate *	Returns interrupt level of the thread.
1165*7c478bd9Sstevel@tonic-gate */
1166*7c478bd9Sstevel@tonic-gate
1167*7c478bd9Sstevel@tonic-gate#if defined(__lint)
1168*7c478bd9Sstevel@tonic-gate
/* lint prototype stub; the real intr_passivate is the assembly below */
/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }
1173*7c478bd9Sstevel@tonic-gate
1174*7c478bd9Sstevel@tonic-gate#else	/* __lint */
1175*7c478bd9Sstevel@tonic-gate
1176*7c478bd9Sstevel@tonic-gate	ENTRY(intr_passivate)
1177*7c478bd9Sstevel@tonic-gate	movl	8(%esp), %eax		/* interrupted thread  */
1178*7c478bd9Sstevel@tonic-gate	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */
1179*7c478bd9Sstevel@tonic-gate
1180*7c478bd9Sstevel@tonic-gate	movl	4(%esp), %eax		/* interrupt thread */
1181*7c478bd9Sstevel@tonic-gate	movl	T_STACK(%eax), %eax	/* get the pointer to the start of */
1182*7c478bd9Sstevel@tonic-gate					/* of the interrupt thread stack */
1183*7c478bd9Sstevel@tonic-gate	movl	-4(%eax), %eax		/* interrupt level was the first */
1184*7c478bd9Sstevel@tonic-gate					/* thing pushed onto the stack */
1185*7c478bd9Sstevel@tonic-gate	ret
1186*7c478bd9Sstevel@tonic-gate	SET_SIZE(intr_passivate)
1187*7c478bd9Sstevel@tonic-gate
1188*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
1189*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
1190*7c478bd9Sstevel@tonic-gate
1191*7c478bd9Sstevel@tonic-gate#if defined(__lint)
1192*7c478bd9Sstevel@tonic-gate
/* lint prototype stub; the real fakesoftint is the assembly below */
void
fakesoftint(void)
{}
1196*7c478bd9Sstevel@tonic-gate
1197*7c478bd9Sstevel@tonic-gate#else	/* __lint */
1198*7c478bd9Sstevel@tonic-gate
1199*7c478bd9Sstevel@tonic-gate	/
1200*7c478bd9Sstevel@tonic-gate	/ If we're here, we're being called from splx() to fake a soft
1201*7c478bd9Sstevel@tonic-gate	/ interrupt (note that interrupts are still disabled from splx()).
1202*7c478bd9Sstevel@tonic-gate	/ We execute this code when a soft interrupt is posted at
1203*7c478bd9Sstevel@tonic-gate	/ level higher than the CPU's current spl; when spl is lowered in
1204*7c478bd9Sstevel@tonic-gate	/ splx(), it will see the softint and jump here.  We'll do exactly
1205*7c478bd9Sstevel@tonic-gate	/ what a trap would do:  push our flags, %cs, %eip, error code
1206*7c478bd9Sstevel@tonic-gate	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
1207*7c478bd9Sstevel@tonic-gate	/ and branch to the dosoftint() code.
1208*7c478bd9Sstevel@tonic-gate	/
1209*7c478bd9Sstevel@tonic-gate#if defined(__amd64)
1210*7c478bd9Sstevel@tonic-gate
	/*
	 * In 64-bit mode, iretq -always- pops all five frame members
	 * (%ss:%rsp, %rflags, %cs:%rip), so fabricate a complete
	 * hardware-style interrupt frame, imitating the CPU's 16-byte
	 * stack auto-alignment on interrupt delivery.
	 */
	ENTRY_NP(fakesoftint)
	movq	%rsp, %r11		/* remember pre-"trap" %rsp */
	andq	$-16, %rsp		/* align as hardware would */
	pushq	$KDS_SEL	/* %ss */
	pushq	%r11		/* %rsp */
	/*
	 * NOTE(review): the flags pushed below are captured with
	 * interrupts disabled (splx() disabled them before jumping
	 * here), so the frame's IF bit is clear -- confirm the return
	 * path re-enables interrupts after the iretq.
	 */
	pushf			/* rflags */
	pushq	$KCS_SEL	/* %cs */
	leaq	fakesoftint_return(%rip), %r11
	pushq	%r11		/* %rip: resume at fakesoftint_return */
	pushq	$0		/* err: dummy error code */
	pushq	$T_SOFTINT	/* trap: cmnint branches to dosoftint */
	jmp	cmnint
	SET_SIZE(fakesoftint)
1229*7c478bd9Sstevel@tonic-gate
1230*7c478bd9Sstevel@tonic-gate#elif defined(__i386)
1231*7c478bd9Sstevel@tonic-gate
	/*
	 * Fabricate the frame a hardware trap would push, then enter the
	 * common interrupt path; T_SOFTINT routes cmnint to dosoftint.
	 * NOTE(review): %eflags is pushed with interrupts disabled (IF
	 * clear) -- confirm the return path re-enables interrupts.
	 */
	ENTRY_NP(fakesoftint)
	pushf				/* %eflags */
	push	%cs			/* %cs */
	push	$fakesoftint_return	/* return %eip */
	push	$0			/* dummy error code */
	push	$T_SOFTINT		/* trap number */
	jmp	cmnint
	SET_SIZE(fakesoftint)
1240*7c478bd9Sstevel@tonic-gate
1241*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
1242*7c478bd9Sstevel@tonic-gate
	/*
	 * _fakesoftint_size: length in bytes of the fakesoftint code
	 * above.  NOTE(review): presumably used to recognize a saved
	 * PC that lies within fakesoftint (e.g. by trap/kmdb) --
	 * confirm against consumers.
	 */
	.align	CPTRSIZE
	.globl	_fakesoftint_size
	.type	_fakesoftint_size, @object
_fakesoftint_size:
	.NWORD	. - fakesoftint
	SET_SIZE(_fakesoftint_size)
1249*7c478bd9Sstevel@tonic-gate
1250*7c478bd9Sstevel@tonic-gate/*
1251*7c478bd9Sstevel@tonic-gate * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
1252*7c478bd9Sstevel@tonic-gate * Process software interrupts
1253*7c478bd9Sstevel@tonic-gate * Interrupts are disabled here.
1254*7c478bd9Sstevel@tonic-gate */
1255*7c478bd9Sstevel@tonic-gate#if defined(__i386)
1256*7c478bd9Sstevel@tonic-gate
	/ On entry (see header comment above): %edi = old PIL,
	/ %edx = CPU_SOFTINFO pending-softint bitmask, %ebx = CPU pointer.
	/ Interrupts are disabled.
	ENTRY_NP(dosoftint)

	bsrl	%edx, %edx		/* find highest-priority pending softint */
	cmpl	%edx, %edi		/* if curipl >= pri of pending softint */
	jae	_sys_rtt		/* skip; it is masked at this level */

	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
	cmpl	%edx, %eax		/* if basespl >= pri of pending softint */
	jae	_sys_rtt		/* skip */

	lock				/* MP protect */
	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
	jnc	dosoftint_again		/* bit already clear: lost the race, */
					/* rescan softinfo */

	movl	%edx, CPU_PRI(%ebx)	/* set IPL to softint level */
	pushl	%edx
	call	*setspl			/* mask levels up to the softint level */
	popl	%eax			/* priority we are at in %eax */

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	UNLINK_INTR_THREAD(%ebx, %esi, %edx)

	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed.
	/
	movl	CPU_THREAD(%ebx), %ecx	/* %ecx = thread being interrupted */

	/ If we are interrupting an interrupt thread, account for it:
	/ charge the interval since its T_INTR_START to the CPU's
	/ per-PIL interrupt statistics.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
	pushl	%eax			/* preserve PIL across rdtsc clobber */
	movl	%eax, %ebp
_tsc_patch11:
	nop; nop			/* patched to rdtsc if available */
	PILBASE(%ebx, %ebp)
	TSC_SUB_FROM(%ecx, T_INTR_START)
	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
	popl	%eax			/* restore PIL */
0:
	movl	T_LWP(%ecx), %ebp	/* interrupt thread inherits the */
	movl	%ebp, T_LWP(%esi)	/* pinned thread's lwp (see note above) */
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	/ Could eliminate the next instruction with a little work.
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ Push interrupted thread onto list from new thread.
	/ Set the new thread as the current one.
	/ Set interrupted thread's T_SP because if it is the idle thread,
	/ Resume() may use that stack between threads.
	/
	movl	%esp, T_SP(%ecx)		/* mark stack for resume */
	movl	%ecx, T_INTR(%esi)		/* push old thread */
	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
	movl	%esp, %ebp

	pushl	%eax			/* push ipl as first element in stack */
					/* see intr_passivate() */
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ecx
	addl	%eax, %ecx		/* convert level to dispatch priority */
	movw	%cx, T_PRI(%esi)

	/
	/ Store starting timestamp in thread structure.
	/ esi = thread, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
_tsc_patch12:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%esi, T_INTR_START)

	sti				/* enable interrupts */

	/
	/ Enabling interrupts (above) could raise the current
	/ IPL and base SPL.  But we continue processing the current soft
	/ interrupt and we will check the base SPL next time around the
	/ loop so that a blocked interrupt thread gets a chance to run.
	/

	/
	/ dispatch soft interrupts (%ecx = PIL)
	/
	pushl	%ecx
	call	av_dispatch_softvect
	addl	$4, %esp

	cli				/* protect interrupt thread pool */
					/* and softinfo & sysinfo */
	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
	movzbl	T_PIL(%esi), %ecx

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)

	btrl	%ecx, CPU_INTR_ACTV(%ebx)

	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = thread, ebx = cpu, ecx = PIL
	/
	PILBASE(%ebx, %ecx)
_tsc_patch13:
	nop; nop		/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)

	/ If there is still an interrupted thread underneath this one,
	/ then this interrupt thread was never blocked and the return is
	/ fairly simple.  Otherwise jump to softintr_thread_exit, which
	/ expects %esi to be curthread (the exiting interrupt thread).
	cmpl	$0, T_INTR(%esi)
	je	softintr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	/ Set the thread state to free so kmdb doesn't see it.
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Switch back to the interrupted thread.
	/
	movl	T_INTR(%esi), %ecx
	movl	%ecx, CPU_THREAD(%ebx)
	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp

	/ If we are returning to an interrupt thread, store a starting
	/ timestamp in the thread structure.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
_tsc_patch14:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%ecx, T_INTR_START)
0:
	movl	CPU_BASE_SPL(%ebx), %eax
	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	softintr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
softintr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)	/* set IPL back to old level */
	pushl	%edi
	call	*setspl
	popl	%eax
dosoftint_again:
	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts? */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint		/* process more software interrupts */

softintr_thread_exit:
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	%esi	interrupt thread
	/

	/
	/ This was an interrupt thread, so recompute the CPU's base SPL
	/ level.  set_base_spl only uses %eax.
	/
	call	set_base_spl		/* interrupt vector already on stack */
	/
	/ Set the thread state to free so kmdb doesn't see it.
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LOADCPU(%ebx)			/* reload the CPU pointer */
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	splhigh			/* block all intrs below lock lvl */
	call	swtch
	/ swtch() shouldn't return
	SET_SIZE(dosoftint)
1457*7c478bd9Sstevel@tonic-gate
1458*7c478bd9Sstevel@tonic-gate#endif	/* __i386 */
1459*7c478bd9Sstevel@tonic-gate#endif	/* __lint */
1460