xref: /titanic_50/usr/src/uts/i86pc/ml/interrupt.s (revision 6fec3791b5a9a5621db93bfef3a6514bc0511b5d)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
29/*	  All Rights Reserved					*/
30
31/*	Copyright (c) 1987, 1988 Microsoft Corporation		*/
32/*	  All Rights Reserved					*/
33
34#pragma ident	"%Z%%M%	%I%	%E% SMI"
35
36#include <sys/asm_linkage.h>
37#include <sys/asm_misc.h>
38#include <sys/regset.h>
39#include <sys/psw.h>
40#include <sys/x86_archext.h>
41
42#if defined(__lint)
43
44#include <sys/types.h>
45#include <sys/thread.h>
46#include <sys/systm.h>
47
48#else   /* __lint */
49
50#include <sys/segments.h>
51#include <sys/pcb.h>
52#include <sys/trap.h>
53#include <sys/ftrace.h>
54#include <sys/traptrace.h>
55#include <sys/clock.h>
56#include <sys/panic.h>
57#include "assym.h"
58
59_ftrace_intr_thread_fmt:
60	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"
61
62#endif	/* __lint */
63
64#if defined(__i386)
65
66#if defined(__lint)
67
68void
69patch_tsc(void)
70{}
71
72#else	/* __lint */
73
74/*
75 * To cope with processors that do not implement the rdtsc instruction,
76 * we patch the kernel to use rdtsc if that feature is detected on the CPU.
77 * On an unpatched kernel, all locations requiring rdtsc are nop's.
78 *
79 * This function patches the nop's to rdtsc.
80 */
81	ENTRY_NP(patch_tsc)
82	movw	_rdtsc_insn, %cx
83	movw	%cx, _tsc_patch1
84	movw	%cx, _tsc_patch2
85	movw	%cx, _tsc_patch3
86	movw	%cx, _tsc_patch4
87	movw	%cx, _tsc_patch5
88	movw	%cx, _tsc_patch6
89	movw	%cx, _tsc_patch7
90	movw	%cx, _tsc_patch8
91	movw	%cx, _tsc_patch9
92	movw	%cx, _tsc_patch10
93	movw	%cx, _tsc_patch11
94	movw	%cx, _tsc_patch12
95	movw	%cx, _tsc_patch13
96	movw	%cx, _tsc_patch14
97	movw	%cx, _tsc_patch15
98	movw	%cx, _tsc_patch16
99	ret
100_rdtsc_insn:
101	rdtsc
102	SET_SIZE(patch_tsc)
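/*
 * For illustration only: the patching above, restated as a hedged C sketch.
 * Each _tsc_patchN site is two bytes of "nop" (0x90 0x90) that get
 * overwritten with the two-byte rdtsc encoding (0x0F 0x31) copied from
 * _rdtsc_insn.  The helper name below is invented, and this ignores how
 * the real kernel deals with write-protected text pages.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void
 *	patch_one_tsc_site(uint8_t *site)
 *	{
 *		static const uint8_t rdtsc_insn[2] = { 0x0F, 0x31 };
 *
 *		// overwrite the "nop; nop" placeholder with rdtsc
 *		memcpy(site, rdtsc_insn, sizeof (rdtsc_insn));
 *	}
 */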
103
104#endif	/* __lint */
105
106#endif	/* __i386 */
107
108
109#if defined(__lint)
110
111void
112_interrupt(void)
113{}
114
115#else	/* __lint */
116
117#if defined(__amd64)
118
119	/*
120	 * Common register usage:
121	 *
122	 * %rbx		cpu pointer
123	 * %r12		trap trace pointer -and- stash of
124	 *		vec across intr_thread dispatch.
125	 * %r13d	ipl of isr
126	 * %r14d	old ipl (ipl level we entered on)
127	 * %r15		interrupted thread stack pointer
128	 */
129	ENTRY_NP2(cmnint, _interrupt)
130
131	INTR_PUSH
132
133	/*
134	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
135	 */
136	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
137						/* Uses labels 8 and 9 */
138	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
139	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */
140
141	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */
142
143	movq	%rsp, %rbp
144
145	TRACE_STACK(%r12)
146
147	LOADCPU(%rbx)				/* &cpu */
148	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
149	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
150	movl	CPU_SOFTINFO(%rbx), %edx
151
152#ifdef TRAPTRACE
153	movl	$255, TTR_IPL(%r12)
154	movl	%r14d, %edi
155	movb	%dil, TTR_PRI(%r12)
156	movl	CPU_BASE_SPL(%rbx), %edi
157	movb	%dil, TTR_SPL(%r12)
158	movb	$255, TTR_VECTOR(%r12)
159#endif
160
161	/*
162	 * Check to see if the trap number is T_SOFTINT; if it is,
163	 * jump straight to dosoftint now.
164	 */
165	cmpq	$T_SOFTINT, (%rsi)
166	je	dosoftint
167
168	/*
169	 * Raise the interrupt priority level, returns newpil.
170	 * (The vector address is in %rsi so setlvl can update it.)
171	 */
172	movl	%r14d, %edi			/* old ipl */
173						/* &vector */
174	call	*setlvl(%rip)
175
176#ifdef TRAPTRACE
177	movb	%al, TTR_IPL(%r12)
178#endif
179	/*
180	 * check for spurious interrupt
181	 */
182	cmpl	$-1, %eax
183	je	_sys_rtt
184
185#ifdef TRAPTRACE
186	movl	%r14d, %edx
187	movb	%dl, TTR_PRI(%r12)
188	movl	CPU_BASE_SPL(%rbx), %edx
189	movb	%dl, TTR_SPL(%r12)
190#endif
191	movl	%eax, CPU_PRI(%rbx)		/* update ipl */
192
193#ifdef TRAPTRACE
194	movl	REGOFF_TRAPNO(%rbp), %edx
195	movb	%dl, TTR_VECTOR(%r12)
196#endif
197	movl	%eax, %r13d			/* ipl of isr */
198
199	/*
200	 * At this point we can take one of two paths.
201	 * If the new level is at or below lock level, we will
202	 * run this interrupt in a separate thread.
203	 */
204	cmpl	$LOCK_LEVEL, %eax
205	jbe	intr_thread
206
207	movq	%rbx, %rdi		/* &cpu */
208	movl	%r13d, %esi		/* ipl */
209	movl	%r14d, %edx		/* old ipl */
210	movq	%rbp, %rcx		/* &regs */
211	call	hilevel_intr_prolog
212	orl	%eax, %eax		/* zero if need to switch stack */
213	jnz	1f
214
215	/*
216	 * Save the thread stack and get on the cpu's interrupt stack
217	 */
218	movq	%rsp, %r15
219	movq	CPU_INTR_STACK(%rbx), %rsp
2201:
221
222	sti
223
224	/*
225	 * Walk the list of handlers for this vector, calling
226	 * them as we go until no more interrupts are claimed.
227	 */
228	movl	REGOFF_TRAPNO(%rbp), %edi
229	call	av_dispatch_autovect
230
231	cli
232
233	movq	%rbx, %rdi			/* &cpu */
234	movl	%r13d, %esi			/* ipl */
235	movl	%r14d, %edx			/* oldipl */
236	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
237	call	hilevel_intr_epilog
238	orl	%eax, %eax		/* zero if need to switch stack */
239	jnz	2f
240	movq	%r15, %rsp
2412:	/*
242	 * Check for, and execute, softints before we iret.
243	 *
244	 * (dosoftint expects oldipl in %r14d (which is where it is)
245	 * the cpu pointer in %rbx (which is where it is) and the
246	 * softinfo in %edx (which is where we'll put it right now))
247	 */
248	movl	CPU_SOFTINFO(%rbx), %edx
249	orl	%edx, %edx
250	jz	_sys_rtt
251	jmp	dosoftint
252	/*NOTREACHED*/
253
254	SET_SIZE(cmnint)
255	SET_SIZE(_interrupt)
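/*
 * A hedged C sketch of the dispatch decision made above.  LOCK_LEVEL is
 * the real constant (10 on x86); the toy_* names are invented stand-ins
 * for the thread-dispatch and high-level paths, not kernel functions.
 *
 *	#include <stdio.h>
 *
 *	#define	LOCK_LEVEL	10		// highest PIL run as a thread
 *
 *	static void toy_run_as_intr_thread(int vec) { printf("thread: %d\n", vec); }
 *	static void toy_run_on_intr_stack(int vec) { printf("high-level: %d\n", vec); }
 *
 *	static void
 *	toy_dispatch(int newipl, int vec)
 *	{
 *		if (newipl == -1)
 *			return;				// spurious: just return
 *		if (newipl <= LOCK_LEVEL)
 *			toy_run_as_intr_thread(vec);	// PILs 1..10: separate thread
 *		else
 *			toy_run_on_intr_stack(vec);	// PILs 11..15: borrow this context
 *	}
 */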
256
257/*
258 * Handle an interrupt in a new thread
259 *
260 * As we branch here, interrupts are still masked,
261 * %rbx still contains the cpu pointer,
262 * %r14d contains the old ipl that we came in on, and
263 * %eax contains the new ipl that we got from the setlvl routine
264 */
265
266	ENTRY_NP(intr_thread)
267
268	movq	%rbx, %rdi	/* &cpu */
269	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
270	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
271	movl	%eax, %edx	/* new pil from setlvlx() */
272	call	intr_thread_prolog
273	movq	%rsp, %r15
274	movq	%rax, %rsp	/* t_stk from interrupt thread */
275	movq	%rsp, %rbp
276
277	sti
278
279	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
280	jz	1f
281	/*
282	 * ftracing support. do we need this on x86?
283	 */
284	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
285	movq	%rbp, %rsi			/* &regs */
286	movl	%r12d, %edx			/* vec */
287	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
288	movzbl	T_PIL(%r11), %ecx		/* newipl */
289	call	ftrace_3_notick
2901:
291	movl	%r12d, %edi			/* vec */
292	call	av_dispatch_autovect
293
294	cli
295
296	movq	%rbx, %rdi			/* &cpu */
297	movl	%r12d, %esi			/* vec */
298	movl	%r14d, %edx			/* oldpil */
299	call	intr_thread_epilog
300	/*
301	 * If we return from here (we might not, if the interrupt handler
302	 * blocked, in which case we'll have quietly swtch()ed
303	 * away) then we need to switch back to our old %rsp
304	 */
305	movq	%r15, %rsp
306	movq	%rsp, %rbp
307	/*
308	 * Check for, and execute, softints before we iret.
309	 *
310	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
311	 * the mcpu_softinfo.st_pending field in %edx.)
312	 */
313	movl	CPU_SOFTINFO(%rbx), %edx
314	orl	%edx, %edx
315	jz	_sys_rtt
316	/*FALLTHROUGH*/
317
318/*
319 * Process soft interrupts.
320 * Interrupts are masked, and we have a minimal frame on the stack.
321 * %edx should contain the mcpu_softinfo.st_pending field
322 */
323
324	ALTENTRY(dosoftint)
325
326	movq	%rbx, %rdi	/* &cpu */
327	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
328				/* cpu->cpu_m.mcpu_softinfo.st_pending */
329	movl	%r14d, %ecx	/* oldipl */
330	call	dosoftint_prolog
331	/*
332	 * dosoftint_prolog() usually returns a stack pointer for the
333	 * interrupt thread that we must switch to.  However, if the
334	 * returned stack pointer is NULL, then the software interrupt was
335	 * too low in priority to run now; we'll catch it another time.
336	 */
337	orq	%rax, %rax
338	jz	_sys_rtt
339	movq	%rsp, %r15
340	movq	%rax, %rsp	/* t_stk from interrupt thread */
341	movq	%rsp, %rbp
342
343	sti
344
345	/*
346	 * Enabling interrupts (above) could raise the current ipl
347	 * and base spl.  But, we continue processing the current soft
348	 * interrupt and we will check the base spl next time around
349	 * so that blocked interrupt threads get a chance to run.
350	 */
351	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
352	movzbl	T_PIL(%r11), %edi
353	call	av_dispatch_softvect
354
355	cli
356
357	movq	%rbx, %rdi		/* &cpu */
358	movl	%r14d, %esi		/* oldpil */
359	call	dosoftint_epilog
360	movq	%r15, %rsp		/* back on old stack pointer */
361	movq	%rsp, %rbp
362	movl	CPU_SOFTINFO(%rbx), %edx
363	orl	%edx, %edx
364	jz	_sys_rtt
365	jmp	dosoftint
366
367	SET_SIZE(dosoftint)
368	SET_SIZE(intr_thread)
369
370#elif defined(__i386)
371
372/*
373 * One day, this should just invoke the C routines that know how to
374 * do all the interrupt bookkeeping.  In the meantime, try
375 * and make the assembler a little more comprehensible.
376 */
377
378#define	INC64(basereg, offset)			\
379	addl	$1, offset(basereg);		\
380	adcl	$0, offset + 4(basereg)
381
382#define	TSC_CLR(basereg, offset)		\
383	movl	$0, offset(basereg);		\
384	movl	$0, offset + 4(basereg)
385
386/*
387 * The following macros assume the time value is in %edx:%eax
388 * e.g. from a rdtsc instruction.
389 */
390#define	TSC_MOV(reg, offset)		\
391	movl	%eax, offset(reg);	\
392	movl	%edx, offset + 4(reg)
393
394#define	TSC_ADD_TO(reg, offset)		\
395	addl	%eax, offset(reg);	\
396	adcl	%edx, offset + 4(reg)
397
398#define	TSC_SUB_FROM(reg, offset)	\
399	subl	offset(reg), %eax;	\
400	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */
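/*
 * Hedged C sketch of what the TSC_* macros above compute.  A 64-bit
 * timestamp is kept as the %edx:%eax pair that rdtsc produces, i.e. two
 * 32-bit halves; INC64/TSC_ADD_TO are 64-bit add-with-carry, TSC_SUB_FROM
 * is 64-bit subtract-with-borrow.  Toy code, standard C only:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	toy_join(uint32_t eax, uint32_t edx)	// rdtsc: low in %eax, high in %edx
 *	{
 *		return (((uint64_t)edx << 32) | eax);
 *	}
 *
 *	// TSC_SUB_FROM + TSC_ADD_TO + TSC_CLR in one step
 *	static void
 *	toy_charge_interval(uint64_t *counter, uint64_t *start,
 *	    uint32_t eax, uint32_t edx)
 *	{
 *		uint64_t interval = toy_join(eax, edx) - *start;
 *
 *		*counter += interval;
 *		*start = 0;
 *	}
 */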
401
402/*
403 * basereg   - pointer to cpu struct
404 * pilreg    - pil or converted pil (pil - (LOCK_LEVEL + 1))
405 * pilreg_32 - 32-bit version of pilreg
406 *
407 * Returns (base + pil * 8) in pilreg
408 */
409#define	PILBASE(basereg, pilreg)	\
410	lea	(basereg, pilreg, 8), pilreg
411
412/*
413 * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
414 */
415#define	HIGHPILBASE(basereg, pilreg, pilreg_32)		\
416	subl	$LOCK_LEVEL + 1, pilreg_32;		\
417	PILBASE(basereg, pilreg)
418
419/*
420 * Returns (cpu + cpu_mstate * 8) in tgt
421 */
422#define	INTRACCTBASE(cpureg, tgtreg)		\
423	movzwl	CPU_MSTATE(cpureg), tgtreg;	\
424	lea	(cpureg, tgtreg, 8), tgtreg
425
426/*
427 * cpu_stats.sys.intr[PIL]++
428 */
429#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
430	movl	pilreg, tmpreg_32;				\
431	PILBASE(basereg, tmpreg);				\
432	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))
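/*
 * Hedged C model of the address arithmetic used by PILBASE/HIGHPILBASE/
 * INTRACCTBASE: every per-PIL statistic is a 64-bit counter, so
 * "base + pil * 8" is simply &array[pil].  The struct below is a toy
 * stand-in for the real cpu_t/cpu_stats layout described by assym.h;
 * field names and array sizes are illustrative.
 *
 *	#include <stdint.h>
 *
 *	#define	TOY_PIL_MAX	15
 *
 *	struct toy_cpu {
 *		uint64_t intr[TOY_PIL_MAX];		// like cpu_stats.sys.intr[]
 *		uint64_t intrstat[TOY_PIL_MAX + 1];	// like cpu_intrstat[]
 *		uint64_t intracct[4];			// like cpu_intracct[], per mstate
 *		int mstate;				// like cpu_mstate
 *	};
 *
 *	static void
 *	toy_charge(struct toy_cpu *cpu, int pil, uint64_t interval)
 *	{
 *		cpu->intr[pil - 1]++;		// the "- 8" in INC_CPU_STATS_INTR:
 *						// PIL is 1-based, intr[] is 0-based
 *		cpu->intrstat[pil] += interval;
 *		cpu->intracct[cpu->mstate] += interval;
 *	}
 */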
433
434/*
435 * Unlink thread from CPU's list
436 */
437#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
438	mov	CPU_INTR_THREAD(cpureg), ithread;	\
439	mov	T_LINK(ithread), tmpreg;		\
440	mov	tmpreg, CPU_INTR_THREAD(cpureg)
441
442/*
443 * Link a thread into CPU's list
444 */
445#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
446	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
447	mov	tmpreg, T_LINK(ithread);		\
448	mov	ithread, CPU_INTR_THREAD(cpureg)
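/*
 * Hedged C sketch of the two pool operations above: cpu_intr_thread is a
 * LIFO free list of interrupt threads linked through t_link.  Toy types;
 * the real ones are kthread_t and cpu_t.
 *
 *	struct toy_thread {
 *		struct toy_thread *t_link;
 *	};
 *
 *	struct toy_cpu {
 *		struct toy_thread *cpu_intr_thread;	// pool head
 *	};
 *
 *	static struct toy_thread *			// UNLINK_INTR_THREAD
 *	toy_unlink_intr_thread(struct toy_cpu *cpu)
 *	{
 *		struct toy_thread *it = cpu->cpu_intr_thread;
 *
 *		cpu->cpu_intr_thread = it->t_link;
 *		return (it);
 *	}
 *
 *	static void					// LINK_INTR_THREAD
 *	toy_link_intr_thread(struct toy_cpu *cpu, struct toy_thread *it)
 *	{
 *		it->t_link = cpu->cpu_intr_thread;
 *		cpu->cpu_intr_thread = it;
 *	}
 */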
449
450#if defined(DEBUG)
451
452/*
453 * Do not call panic, if panic is already in progress.
454 */
455#define	__PANIC(msg, label)		\
456	cmpl	$0, panic_quiesce;		\
457	jne	label;				\
458	pushl	$msg;				\
459	call	panic
460
461#define	__CMP64_JNE(basereg, offset, label)	\
462	cmpl	$0, offset(basereg);		\
463	jne	label;				\
464	cmpl	$0, offset + 4(basereg);	\
465	jne	label
466
467/*
468 * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
469 */
470#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
471	btl	pilreg, CPU_INTR_ACTV(basereg);		\
472	jnc	4f;					\
473	__PANIC(msg, 4f);				\
4744:
475
476/*
477 * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
478 */
479#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
480	btl	pilreg, CPU_INTR_ACTV(basereg);		\
481	jc	5f;					\
482	__PANIC(msg, 5f);				\
4835:
484
485/*
486 * ASSERT(CPU->cpu_pil_high_start != 0)
487 */
488#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)			\
489	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);		\
490	__PANIC(_interrupt_timestamp_zero, 6f);		\
4916:
492
493/*
494 * ASSERT(t->t_intr_start != 0)
495 */
496#define	ASSERT_T_INTR_START_NZ(basereg)				\
497	__CMP64_JNE(basereg, T_INTR_START, 7f);			\
498	__PANIC(_intr_thread_t_intr_start_zero, 7f);	\
4997:
500
501_interrupt_actv_bit_set:
502	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
503_interrupt_actv_bit_not_set:
504	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
505_interrupt_timestamp_zero:
506	.string "_interrupt(): timestamp zero upon handler return"
507_intr_thread_actv_bit_not_set:
508	.string	"intr_thread():	cpu_intr_actv bit not set for PIL"
509_intr_thread_t_intr_start_zero:
510	.string	"intr_thread():	t_intr_start zero upon handler return"
511_dosoftint_actv_bit_set:
512	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
513_dosoftint_actv_bit_not_set:
514	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"
515
516	DGDEF(intr_thread_cnt)
517
518#else
519#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
520#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
521#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
522#define	ASSERT_T_INTR_START_NZ(basereg)
523#endif
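/*
 * Hedged C rendering of the assertion pattern the DEBUG macros above
 * implement: check a condition and panic with a message, but stay quiet
 * if a panic is already in progress.  panic_quiesce is modeled as a plain
 * flag and libc routines stand in for the kernel's panic().
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static int toy_panic_quiesce;		// stand-in for panic_quiesce
 *
 *	static void
 *	toy_assert_pil_not_active(uint32_t intr_actv, int pil, const char *msg)
 *	{
 *		if ((intr_actv & (1u << pil)) == 0)
 *			return;			// assertion holds
 *		if (toy_panic_quiesce)
 *			return;			// already panicking; don't recurse
 *		fprintf(stderr, "panic: %s\n", msg);
 *		abort();
 *	}
 */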
524
525	ENTRY_NP2(cmnint, _interrupt)
526
527	INTR_PUSH
528
529	/*
530	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
531	 */
532	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
533						/* Uses labels 8 and 9 */
534	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
535	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */
536
537	movl	%esp, %ebp
538	DISABLE_INTR_FLAGS
539	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
540	leal    REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
541	movl	CPU_PRI(%ebx), %edi		/* get ipl */
542	movl	CPU_SOFTINFO(%ebx), %edx
543
544	/
545	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
546	/ jump straight to dosoftint now.
547	/
548	cmpl	$T_SOFTINT, (%ecx)
549	je	dosoftint
550
551	/ raise interrupt priority level
552	/ oldipl is in %edi, vectorp is in %ecx
553	/ newipl is returned in %eax
554	pushl	%ecx
555	pushl	%edi
556	call    *setlvl
557	popl	%edi			/* save oldpil in %edi */
558	popl	%ecx
559
560#ifdef TRAPTRACE
561	movb	%al, TTR_IPL(%esi)
562#endif
563
564	/ check for spurious interrupt
565	cmp	$-1, %eax
566	je	_sys_rtt
567
568#ifdef TRAPTRACE
569	movl	CPU_PRI(%ebx), %edx
570	movb	%dl, TTR_PRI(%esi)
571	movl	CPU_BASE_SPL(%ebx), %edx
572	movb	%dl, TTR_SPL(%esi)
573#endif
574
575	movl	%eax, CPU_PRI(%ebx) /* update ipl */
576	movl	REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */
577
578#ifdef TRAPTRACE
579	movb	%cl, TTR_VECTOR(%esi)
580#endif
581
582	/ At this point we can take one of two paths.  If the new priority
583	/ level is less than or equal to LOCK_LEVEL then we jump to code that
584	/ will run this interrupt as a separate thread.  Otherwise the
585	/ interrupt is NOT run as a separate thread.
586
587	/ %edi - old priority level
588	/ %ebp - pointer to REGS
589	/ %ecx - translated vector
590	/ %eax - ipl of isr
591	/ %ebx - cpu pointer
592
593	cmpl 	$LOCK_LEVEL, %eax	/* compare to highest thread level */
594	jbe	intr_thread		/* process as a separate thread */
595
596	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
597	jne	2f
598
599	movl	REGOFF_PC(%ebp), %esi
600	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
601	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
602	jz	1f
603	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
604	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
605	jmp	2f
606
6071:
608	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
609	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */
610
6112:
612	pushl	%ecx				/* vec */
613	pushl	%eax				/* newpil */
614
615	/
616	/ See if we are interrupting another high-level interrupt.
617	/
618	movl	CPU_INTR_ACTV(%ebx), %eax
619	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
620	jz	0f
621	/
622	/ We have interrupted another high-level interrupt.
623	/ Load starting timestamp, compute interval, update cumulative counter.
624	/
625	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
626	HIGHPILBASE(%ebx, %ecx, %ecx)
627_tsc_patch1:
628	nop; nop			/* patched to rdtsc if available */
629	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
630	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %ecx	/* offset PILs 0-10 */
631	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
632	INTRACCTBASE(%ebx, %ecx)
633	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
634	/
635	/ Another high-level interrupt is active below this one, so
636	/ there is no need to check for an interrupt thread. That will be
637	/ done by the lowest priority high-level interrupt active.
638	/
639	jmp	1f
6400:
641	/
642	/ See if we are interrupting a low-level interrupt thread.
643	/
644	movl	CPU_THREAD(%ebx), %esi
645	testw	$T_INTR_THREAD, T_FLAGS(%esi)
646	jz	1f
647	/
648	/ We have interrupted an interrupt thread. Account for its time slice
649	/ only if its time stamp is non-zero.
650	/
651	cmpl	$0, T_INTR_START+4(%esi)
652	jne	0f
653	cmpl	$0, T_INTR_START(%esi)
654	je	1f
6550:
656	movzbl	T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */
657	PILBASE(%ebx, %ecx)
658_tsc_patch2:
659	nop; nop			/* patched to rdtsc if available */
660	TSC_SUB_FROM(%esi, T_INTR_START)
661	TSC_CLR(%esi, T_INTR_START)
662	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
663	INTRACCTBASE(%ebx, %ecx)
664	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
6651:
666	/ Store starting timestamp in CPU structure for this PIL.
667	popl	%ecx			/* restore new PIL */
668	pushl	%ecx
669	HIGHPILBASE(%ebx, %ecx, %ecx)
670_tsc_patch3:
671	nop; nop			/* patched to rdtsc if available */
672	TSC_MOV(%ecx, CPU_PIL_HIGH_START)
673
674	popl	%eax			/* restore new pil */
675	popl	%ecx			/* vec */
676	/
677	/ Set bit for this PIL in CPU's interrupt active bitmask.
678	/
679
680	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
681
682	/ Save old CPU_INTR_ACTV
683	movl	CPU_INTR_ACTV(%ebx), %esi
684
685	cmpl	$15, %eax
686	jne	0f
687	/ PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv
688	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
6890:
690	btsl	%eax, CPU_INTR_ACTV(%ebx)
691	/
692	/ Handle high-level nested interrupt on separate interrupt stack
693	/
694	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
695	jnz	onstack			/* already on interrupt stack */
696	movl	%esp, %eax
697	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
698	pushl	%eax			/* save the thread stack pointer */
699onstack:
700	movl	$autovect, %esi		/* get autovect structure before */
701					/* sti to save on AGI later */
702	sti				/* enable interrupts */
703	pushl	%ecx			/* save interrupt vector */
704	/
705	/ Get handler address
706	/
707pre_loop1:
708	movl	AVH_LINK(%esi, %ecx, 8), %esi
709	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
710				/* bl is DDI_INTR_CLAIMED status of chain */
711	testl	%esi, %esi		/* if pointer is null */
712	jz	.intr_ret		/* then skip */
713loop1:
714	incb	%bh
715	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
716	testl	%edx, %edx		/* if func is null */
717	jz	.intr_ret		/* then skip */
718	pushl	$0
719	pushl	AV_INTARG2(%esi)
720	pushl	AV_INTARG1(%esi)
721	pushl	AV_VECTOR(%esi)
722	pushl	AV_DIP(%esi)
723	call	__dtrace_probe_interrupt__start
724	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
725	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
726	call	*%edx			/* call interrupt routine with arg */
727	addl	$8, %esp
728	movl	%eax, 16(%esp)
729	call	__dtrace_probe_interrupt__complete
730	addl	$20, %esp
731	orb	%al, %bl		/* see if anyone claims intpt. */
732	movl	AV_LINK(%esi), %esi	/* get next routine on list */
733	testl	%esi, %esi		/* if pointer is non-null */
734	jnz	loop1			/* then continue */
735
736.intr_ret:
737	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
738	je	.intr_ret1
739	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
740	jz	.intr_ret1
741	movl	(%esp), %ecx		/* else restore intr vector */
742	movl	$autovect, %esi		/* get autovect structure */
743	jmp	pre_loop1		/* and try again. */
744
745.intr_ret1:
746	LOADCPU(%ebx)			/* get pointer to cpu struct */
747
748	cli
749	movl	CPU_PRI(%ebx), %esi
750
751	/ cpu_stats.sys.intr[PIL]++
752	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)
753
754	/
755	/ Clear bit for this PIL in CPU's interrupt active bitmask.
756	/
757
758	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)
759
760	cmpl	$15, %esi
761	jne	0f
762	/ Only clear bit if reference count is now zero.
763	decw	CPU_INTR_ACTV_REF(%ebx)
764	jnz	1f
7650:
766	btrl	%esi, CPU_INTR_ACTV(%ebx)
7671:
768	/
769	/ Take timestamp, compute interval, update cumulative counter.
770	/ esi = PIL
771_tsc_patch4:
772	nop; nop			/* patched to rdtsc if available */
773	HIGHPILBASE(%ebx, %esi, %esi)
774
775	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)
776
777	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
778	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %esi	/* offset PILs 0-10 */
779	TSC_ADD_TO(%esi, CPU_INTRSTAT)
780	INTRACCTBASE(%ebx, %esi)
781	TSC_ADD_TO(%esi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
782	/
783	/ Check for lower-PIL nested high-level interrupt beneath current one
784	/ If so, place a starting timestamp in its pil_high_start entry.
785	/
786	movl	CPU_INTR_ACTV(%ebx), %eax
787	movl	%eax, %esi
788	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
789	jz	0f
790	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
791	HIGHPILBASE(%ebx, %ecx, %ecx)
792_tsc_patch5:
793	nop; nop			/* patched to rdtsc if available */
794	TSC_MOV(%ecx, CPU_PIL_HIGH_START)
795	/
796	/ Another high-level interrupt is active below this one, so
797	/ there is no need to check for an interrupt thread. That will be
798	/ done by the lowest priority high-level interrupt active.
799	/
800	jmp	1f
8010:
802	/ Check to see if there is a low-level interrupt active. If so,
803	/ place a starting timestamp in the thread structure.
804	movl	CPU_THREAD(%ebx), %esi
805	testw	$T_INTR_THREAD, T_FLAGS(%esi)
806	jz	1f
807_tsc_patch6:
808	nop; nop			/* patched to rdtsc if available */
809	TSC_MOV(%esi, T_INTR_START)
8101:
811	movl	%edi, CPU_PRI(%ebx)
812				/* interrupt vector already on stack */
813	pushl	%edi			/* old ipl */
814	call	*setlvlx
815	addl	$8, %esp		/* eax contains the current ipl */
816
817	movl	CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
818	shrl	$LOCK_LEVEL + 1, %esi	/* HI PRI intrs. */
819	jnz	.intr_ret2
820	popl	%esp			/* restore the thread stack pointer */
821.intr_ret2:
822	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
823	orl	%edx, %edx
824	jz	_sys_rtt
825	jmp	dosoftint	/* check for softints before we return. */
826	SET_SIZE(cmnint)
827	SET_SIZE(_interrupt)
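/*
 * Hedged C rendering of the pre_loop1/loop1/.intr_ret dispatch logic
 * above: walk the handler chain for this vector, OR together each
 * handler's DDI_INTR_CLAIMED result, and re-walk a shared chain (more
 * than one handler) for as long as a pass claimed the interrupt, since
 * another device on the line may still be asserting it.  Toy types; the
 * real list comes from the autovect table indexed by vector, and the
 * DTrace probes are omitted.
 *
 *	struct toy_autovec {
 *		struct toy_autovec *av_link;
 *		unsigned int (*av_vector)(void *, void *);
 *		void *av_intarg1;
 *		void *av_intarg2;
 *	};
 *
 *	static void
 *	toy_dispatch_chain(struct toy_autovec *head)
 *	{
 *		struct toy_autovec *av;
 *		int claimed, nhandlers;
 *
 *		do {
 *			claimed = 0;
 *			nhandlers = 0;
 *			for (av = head; av != 0 && av->av_vector != 0;
 *			    av = av->av_link) {
 *				nhandlers++;
 *				claimed |= (av->av_vector(av->av_intarg1,
 *				    av->av_intarg2) != 0);
 *			}
 *		} while (nhandlers > 1 && claimed);
 *	}
 */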
828
829#endif	/* __i386 */
830
831/*
832 * Declare a uintptr_t which has the size of _interrupt to enable stack
833 * traceback code to know when a regs structure is on the stack.
834 */
835	.globl	_interrupt_size
836	.align	CLONGSIZE
837_interrupt_size:
838	.NWORD	. - _interrupt
839	.type	_interrupt_size, @object
840
841#endif	/* __lint */
842
843#if defined(__i386)
844
845/*
846 * Handle an interrupt in a new thread.
847 *	Entry:  traps disabled.
848 *		%edi - old priority level
849 *		%ebp - pointer to REGS
850 *		%ecx - translated vector
851 *		%eax - ipl of isr.
852 *		%ebx - pointer to CPU struct
853 *	Uses:
854 */
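/*
 * Hedged C sketch of the "pinning" performed below: pop an interrupt
 * thread from the CPU's free pool, copy the interrupted thread's t_lwp
 * (kcpc_overflow_intr relies on this happening before curthread changes),
 * remember the interrupted thread in t_intr, record the PIL, and make the
 * interrupt thread current.  All types here are toys; the real ones are
 * kthread_t/cpu_t with layouts taken from assym.h.
 *
 *	struct toy_thread {
 *		struct toy_thread *t_link;	// free-pool linkage
 *		struct toy_thread *t_intr;	// the pinned (interrupted) thread
 *		void *t_lwp;
 *		int t_pil;
 *	};
 *
 *	struct toy_cpu {
 *		struct toy_thread *cpu_thread;		// current thread
 *		struct toy_thread *cpu_intr_thread;	// free pool head
 *	};
 *
 *	static struct toy_thread *
 *	toy_pin(struct toy_cpu *cpu, int pil)
 *	{
 *		struct toy_thread *cur = cpu->cpu_thread;
 *		struct toy_thread *it = cpu->cpu_intr_thread;
 *
 *		cpu->cpu_intr_thread = it->t_link;	// UNLINK_INTR_THREAD
 *		it->t_lwp = cur->t_lwp;			// before cpu_thread changes
 *		it->t_intr = cur;			// remember who we pinned
 *		it->t_pil = pil;
 *		cpu->cpu_thread = it;			// interrupt thread is now current
 *		return (it);
 *	}
 */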
855
856#if !defined(__lint)
857
858	ENTRY_NP(intr_thread)
859	/
860	/ Set bit for this PIL in CPU's interrupt active bitmask.
861	/
862
863	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
864
865	btsl	%eax, CPU_INTR_ACTV(%ebx)
866
867	/ Get set to run interrupt thread.
868	/ There should always be an interrupt thread since we allocate one
869	/ for each level on the CPU.
870	/
871	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
872	/ of events here - in particular that t->t_lwp of the interrupt
873	/ thread is set to the pinned thread's t_lwp *before* curthread is changed
874	/
875	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */
876
877	/
878	/ Are we interrupting an interrupt thread? If so, account for it.
879	/
880	testw	$T_INTR_THREAD, T_FLAGS(%edx)
881	jz	0f
882	pushl	%ecx
883	pushl	%eax
884	movl	%edx, %esi
885_tsc_patch7:
886	nop; nop			/* patched to rdtsc if available */
887	TSC_SUB_FROM(%esi, T_INTR_START)
888	TSC_CLR(%esi, T_INTR_START)
889	movzbl	T_PIL(%esi), %ecx
890	PILBASE(%ebx, %ecx)
891	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
892	INTRACCTBASE(%ebx, %ecx)
893	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
894	movl	%esi, %edx
895	popl	%eax
896	popl	%ecx
8970:
898	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
899	pushl	%edi			/* get a temporary register */
900	UNLINK_INTR_THREAD(%ebx, %esi, %edi)
901
902	movl	T_LWP(%edx), %edi
903	movl	%edx, T_INTR(%esi)		/* push old thread */
904	movl	%edi, T_LWP(%esi)
905	/
906	/ Threads on the interrupt thread free list could have state already
907	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
908	/
909	movl	$ONPROC_THREAD, T_STATE(%esi)
910	/
911	/ chain the interrupted thread onto list from the interrupt thread.
912	/ Set the new interrupt thread as the current one.
913	/
914	popl	%edi			/* Don't need a temp reg anymore */
915	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
916	movl	%esp, %ebp
917	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
918	pushl	%eax				/* save the ipl */
919	/
920	/ Initialize thread priority level from intr_pri
921	/
922	movb	%al, T_PIL(%esi)	/* store pil */
923	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
924					/* is loaded on some other cpu. */
925	addl	%ebx, %eax		/* convert level to dispatch priority */
926	movw	%ax, T_PRI(%esi)
927
928	/
929	/ Take timestamp and store it in the thread structure.
930	/
931	movl	%eax, %ebx		/* save priority over rdtsc */
932_tsc_patch8:
933	nop; nop			/* patched to rdtsc if available */
934	TSC_MOV(%esi, T_INTR_START)
935	movl	%ebx, %eax		/* restore priority */
936
937	/ The following 3 instructions need not be in cli.
938	/ Putting them here only to avoid the AGI penalty on Pentiums.
939
940	pushl	%ecx			/* save interrupt vector. */
941	pushl	%esi			/* save interrupt thread */
942	movl	$autovect, %esi		/* get autovect structure */
943	sti				/* enable interrupts */
944
945	/ Fast event tracing.
946	LOADCPU(%ebx)
947	movl	CPU_FTRACE_STATE(%ebx), %ebx
948	testl	$FTRACE_ENABLED, %ebx
949	jz	1f
950
951	movl	8(%esp), %ebx
952	pushl	%ebx			/* ipl */
953	pushl	%ecx			/* int vector */
954	movl	T_SP(%edx), %ebx
955	pushl	%ebx			/* &regs */
956	pushl	$_ftrace_intr_thread_fmt
957	call	ftrace_3_notick
958	addl	$8, %esp
959	popl	%ecx			/* restore int vector */
960	addl	$4, %esp
9611:
962pre_loop2:
963	movl	AVH_LINK(%esi, %ecx, 8), %esi
964	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
965				/* bl is DDI_INTR_CLAIMED status of chain */
966	testl	%esi, %esi	/* if pointer is null */
967	jz	loop_done2	/* we're done */
968loop2:
969	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
970	testl	%edx, %edx		/* if pointer is null */
971	jz	loop_done2		/* we're done */
972	incb	%bh
973	pushl	$0
974	pushl	AV_INTARG2(%esi)
975	pushl	AV_INTARG1(%esi)
976	pushl	AV_VECTOR(%esi)
977	pushl	AV_DIP(%esi)
978	call	__dtrace_probe_interrupt__start
979	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
980	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
981	call	*%edx			/* call interrupt routine with arg */
982	addl	$8, %esp
983	movl	%eax, 16(%esp)
984	call	__dtrace_probe_interrupt__complete
985	addl	$20, %esp
986	orb	%al, %bl		/* see if anyone claims intpt. */
987	movl	AV_LINK(%esi), %esi	/* get next routine on list */
988	testl	%esi, %esi		/* if pointer is non-null */
989	jnz	loop2			/* continue */
990loop_done2:
991	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
992	je	.loop_done2_1
993	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
994	jz	.loop_done2_1
995	movl	$autovect, %esi		/* else get autovect structure */
996	movl	4(%esp), %ecx		/* restore intr vector */
997	jmp	pre_loop2		/* and try again. */
998.loop_done2_1:
999	popl	%esi			/* restore intr thread pointer */
1000
1001	LOADCPU(%ebx)
1002
1003	cli		/* protect interrupt thread pool and intr_actv */
1004	movzbl	T_PIL(%esi), %eax
1005
1006	/ Save value in regs
1007	pushl	%eax			/* current pil */
1008	pushl	%edx			/* (huh?) */
1009	pushl	%edi			/* old pil */
1010
1011	/ cpu_stats.sys.intr[PIL]++
1012	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)
1013
1014	/
1015	/ Take timestamp, compute interval, and update cumulative counter.
1016	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
1017	/
1018	movl	%eax, %edi
1019
1020	ASSERT_T_INTR_START_NZ(%esi)
1021
1022_tsc_patch9:
1023	nop; nop			/* patched to rdtsc if available */
1024	TSC_SUB_FROM(%esi, T_INTR_START)
1025	PILBASE(%ebx, %edi)
1026	TSC_ADD_TO(%edi, CPU_INTRSTAT)
1027	INTRACCTBASE(%ebx, %edi)
1028	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1029	popl	%edi
1030	popl	%edx
1031	popl	%eax
1032
1033	/
1034	/ Clear bit for this PIL in CPU's interrupt active bitmask.
1035	/
1036
1037	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)
1038
1039	btrl	%eax, CPU_INTR_ACTV(%ebx)
1040
1041	/ if there is still an interrupted thread underneath this one
1042	/ then the interrupt was never blocked and the return is fairly
1043	/ simple.  Otherwise jump to intr_thread_exit
1044	cmpl	$0, T_INTR(%esi)
1045	je	intr_thread_exit
1046
1047	/
1048	/ link the thread back onto the interrupt thread pool
1049	LINK_INTR_THREAD(%ebx, %esi, %edx)
1050
1051	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
1052	/ set the thread state to free so kmdb doesn't see it
1053	movl	$FREE_THREAD, T_STATE(%esi)
1054
1055	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
1056	jae	intr_restore_ipl	/* then use oldipl */
1057	movl	%eax, %edi		/* else use basespl */
1058intr_restore_ipl:
1059	movl	%edi, CPU_PRI(%ebx)
1060					/* intr vector already on stack */
1061	pushl	%edi			/* old ipl */
1062	call	*setlvlx		/* eax contains the current ipl */
1063	/
1064	/ Switch back to the interrupted thread
1065	movl	T_INTR(%esi), %ecx
1066
1067	/ Place starting timestamp in interrupted thread's thread structure.
1068_tsc_patch10:
1069	nop; nop			/* patched to rdtsc if available */
1070	TSC_MOV(%ecx, T_INTR_START)
1071
1072	movl	T_SP(%ecx), %esp	/* restore stack pointer */
1073	movl	%esp, %ebp
1074	movl	%ecx, CPU_THREAD(%ebx)
1075
1076	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
1077	orl	%edx, %edx
1078	jz	_sys_rtt
1079	jmp	dosoftint	/* check for softints before we return. */
1080
1081	/
1082	/ An interrupt returned on what was once (and still might be)
1083	/ an interrupt thread stack, but the interrupted thread is no longer
1084	/ there.  This means the interrupt must have blocked.
1085	/
1086	/ There is no longer a thread under this one, so put this thread back
1087	/ on the CPU's free list and resume the idle thread which will dispatch
1088	/ the next thread to run.
1089	/
1090	/ All interrupts are disabled here
1091	/
1092
1093intr_thread_exit:
1094#ifdef DEBUG
1095	incl	intr_thread_cnt
1096#endif
1097	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
1098	/
1099	/ Put thread back on the interrupt thread list.
1100	/ As a reminder, the regs at this point are
1101	/	esi	interrupt thread
1102	/	edi	old ipl
1103	/	ebx	ptr to CPU struct
1104
1105	/ Set CPU's base SPL level based on active interrupts bitmask
1106	call	set_base_spl
1107
1108	movl	CPU_BASE_SPL(%ebx), %edi
1109	movl	%edi, CPU_PRI(%ebx)
1110					/* interrupt vector already on stack */
1111	pushl	%edi
1112	call	*setlvlx
1113	addl	$8, %esp		/* XXX - don't need to pop since */
1114					/* we are ready to switch */
1115	call	splhigh			/* block all intrs below lock level */
1116	/
1117	/ Set the thread state to free so kmdb doesn't see it
1118	/
1119	movl	$FREE_THREAD, T_STATE(%esi)
1120	/
1121	/ Put thread on either the interrupt pool or the free pool and
1122	/ call swtch() to resume another thread.
1123	/
1124	LINK_INTR_THREAD(%ebx, %esi, %edx)
1125	call 	swtch
1126	/ swtch() shouldn't return
1127
1128	SET_SIZE(intr_thread)
1129
1130#endif	/* __lint */
1131#endif	/* __i386 */
1132
1133/*
1134 * Set the CPU's base SPL level, based on which interrupt levels are active.
1135 *	Called at spl7 or above.
1136 */
1137
1138#if defined(__lint)
1139
1140void
1141set_base_spl(void)
1142{}
1143
1144#else	/* __lint */
1145
1146	ENTRY_NP(set_base_spl)
1147	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
1148	testl	%eax, %eax		/* is it zero? */
1149	jz	setbase
1150	testl	$0xff00, %eax
1151	jnz	ah_set
1152	shl	$24, %eax		/* shift 'em over so we can find */
1153					/* the 1st bit faster */
1154	bsrl	%eax, %eax
1155	subl	$24, %eax
1156setbase:
1157	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
1158	ret
1159ah_set:
1160	shl	$16, %eax
1161	bsrl	%eax, %eax
1162	subl	$16, %eax
1163	jmp	setbase
1164	SET_SIZE(set_base_spl)
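/*
 * What the routine above computes, as a hedged C sketch: the base SPL is
 * the highest PIL that still has a bit set in cpu_intr_actv, or 0 when no
 * interrupts are active.  Only the low 16 bits matter here; the upper 16
 * bits hold the PIL-15 nesting count.  The shl/bsrl pairs above are just
 * a way of locating that highest bit.
 *
 *	#include <stdint.h>
 *
 *	static int
 *	toy_base_spl(uint32_t intr_actv)
 *	{
 *		int pil;
 *
 *		intr_actv &= 0xffff;		// drop the PIL-15 nest count
 *		for (pil = 15; pil >= 1; pil--) {
 *			if (intr_actv & (1u << pil))
 *				return (pil);	// highest active PIL
 *		}
 *		return (0);			// nothing active
 *	}
 */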
1165
1166#endif	/* __lint */
1167
1168#if defined(__i386)
1169
1170/*
1171 * int
1172 * intr_passivate(from, to)
1173 *      thread_id_t     from;           interrupt thread
1174 *      thread_id_t     to;             interrupted thread
1175 *
1176 *	intr_passivate(from, to) makes the interrupted thread "to" runnable.
1177 *
1178 *	Since the interrupted thread's t_sp has already been saved, its t_pc
1179 *	is all that needs to be set in this function.
1180 *
1181 *	Returns interrupt level of the thread.
1182 */
1183
1184#if defined(__lint)
1185
1186/* ARGSUSED */
1187int
1188intr_passivate(kthread_id_t from, kthread_id_t to)
1189{ return (0); }
1190
1191#else	/* __lint */
1192
1193	ENTRY(intr_passivate)
1194	movl	8(%esp), %eax		/* interrupted thread  */
1195	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */
1196
1197	movl	4(%esp), %eax		/* interrupt thread */
1198	movl	T_STACK(%eax), %eax	/* get the pointer to the start of */
1199					/* of the interrupt thread stack */
1200	movl	-4(%eax), %eax		/* interrupt level was the first */
1201					/* thing pushed onto the stack */
1202	ret
1203	SET_SIZE(intr_passivate)
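/*
 * Hedged illustration of why "-4(%eax)" recovers the interrupt level
 * above: the interrupt thread's stack grows downward from its t_stk base,
 * and the PIL is the first word pushed after switching to that stack (see
 * the "push ipl as first element" comments in intr_thread/dosoftint), so
 * it sits immediately below the stack base.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	toy_first_pushed(uint32_t *t_stk)	// t_stk: the thread's stack base
 *	{
 *		return (t_stk[-1]);		// the word just below the base
 *	}
 */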
1204
1205#endif	/* __lint */
1206#endif	/* __i386 */
1207
1208#if defined(__lint)
1209
1210void
1211fakesoftint(void)
1212{}
1213
1214#else	/* __lint */
1215
1216	/
1217	/ If we're here, we're being called from splx() to fake a soft
1218	/ interrupt (note that interrupts are still disabled from splx()).
1219	/ We execute this code when a soft interrupt is posted at
1220	/ a level higher than the CPU's current spl; when spl is lowered in
1221	/ splx(), it will see the softint and jump here.  We'll do exactly
1222	/ what a trap would do:  push our flags, %cs, %eip, error code
1223	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
1224	/ and branch to the dosoftint() code.
1225	/
1226#if defined(__amd64)
1227
1228	/*
1229	 * In 64-bit mode, iretq -always- pops all five regs
1230	 * Imitate the 16-byte auto-align of the stack, and the
1231	 * zero-ed out %ss value.
1232	 */
1233	ENTRY_NP(fakesoftint)
1234	movq	%rsp, %r11
1235	andq	$-16, %rsp
1236	pushq	$KDS_SEL	/* %ss */
1237	pushq	%r11		/* %rsp */
1238	pushf			/* rflags */
1239	pushq	$KCS_SEL	/* %cs */
1240	leaq	fakesoftint_return(%rip), %r11
1241	pushq	%r11		/* %rip */
1242	pushq	$0		/* err */
1243	pushq	$T_SOFTINT	/* trap */
1244	jmp	cmnint
1245	SET_SIZE(fakesoftint)
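/*
 * Hedged sketch of the frame fabricated above, lowest address first (the
 * order iretq expects).  This struct is illustrative only; it just names
 * the seven pushes performed by the amd64 fakesoftint.
 *
 *	#include <stdint.h>
 *
 *	struct toy_fake_intr_frame {
 *		uint64_t trapno;	// T_SOFTINT, pushed last
 *		uint64_t err;		// 0
 *		uint64_t rip;		// fakesoftint_return
 *		uint64_t cs;		// KCS_SEL
 *		uint64_t rflags;	// saved by pushf
 *		uint64_t rsp;		// original, pre-alignment stack pointer
 *		uint64_t ss;		// KDS_SEL, pushed first
 *	};
 */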
1246
1247#elif defined(__i386)
1248
1249	ENTRY_NP(fakesoftint)
1250	pushf
1251	push	%cs
1252	push	$fakesoftint_return
1253	push	$0
1254	push	$T_SOFTINT
1255	jmp	cmnint
1256	SET_SIZE(fakesoftint)
1257
1258#endif	/* __i386 */
1259
1260	.align	CPTRSIZE
1261	.globl	_fakesoftint_size
1262	.type	_fakesoftint_size, @object
1263_fakesoftint_size:
1264	.NWORD	. - fakesoftint
1265	SET_SIZE(_fakesoftint_size)
1266
1267/*
1268 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
1269 * Process software interrupts
1270 * Interrupts are disabled here.
1271 */
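/*
 * Hedged C sketch of the selection logic at the top of dosoftint below:
 * take the highest pending soft PIL, give up if the current IPL or the
 * base SPL already masks it, and atomically clear its pending bit before
 * dispatching.  The pending word stands in for mcpu_softinfo.st_pending;
 * the GCC atomic builtin is only a stand-in for the "lock btrl" below.
 *
 *	#include <stdint.h>
 *
 *	// returns the PIL to dispatch, -1 if nothing runnable, 0 to retry
 *	static int
 *	toy_pick_softint(volatile uint32_t *pending, int curipl, int basespl)
 *	{
 *		uint32_t snap = *pending;
 *		int pil = 31;
 *
 *		if (snap == 0)
 *			return (-1);
 *		while ((snap & (1u << pil)) == 0)
 *			pil--;				// bsrl: highest pending PIL
 *		if (curipl >= pil || basespl >= pil)
 *			return (-1);			// still masked; handled later
 *		if ((__sync_fetch_and_and(pending, ~(1u << pil)) &
 *		    (1u << pil)) == 0)
 *			return (0);			// raced with another clearer
 *		return (pil);
 *	}
 */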
1272#if defined(__i386)
1273
1274	ENTRY_NP(dosoftint)
1275
1276	bsrl	%edx, %edx		/* find highest pending interrupt */
1277	cmpl 	%edx, %edi		/* if curipl >= pri soft pending intr */
1278	jae	_sys_rtt		/* skip */
1279
1280	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
1281	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
1282	jae	_sys_rtt		/* skip */
1283
1284	lock				/* MP protect */
1285	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
1286	jnc	dosoftint_again
1287
1288	movl	%edx, CPU_PRI(%ebx) /* set IPL to softint level */
1289	pushl	%edx
1290	call	*setspl			/* mask levels up to the softint level */
1291	popl	%eax			/* priority we are at in %eax */
1292
1293	/ Get set to run interrupt thread.
1294	/ There should always be an interrupt thread since we allocate one
1295	/ for each level on the CPU.
1296	UNLINK_INTR_THREAD(%ebx, %esi, %edx)
1297
1298	/
1299	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
1300	/ of events here - in particular that t->t_lwp of the interrupt
1301	/ thread is set to the pinned thread's t_lwp *before* curthread is changed
1302	/
1303	movl	CPU_THREAD(%ebx), %ecx
1304
1305	/ If we are interrupting an interrupt thread, account for it.
1306	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
1307	jz	0f
1308	pushl	%eax
1309	movl	%eax, %ebp
1310_tsc_patch11:
1311	nop; nop			/* patched to rdtsc if available */
1312	PILBASE(%ebx, %ebp)
1313	TSC_SUB_FROM(%ecx, T_INTR_START)
1314	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
1315	INTRACCTBASE(%ebx, %ebp)
1316	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1317	popl	%eax
13180:
1319	movl	T_LWP(%ecx), %ebp
1320	movl	%ebp, T_LWP(%esi)
1321	/
1322	/ Threads on the interrupt thread free list could have state already
1323	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
1324	/ Could eliminate the next two instructions with a little work.
1325	/
1326	movl	$ONPROC_THREAD, T_STATE(%esi)
1327	/
1328	/ Push interrupted thread onto list from new thread.
1329	/ Set the new thread as the current one.
1330	/ Set interrupted thread's T_SP because if it is the idle thread,
1331	/ Resume() may use that stack between threads.
1332	/
1333	movl	%esp, T_SP(%ecx)		/* mark stack for resume */
1334	movl	%ecx, T_INTR(%esi)		/* push old thread */
1335	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
1336	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
1337	movl	%esp, %ebp
1338
1339	pushl	%eax			/* push ipl as first element in stack */
1340					/* see intr_passivate() */
1341	/
1342	/ Set bit for this PIL in CPU's interrupt active bitmask.
1343	/
1344
1345	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)
1346
1347	btsl	%eax, CPU_INTR_ACTV(%ebx)
1348
1349	/
1350	/ Initialize thread priority level from intr_pri
1351	/
1352	movb	%al, T_PIL(%esi)	/* store pil */
1353	movzwl	intr_pri, %ecx
1354	addl	%eax, %ecx		/* convert level to dispatch priority */
1355	movw	%cx, T_PRI(%esi)
1356
1357	/
1358	/ Store starting timestamp in thread structure.
1359	/ esi = thread, ebx = cpu pointer, eax = PIL
1360	/
1361	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
1362_tsc_patch12:
1363	nop; nop			/* patched to rdtsc if available */
1364	TSC_MOV(%esi, T_INTR_START)
1365
1366	sti				/* enable interrupts */
1367
1368	/
1369	/ Enabling interrupts (above) could raise the current
1370	/ IPL and base SPL. But, we continue processing the current soft
1371	/ interrupt and we will check the base SPL next time in the loop
1372	/ so that blocked interrupt threads get a chance to run.
1373	/
1374
1375	/
1376	/ dispatch soft interrupts
1377	/
1378	pushl	%ecx
1379	call	av_dispatch_softvect
1380	addl	$4, %esp
1381
1382	cli				/* protect interrupt thread pool */
1383					/* and softinfo & sysinfo */
1384	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
1385	movzbl	T_PIL(%esi), %ecx
1386
1387	/ cpu_stats.sys.intr[PIL]++
1388	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)
1389
1390	/
1391	/ Clear bit for this PIL in CPU's interrupt active bitmask.
1392	/
1393
1394	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)
1395
1396	btrl	%ecx, CPU_INTR_ACTV(%ebx)
1397
1398	/
1399	/ Take timestamp, compute interval, update cumulative counter.
1400	/ esi = thread, ebx = cpu, ecx = PIL
1401	/
1402	PILBASE(%ebx, %ecx)
1403_tsc_patch13:
1404	nop; nop		/* patched to rdtsc if available */
1405	TSC_SUB_FROM(%esi, T_INTR_START)
1406	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
1407	INTRACCTBASE(%ebx, %ecx)
1408	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1409
1410	/ if there is still an interrupt thread underneath this one
1411	/ then the interrupt was never blocked and the return is fairly
1412	/ simple.  Otherwise jump to softintr_thread_exit.
1413	/ softintr_thread_exit expects esi to be curthread & ebx to be ipl.
1414	cmpl	$0, T_INTR(%esi)
1415	je	softintr_thread_exit
1416
1417	/
1418	/ link the thread back onto the interrupt thread pool
1419	LINK_INTR_THREAD(%ebx, %esi, %edx)
1420
1421	/ set the thread state to free so kmdb doesn't see it
1422	movl	$FREE_THREAD, T_STATE(%esi)
1423	/
1424	/ Switch back to the interrupted thread
1425	movl	T_INTR(%esi), %ecx
1426	movl	%ecx, CPU_THREAD(%ebx)
1427	movl	T_SP(%ecx), %esp	/* restore stack pointer */
1428	movl	%esp, %ebp
1429
1430	/ If we are returning to an interrupt thread, store a starting
1431	/ timestamp in the thread structure.
1432	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
1433	jz	0f
1434_tsc_patch14:
1435	nop; nop			/* patched to rdtsc if available */
1436	TSC_MOV(%ecx, T_INTR_START)
14370:
1438	movl	CPU_BASE_SPL(%ebx), %eax
1439	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
1440	jae	softintr_restore_ipl	/* then use oldipl */
1441	movl	%eax, %edi		/* else use basespl */
1442softintr_restore_ipl:
1443	movl	%edi, CPU_PRI(%ebx) /* set IPL to old level */
1444	pushl	%edi
1445	call	*setspl
1446	popl	%eax
1447dosoftint_again:
1448	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
1449	orl	%edx, %edx
1450	jz	_sys_rtt
1451	jmp	dosoftint		/* process more software interrupts */
1452
1453softintr_thread_exit:
1454	/
1455	/ Put thread back on the interrupt thread list.
1456	/ As a reminder, the regs at this point are
1457	/	%esi	interrupt thread
1458
1459	/
1460	/ This was an interrupt thread, so set CPU's base SPL level
1461	/ set_base_spl only uses %eax.
1462	/
1463	call	set_base_spl		/* interrupt vector already on stack */
1464	/
1465	/ Set the thread state to free so kmdb doesn't see it
1466	/
1467	movl	$FREE_THREAD, T_STATE(%esi)
1468	/
1469	/ Put thread on either the interrupt pool or the free pool and
1470	/ call swtch() to resume another thread.
1471	/
1472	LOADCPU(%ebx)
1473	LINK_INTR_THREAD(%ebx, %esi, %edx)
1474	call	splhigh			/* block all intrs below lock lvl */
1475	call	swtch
1476	/ swtch() shouldn't return
1477	SET_SIZE(dosoftint)
1478
1479#endif	/* __i386 */
1480#endif	/* __lint */
1481