1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
29/*	  All Rights Reserved					*/
30
31/*	Copyright (c) 1987, 1988 Microsoft Corporation		*/
32/*	  All Rights Reserved					*/
33
34#pragma ident	"%Z%%M%	%I%	%E% SMI"
35
36#include <sys/asm_linkage.h>
37#include <sys/asm_misc.h>
38#include <sys/regset.h>
39#include <sys/psw.h>
40#include <sys/x86_archext.h>
41
42#if defined(__lint)
43
44#include <sys/types.h>
45#include <sys/thread.h>
46#include <sys/systm.h>
47
48#else   /* __lint */
49
50#include <sys/segments.h>
51#include <sys/pcb.h>
52#include <sys/trap.h>
53#include <sys/ftrace.h>
54#include <sys/traptrace.h>
55#include <sys/clock.h>
56#include <sys/panic.h>
57#include "assym.h"
58
59_ftrace_intr_thread_fmt:
60	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"
61
62#endif	/* __lint */
63
64#if defined(__i386)
65
66#if defined(__lint)
67
68void
69patch_tsc(void)
70{}
71
72#else	/* __lint */
73
74/*
75 * Some processors do not implement the rdtsc instruction, so an unpatched
76 * kernel has nop's at every location that wants to read the TSC.  When the
77 * rdtsc feature is detected on the CPU, the kernel is patched to use it.
78 *
79 * This function patches those nop's to rdtsc.
80 */
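/*
 * In C terms, the patching amounts to copying the two opcode bytes of rdtsc
 * (kept at _rdtsc_insn below) over each two-byte nop;nop site.  A rough
 * sketch, purely illustrative:
 *
 *	extern uint16_t _rdtsc_insn;		0x0f 0x31, defined below
 *	extern uint16_t _tsc_patch1;		one of the 17 nop;nop sites
 *
 *	_tsc_patch1 = _rdtsc_insn;		equivalent of movw %cx, _tsc_patch1
 *	... and likewise for _tsc_patch2 through _tsc_patch17.
 */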
81	ENTRY_NP(patch_tsc)
82	movw	_rdtsc_insn, %cx
83	movw	%cx, _tsc_patch1
84	movw	%cx, _tsc_patch2
85	movw	%cx, _tsc_patch3
86	movw	%cx, _tsc_patch4
87	movw	%cx, _tsc_patch5
88	movw	%cx, _tsc_patch6
89	movw	%cx, _tsc_patch7
90	movw	%cx, _tsc_patch8
91	movw	%cx, _tsc_patch9
92	movw	%cx, _tsc_patch10
93	movw	%cx, _tsc_patch11
94	movw	%cx, _tsc_patch12
95	movw	%cx, _tsc_patch13
96	movw	%cx, _tsc_patch14
97	movw	%cx, _tsc_patch15
98	movw	%cx, _tsc_patch16
99	movw	%cx, _tsc_patch17
100	ret
101_rdtsc_insn:
102	rdtsc
103	SET_SIZE(patch_tsc)
104
105#endif	/* __lint */
106
107#endif	/* __i386 */
108
109
110#if defined(__lint)
111
112void
113_interrupt(void)
114{}
115
116#else	/* __lint */
117
118#if defined(__amd64)
119
120	/*
121	 * Common register usage:
122	 *
123	 * %rbx		cpu pointer
124	 * %r12		trap trace pointer -and- stash of
125	 *		vec across intr_thread dispatch.
126	 * %r13d	ipl of isr
127	 * %r14d	old ipl (ipl level we entered on)
128	 * %r15		interrupted thread stack pointer
129	 */
130	ENTRY_NP2(cmnint, _interrupt)
131
132	INTR_PUSH
133
134	/*
135	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
136	 */
137	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
138						/* Uses labels 8 and 9 */
139	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
140	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */
141
142	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */
143
144	movq	%rsp, %rbp
145
146	TRACE_STACK(%r12)
147
148	LOADCPU(%rbx)				/* &cpu */
149	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
150	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
151	movl	CPU_SOFTINFO(%rbx), %edx
152
153#ifdef TRAPTRACE
154	movl	$255, TTR_IPL(%r12)
155	movl	%r14d, %edi
156	movb	%dil, TTR_PRI(%r12)
157	movl	CPU_BASE_SPL(%rbx), %edi
158	movb	%dil, TTR_SPL(%r12)
159	movb	$255, TTR_VECTOR(%r12)
160#endif
161
162	/*
163	 * Check to see if the trap number is T_SOFTINT; if it is,
164	 * jump straight to dosoftint now.
165	 */
166	cmpq	$T_SOFTINT, (%rsi)
167	je	dosoftint
168
169	/*
170	 * Raise the interrupt priority level, returns newpil.
171	 * (The vector address is in %rsi so setlvl can update it.)
172	 */
173	movl	%r14d, %edi			/* old ipl */
174						/* &vector */
175	call	*setlvl(%rip)
176
177#ifdef TRAPTRACE
178	movb	%al, TTR_IPL(%r12)
179#endif
180	/*
181	 * check for spurious interrupt
182	 */
183	cmpl	$-1, %eax
184	je	_sys_rtt
185
186#ifdef TRAPTRACE
187	movl	%r14d, %edx
188	movb	%dl, TTR_PRI(%r12)
189	movl	CPU_BASE_SPL(%rbx), %edx
190	movb	%dl, TTR_SPL(%r12)
191#endif
192	movl	%eax, CPU_PRI(%rbx)		/* update ipl */
193
194#ifdef TRAPTRACE
195	movl	REGOFF_TRAPNO(%rbp), %edx
196	movb	%dl, TTR_VECTOR(%r12)
197#endif
198	movl	%eax, %r13d			/* ipl of isr */
199
200	/*
201	 * At this point we can take one of two paths.
202	 * If the new level is at or below lock level, we will
203	 * run this interrupt in a separate thread.
204	 */
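	/*
	 * The dispatch decision below, as a C sketch (the C support routines
	 * named here are the ones actually called; everything else is
	 * illustrative shorthand):
	 *
	 *	if (newipl <= LOCK_LEVEL) {
	 *		run the handler in an interrupt thread (intr_thread below);
	 *	} else {
	 *		hilevel_intr_prolog(cpu, newipl, oldipl, rp);
	 *		switch to cpu->cpu_intr_stack unless already on it;
	 *		av_dispatch_autovect(vec);
	 *		hilevel_intr_epilog(cpu, newipl, oldipl, vec);
	 *		switch back to the interrupted thread's stack;
	 *	}
	 */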
205	cmpl	$LOCK_LEVEL, %eax
206	jbe	intr_thread
207
208	movq	%rbx, %rdi		/* &cpu */
209	movl	%r13d, %esi		/* ipl */
210	movl	%r14d, %edx		/* old ipl */
211	movq	%rbp, %rcx		/* &regs */
212	call	hilevel_intr_prolog
213	orl	%eax, %eax		/* zero if need to switch stack */
214	jnz	1f
215
216	/*
217	 * Save the thread stack and get on the cpu's interrupt stack
218	 */
219	movq	%rsp, %r15
220	movq	CPU_INTR_STACK(%rbx), %rsp
2211:
222
223	sti
224
225	/*
226	 * Walk the list of handlers for this vector, calling
227	 * them as we go until no more interrupts are claimed.
228	 */
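	/*
	 * In rough C terms, av_dispatch_autovect() re-walks the chain for a
	 * shared, level-triggered line until no handler claims the interrupt
	 * (compare the open-coded i386 loop later in this file); a sketch,
	 * with names abbreviated:
	 *
	 *	do {
	 *		claimed = 0; ncalled = 0;
	 *		for (av = autovect[vec].avh_link; av != NULL &&
	 *		    av->av_vector != NULL; av = av->av_link) {
	 *			claimed |= (*av->av_vector)(av->av_intarg1,
	 *			    av->av_intarg2);
	 *			ncalled++;
	 *		}
	 *	} while (claimed && ncalled > 1);
	 */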
229	movl	REGOFF_TRAPNO(%rbp), %edi
230	call	av_dispatch_autovect
231
232	cli
233
234	movq	%rbx, %rdi			/* &cpu */
235	movl	%r13d, %esi			/* ipl */
236	movl	%r14d, %edx			/* oldipl */
237	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
238	call	hilevel_intr_epilog
239	orl	%eax, %eax		/* zero if need to switch stack */
240	jnz	2f
241	movq	%r15, %rsp
2422:	/*
243	 * Check for, and execute, softints before we iret.
244	 *
245	 * (dosoftint expects oldipl in %r14d (which is where it is)
246	 * the cpu pointer in %rbx (which is where it is) and the
247	 * softinfo in %edx (which is where we'll put it right now))
248	 */
249	movl	CPU_SOFTINFO(%rbx), %edx
250	orl	%edx, %edx
251	jz	_sys_rtt
252	jmp	dosoftint
253	/*NOTREACHED*/
254
255	SET_SIZE(cmnint)
256	SET_SIZE(_interrupt)
257
258/*
259 * Handle an interrupt in a new thread
260 *
261 * As we branch here, interrupts are still masked,
262 * %rbx still contains the cpu pointer,
263 * %r14d contains the old ipl that we came in on, and
264 * %eax contains the new ipl that we got from the setlvl routine
265 */
266
267	ENTRY_NP(intr_thread)
268
269	movq	%rbx, %rdi	/* &cpu */
270	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
271	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
272	movl	%eax, %edx	/* new pil from setlvl() */
273	call	intr_thread_prolog
274	movq	%rsp, %r15
275	movq	%rax, %rsp	/* t_stk from interrupt thread */
276	movq	%rsp, %rbp
277
278	sti
279
280	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
281	jz	1f
282	/*
283	 * ftracing support. do we need this on x86?
284	 */
285	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
286	movq	%rbp, %rsi			/* &regs */
287	movl	%r12d, %edx			/* vec */
288	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
289	movzbl	T_PIL(%r11), %ecx		/* newipl */
290	call	ftrace_3_notick
2911:
292	movl	%r12d, %edi			/* vec */
293	call	av_dispatch_autovect
294
295	cli
296
297	movq	%rbx, %rdi			/* &cpu */
298	movl	%r12d, %esi			/* vec */
299	movl	%r14d, %edx			/* oldpil */
300	call	intr_thread_epilog
301	/*
302	 * If we return from here (we might not if the interrupted thread
303	 * has exited or blocked, in which case we'll have quietly swtch()ed
304	 * away) then we need to switch back to our old %rsp
305	 */
306	movq	%r15, %rsp
307	movq	%rsp, %rbp
308	/*
309	 * Check for, and execute, softints before we iret.
310	 *
311	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
312	 * the mcpu_softinfo.st_pending field in %edx.
313	 */
314	movl	CPU_SOFTINFO(%rbx), %edx
315	orl	%edx, %edx
316	jz	_sys_rtt
317	/*FALLTHROUGH*/
318
319/*
320 * Process soft interrupts.
321 * Interrupts are masked, and we have a minimal frame on the stack.
322 * %edx should contain the mcpu_softinfo.st_pending field
323 */
324
325	ALTENTRY(dosoftint)
326
327	movq	%rbx, %rdi	/* &cpu */
328	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
329				/* cpu->cpu_m.mcpu_softinfo.st_pending */
330	movl	%r14d, %ecx	/* oldipl */
331	call	dosoftint_prolog
332	/*
333	 * dosoftint_prolog() usually returns a stack pointer for the
334	 * interrupt thread that we must switch to.  However, if the
335	 * returned stack pointer is NULL, then the software interrupt was
336	 * too low in priority to run now; we'll catch it another time.
337	 */
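	/*
	 * As a C sketch, the test and stack switch below are roughly:
	 *
	 *	stk = dosoftint_prolog(cpu, rp, st_pending, oldipl);
	 *	if (stk == NULL)
	 *		return;			softint too low in priority now
	 *	switch %rsp to stk (the softint thread's t_stk);
	 *	sti; av_dispatch_softvect(pil); cli;
	 *	dosoftint_epilog(cpu, oldipl);
	 */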
338	orq	%rax, %rax
339	jz	_sys_rtt
340	movq	%rsp, %r15
341	movq	%rax, %rsp	/* t_stk from interrupt thread */
342	movq	%rsp, %rbp
343
344	sti
345
346	/*
347	 * Enabling interrupts (above) could raise the current ipl
348	 * and base spl.  But, we continue processing the current soft
349	 * interrupt and we will check the base spl next time around
350	 * so that blocked interrupt threads get a chance to run.
351	 */
352	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
353	movzbl	T_PIL(%r11), %edi
354	call	av_dispatch_softvect
355
356	cli
357
358	movq	%rbx, %rdi		/* &cpu */
359	movl	%r14d, %esi		/* oldpil */
360	call	dosoftint_epilog
361	movq	%r15, %rsp		/* back on old stack pointer */
362	movq	%rsp, %rbp
363	movl	CPU_SOFTINFO(%rbx), %edx
364	orl	%edx, %edx
365	jz	_sys_rtt
366	jmp	dosoftint
367
368	SET_SIZE(dosoftint)
369	SET_SIZE(intr_thread)
370
371#elif defined(__i386)
372
373/*
374 * One day, this should just invoke the C routines that know how to
375 * do all the interrupt bookkeeping.  In the meantime, try
376 * and make the assembler a little more comprehensible.
377 */
378
379#define	INC64(basereg, offset)			\
380	addl	$1, offset(basereg);		\
381	adcl	$0, offset + 4(basereg)
382
383#define	TSC_CLR(basereg, offset)		\
384	movl	$0, offset(basereg);		\
385	movl	$0, offset + 4(basereg)
386
387/*
388 * The following macros assume the time value is in %edx:%eax
389 * e.g. from a rdtsc instruction.
390 */
391#define	TSC_STORE(reg, offset)		\
392	movl	%eax, offset(reg);	\
393	movl	%edx, offset + 4(reg)
394
395#define	TSC_LOAD(reg, offset)	\
396	movl	offset(reg), %eax;	\
397	movl	offset + 4(reg), %edx
398
399#define	TSC_ADD_TO(reg, offset)		\
400	addl	%eax, offset(reg);	\
401	adcl	%edx, offset + 4(reg)
402
403#define	TSC_SUB_FROM(reg, offset)	\
404	subl	offset(reg), %eax;	\
405	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */
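
/*
 * The macros above synthesize 64-bit operations out of 32-bit instruction
 * pairs.  Their C equivalents, with "tsc" standing for the 64-bit value
 * carried in %edx:%eax:
 *
 *	INC64(base, off)	*(uint64_t *)(base + off) += 1;
 *	TSC_CLR(base, off)	*(uint64_t *)(base + off) = 0;
 *	TSC_STORE(reg, off)	*(uint64_t *)(reg + off) = tsc;
 *	TSC_LOAD(reg, off)	tsc = *(uint64_t *)(reg + off);
 *	TSC_ADD_TO(reg, off)	*(uint64_t *)(reg + off) += tsc;
 *	TSC_SUB_FROM(reg, off)	tsc -= *(uint64_t *)(reg + off);
 */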
406
407/*
408 * basereg   - pointer to cpu struct
409 * pilreg    - pil or converted pil (pil - (LOCK_LEVEL + 1))
410 *
411 * Returns (base + pil * 8) in pilreg
412 */
413#define	PILBASE(basereg, pilreg)	\
414	lea	(basereg, pilreg, 8), pilreg
415
416/*
417 * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
418 */
419#define	HIGHPILBASE(basereg, pilreg)		\
420	subl	$LOCK_LEVEL + 1, pilreg;	\
421	PILBASE(basereg, pilreg)
422
423/*
424 * Returns (base + pil * 16) in pilreg
425 */
426#define	PILBASE_INTRSTAT(basereg, pilreg)	\
427	shl	$4, pilreg;			\
428	addl	basereg, pilreg;
429
430/*
431 * Returns (cpu + cpu_mstate * 8) in tgt
432 */
433#define	INTRACCTBASE(cpureg, tgtreg)		\
434	movzwl	CPU_MSTATE(cpureg), tgtreg;	\
435	lea	(cpureg, tgtreg, 8), tgtreg
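
/*
 * The address computations above, written out in C (sketch):
 *
 *	PILBASE(base, pil)		pil = base + pil * 8;
 *	HIGHPILBASE(base, pil)		pil = base + (pil - (LOCK_LEVEL + 1)) * 8;
 *	PILBASE_INTRSTAT(base, pil)	pil = base + pil * 16;
 *	INTRACCTBASE(cpu, tgt)		tgt = cpu + cpu->cpu_mstate * 8;
 *
 * i.e. each one turns a pil (or the CPU's current microstate) into a pointer
 * to the matching 8- or 16-byte slot in the per-CPU accounting arrays.
 */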
436
437/*
438 * cpu_stats.sys.intr[PIL]++
439 */
440#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
441	movl	pilreg, tmpreg_32;				\
442	PILBASE(basereg, tmpreg);				\
443	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))
444
445/*
446 * Unlink thread from CPU's list
447 */
448#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
449	mov	CPU_INTR_THREAD(cpureg), ithread;	\
450	mov	T_LINK(ithread), tmpreg;		\
451	mov	tmpreg, CPU_INTR_THREAD(cpureg)
452
453/*
454 * Link a thread into CPU's list
455 */
456#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
457	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
458	mov	tmpreg, T_LINK(ithread);		\
459	mov	ithread, CPU_INTR_THREAD(cpureg)
460
461#if defined(DEBUG)
462
463/*
464 * Do not call panic if panic is already in progress.
465 */
466#define	__PANIC(msg, label)		\
467	cmpl	$0, panic_quiesce;		\
468	jne	label;				\
469	pushl	$msg;				\
470	call	panic
471
472#define	__CMP64_JNE(basereg, offset, label)	\
473	cmpl	$0, offset(basereg);		\
474	jne	label;				\
475	cmpl	$0, offset + 4(basereg);	\
476	jne	label
477
478/*
479 * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
480 */
481#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
482	btl	pilreg, CPU_INTR_ACTV(basereg);		\
483	jnc	4f;					\
484	__PANIC(msg, 4f);				\
4854:
486
487/*
488 * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
489 */
490#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
491	btl	pilreg, CPU_INTR_ACTV(basereg);		\
492	jc	5f;					\
493	__PANIC(msg, 5f);				\
4945:
495
496/*
497 * ASSERT(CPU->cpu_pil_high_start != 0)
498 */
499#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)			\
500	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);		\
501	__PANIC(_interrupt_timestamp_zero, 6f);		\
5026:
503
504/*
505 * ASSERT(t->t_intr_start != 0)
506 */
507#define	ASSERT_T_INTR_START_NZ(basereg)				\
508	__CMP64_JNE(basereg, T_INTR_START, 7f);			\
509	__PANIC(_intr_thread_t_intr_start_zero, 7f);	\
5107:
511
512_interrupt_actv_bit_set:
513	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
514_interrupt_actv_bit_not_set:
515	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
516_interrupt_timestamp_zero:
517	.string "_interrupt(): timestamp zero upon handler return"
518_intr_thread_actv_bit_not_set:
519	.string	"intr_thread():	cpu_intr_actv bit not set for PIL"
520_intr_thread_t_intr_start_zero:
521	.string	"intr_thread():	t_intr_start zero upon handler return"
522_dosoftint_actv_bit_set:
523	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
524_dosoftint_actv_bit_not_set:
525	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"
526
527	DGDEF(intr_thread_cnt)
528
529#else
530#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
531#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
532#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
533#define	ASSERT_T_INTR_START_NZ(basereg)
534#endif
535
536	ENTRY_NP2(cmnint, _interrupt)
537
538	INTR_PUSH
539
540	/*
541	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
542	 */
543	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
544						/* Uses labels 8 and 9 */
545	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
546	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */
547
548	movl	%esp, %ebp
549	DISABLE_INTR_FLAGS
550	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
551	leal    REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
552	movl	CPU_PRI(%ebx), %edi		/* get ipl */
553	movl	CPU_SOFTINFO(%ebx), %edx
554
555	/
556	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
557	/ jump straight to dosoftint now.
558	/
559	cmpl	$T_SOFTINT, (%ecx)
560	je	dosoftint
561
562	/ raise interrupt priority level
563	/ oldipl is in %edi, vectorp is in %ecx
564	/ newipl is returned in %eax
565	pushl	%ecx
566	pushl	%edi
567	call    *setlvl
568	popl	%edi			/* save oldpil in %edi */
569	popl	%ecx
570
571#ifdef TRAPTRACE
572	movb	%al, TTR_IPL(%esi)
573#endif
574
575	/ check for spurious interrupt
576	cmp	$-1, %eax
577	je	_sys_rtt
578
579#ifdef TRAPTRACE
580	movl	CPU_PRI(%ebx), %edx
581	movb	%dl, TTR_PRI(%esi)
582	movl	CPU_BASE_SPL(%ebx), %edx
583	movb	%dl, TTR_SPL(%esi)
584#endif
585
586	movl	%eax, CPU_PRI(%ebx) /* update ipl */
587	movl	REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */
588
589#ifdef TRAPTRACE
590	movb	%cl, TTR_VECTOR(%esi)
591#endif
592
593	/ At this point we can take one of two paths.  If the new priority
594	/ level is less than or equal to LOCK_LEVEL then we jump to code that
595	/ will run this interrupt as a separate thread.  Otherwise the
596	/ interrupt is NOT run as a separate thread.
597
598	/ %edi - old priority level
599	/ %ebp - pointer to REGS
600	/ %ecx - translated vector
601	/ %eax - ipl of isr
602	/ %ebx - cpu pointer
603
604	cmpl 	$LOCK_LEVEL, %eax	/* compare to highest thread level */
605	jbe	intr_thread		/* process as a separate thread */
606
607	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
608	jne	2f
609
610	movl	REGOFF_PC(%ebp), %esi
611	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
612	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
613	jz	1f
614	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
615	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
616	jmp	2f
617
6181:
619	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
620	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */
621
6222:
623	pushl	%ecx				/* vec */
624	pushl	%eax				/* newpil */
625
626	/
627	/ See if we are interrupting another high-level interrupt.
628	/
629	movl	CPU_INTR_ACTV(%ebx), %eax
630	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
631	jz	0f
632	/
633	/ We have interrupted another high-level interrupt.
634	/ Load starting timestamp, compute interval, update cumulative counter.
635	/
636	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
637	movl	%ecx, %esi		/* save PIL for later */
638	HIGHPILBASE(%ebx, %ecx)
639_tsc_patch1:
640	nop; nop			/* patched to rdtsc if available */
641	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
642
643	PILBASE_INTRSTAT(%ebx, %esi)
644	TSC_ADD_TO(%esi, CPU_INTRSTAT)
645	INTRACCTBASE(%ebx, %ecx)
646	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
647	/
648	/ Another high-level interrupt is active below this one, so
649	/ there is no need to check for an interrupt thread. That will be
650	/ done by the lowest priority high-level interrupt active.
651	/
652	jmp	1f
6530:
654	/
655	/ See if we are interrupting a low-level interrupt thread.
656	/
657	movl	CPU_THREAD(%ebx), %esi
658	testw	$T_INTR_THREAD, T_FLAGS(%esi)
659	jz	1f
660	/
661	/ We have interrupted an interrupt thread. Account for its time slice
662	/ only if its time stamp is non-zero.
663	/
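	/*
	 * In C terms the accounting below is roughly (t being the interrupted
	 * interrupt thread; field spellings simplified):
	 *
	 *	if (t->t_intr_start != 0) {
	 *		delta = tsc_read() - t->t_intr_start;
	 *		t->t_intr_start = 0;
	 *		cpu->cpu_m.intrstat[t->t_pil][0] += delta;
	 *		cpu->cpu_intracct[cpu->cpu_mstate] += delta;
	 *	}
	 */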
664	cmpl	$0, T_INTR_START+4(%esi)
665	jne	0f
666	cmpl	$0, T_INTR_START(%esi)
667	je	1f
6680:
669	movzbl	T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */
670	PILBASE_INTRSTAT(%ebx, %ecx)
671_tsc_patch2:
672	nop; nop			/* patched to rdtsc if available */
673	TSC_SUB_FROM(%esi, T_INTR_START)
674	TSC_CLR(%esi, T_INTR_START)
675	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
676	INTRACCTBASE(%ebx, %ecx)
677	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
6781:
679	/ Store starting timestamp in CPU structure for this PIL.
680	popl	%ecx			/* restore new PIL */
681	pushl	%ecx
682	HIGHPILBASE(%ebx, %ecx)
683_tsc_patch3:
684	nop; nop			/* patched to rdtsc if available */
685	TSC_STORE(%ecx, CPU_PIL_HIGH_START)
686
687	popl	%eax			/* restore new pil */
688	popl	%ecx			/* vec */
689	/
690	/ Set bit for this PIL in CPU's interrupt active bitmask.
691	/
692
693	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
694
695	/ Save old CPU_INTR_ACTV
696	movl	CPU_INTR_ACTV(%ebx), %esi
697
698	cmpl	$15, %eax
699	jne	0f
700	/ PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv
701	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
7020:
703	btsl	%eax, CPU_INTR_ACTV(%ebx)
704	/
705	/ Handle high-level nested interrupt on separate interrupt stack
706	/
707	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
708	jnz	onstack			/* already on interrupt stack */
709	movl	%esp, %eax
710	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
711	pushl	%eax			/* save the thread stack pointer */
712onstack:
713	movl	$autovect, %esi		/* get autovect structure before */
714					/* sti to save on AGI later */
715	sti				/* enable interrupts */
716	pushl	%ecx			/* save interrupt vector */
717	/
718	/ Get handler address
719	/
720pre_loop1:
721	movl	AVH_LINK(%esi, %ecx, 8), %esi
722	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
723				/* bl is DDI_INTR_CLAIMED status of chain */
724	testl	%esi, %esi		/* if pointer is null */
725	jz	.intr_ret		/* then skip */
726loop1:
727	incb	%bh
728	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
729	testl	%edx, %edx		/* if func is null */
730	jz	.intr_ret		/* then skip */
731	pushl	$0
732	pushl	AV_INTARG2(%esi)
733	pushl	AV_INTARG1(%esi)
734	pushl	AV_VECTOR(%esi)
735	pushl	AV_DIP(%esi)
736	call	__dtrace_probe_interrupt__start
737	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
738	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
739	call	*%edx			/* call interrupt routine with arg */
740	addl	$8, %esp
741	movl	%eax, 16(%esp)
742	call	__dtrace_probe_interrupt__complete
743	addl	$20, %esp
744	orb	%al, %bl		/* see if anyone claims intpt. */
745	movl	AV_LINK(%esi), %esi	/* get next routine on list */
746	testl	%esi, %esi		/* if pointer is non-null */
747	jnz	loop1			/* then continue */
748
749.intr_ret:
750	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
751	je	.intr_ret1
752	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
753	jz	.intr_ret1
754	movl	(%esp), %ecx		/* else restore intr vector */
755	movl	$autovect, %esi		/* get autovect structure */
756	jmp	pre_loop1		/* and try again. */
757
758.intr_ret1:
759	LOADCPU(%ebx)			/* get pointer to cpu struct */
760
761	cli
762	movl	CPU_PRI(%ebx), %esi
763
764	/ cpu_stats.sys.intr[PIL]++
765	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)
766
767	/
768	/ Clear bit for this PIL in CPU's interrupt active bitmask.
769	/
770
771	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)
772
773	cmpl	$15, %esi
774	jne	0f
775	/ Only clear bit if reference count is now zero.
776	decw	CPU_INTR_ACTV_REF(%ebx)
777	jnz	1f
7780:
779	btrl	%esi, CPU_INTR_ACTV(%ebx)
7801:
781	/
782	/ Take timestamp, compute interval, update cumulative counter.
783	/ esi = PIL
784_tsc_patch4:
785	nop; nop			/* patched to rdtsc if available */
786	movl	%esi, %ecx		/* save for later */
787	HIGHPILBASE(%ebx, %esi)
788
789	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)
790
791	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
792
793	PILBASE_INTRSTAT(%ebx, %ecx)
794	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
795	INTRACCTBASE(%ebx, %esi)
796	TSC_ADD_TO(%esi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
797	/
798	/ Check for lower-PIL nested high-level interrupt beneath current one
799	/ If so, place a starting timestamp in its pil_high_start entry.
800	/
801	movl	CPU_INTR_ACTV(%ebx), %eax
802	movl	%eax, %esi
803	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
804	jz	0f
805	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
806	HIGHPILBASE(%ebx, %ecx)
807_tsc_patch5:
808	nop; nop			/* patched to rdtsc if available */
809	TSC_STORE(%ecx, CPU_PIL_HIGH_START)
810	/
811	/ Another high-level interrupt is active below this one, so
812	/ there is no need to check for an interrupt thread. That will be
813	/ done by the lowest priority high-level interrupt active.
814	/
815	jmp	1f
8160:
817	/ Check to see if there is a low-level interrupt active. If so,
818	/ place a starting timestamp in the thread structure.
819	movl	CPU_THREAD(%ebx), %esi
820	testw	$T_INTR_THREAD, T_FLAGS(%esi)
821	jz	1f
822_tsc_patch6:
823	nop; nop			/* patched to rdtsc if available */
824	TSC_STORE(%esi, T_INTR_START)
8251:
826	movl	%edi, CPU_PRI(%ebx)
827				/* interrupt vector already on stack */
828	pushl	%edi			/* old ipl */
829	call	*setlvlx
830	addl	$8, %esp		/* eax contains the current ipl */
831
832	movl	CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
833	shrl	$LOCK_LEVEL + 1, %esi	/* HI PRI intrs. */
834	jnz	.intr_ret2
835	popl	%esp			/* restore the thread stack pointer */
836.intr_ret2:
837	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
838	orl	%edx, %edx
839	jz	_sys_rtt
840	jmp	dosoftint	/* check for softints before we return. */
841	SET_SIZE(cmnint)
842	SET_SIZE(_interrupt)
843
844#endif	/* __i386 */
845
846/*
847 * Declare a uintptr_t which has the size of _interrupt to enable stack
848 * traceback code to know when a regs structure is on the stack.
849 */
850	.globl	_interrupt_size
851	.align	CLONGSIZE
852_interrupt_size:
853	.NWORD	. - _interrupt
854	.type	_interrupt_size, @object
855
856#endif	/* __lint */
857
858#if defined(__i386)
859
860/*
861 * Handle an interrupt in a new thread.
862 *	Entry:  traps disabled.
863 *		%edi - old priority level
864 *		%ebp - pointer to REGS
865 *		%ecx - translated vector
866 *		%eax - ipl of isr.
867 *		%ebx - pointer to CPU struct
868 *	Uses:
869 */
870
871#if !defined(__lint)
872
873	ENTRY_NP(intr_thread)
874	/
875	/ Set bit for this PIL in CPU's interrupt active bitmask.
876	/
877
878	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
879
880	btsl	%eax, CPU_INTR_ACTV(%ebx)
881
882	/ Get set to run interrupt thread.
883	/ There should always be an interrupt thread since we allocate one
884	/ for each level on the CPU.
885	/
886	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
887	/ of events here - in particular that t->t_lwp of the interrupt
888	/ thread is set to the pinned thread *before* curthread is changed
889	/
890	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */
891
892	/
893	/ Are we interrupting an interrupt thread? If so, account for it.
894	/
895	testw	$T_INTR_THREAD, T_FLAGS(%edx)
896	jz	0f
897	/
898	/ We have interrupted an interrupt thread. Account for its time slice
899	/ only if its time stamp is non-zero. t_intr_start may be zero due to
900	/ cpu_intr_swtch_enter.
901	/
902	cmpl	$0, T_INTR_START+4(%edx)
903	jne	1f
904	cmpl	$0, T_INTR_START(%edx)
905	je	0f
9061:
907	pushl	%ecx
908	pushl	%eax
909	movl	%edx, %esi
910_tsc_patch7:
911	nop; nop			/* patched to rdtsc if available */
912	TSC_SUB_FROM(%esi, T_INTR_START)
913	TSC_CLR(%esi, T_INTR_START)
914	movzbl	T_PIL(%esi), %ecx
915	PILBASE_INTRSTAT(%ebx, %ecx)
916	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
917	INTRACCTBASE(%ebx, %ecx)
918	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
919	movl	%esi, %edx
920	popl	%eax
921	popl	%ecx
9220:
923	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
924	pushl	%edi			/* get a temporary register */
925	UNLINK_INTR_THREAD(%ebx, %esi, %edi)
926
927	movl	T_LWP(%edx), %edi
928	movl	%edx, T_INTR(%esi)		/* push old thread */
929	movl	%edi, T_LWP(%esi)
930	/
931	/ Threads on the interrupt thread free list could have state already
932	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
933	/
934	movl	$ONPROC_THREAD, T_STATE(%esi)
935	/
936	/ chain the interrupted thread onto list from the interrupt thread.
937	/ Set the new interrupt thread as the current one.
938	/
939	popl	%edi			/* Don't need a temp reg anymore */
940	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
941	movl	%esp, %ebp
942	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
943	pushl	%eax				/* save the ipl */
944	/
945	/ Initialize thread priority level from intr_pri
946	/
947	movb	%al, T_PIL(%esi)	/* store pil */
948	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
949					/* is loaded on some other cpu. */
950	addl	%ebx, %eax		/* convert level to dispatch priority */
951	movw	%ax, T_PRI(%esi)
952
953	/
954	/ Take timestamp and store it in the thread structure.
955	/
956	movl	%eax, %ebx		/* save priority over rdtsc */
957_tsc_patch8:
958	nop; nop			/* patched to rdtsc if available */
959	TSC_STORE(%esi, T_INTR_START)
960	movl	%ebx, %eax		/* restore priority */
961
962	/ The following 3 instructions need not be in cli.
963	/ Putting them here only to avoid the AGI penalty on Pentiums.
964
965	pushl	%ecx			/* save interrupt vector. */
966	pushl	%esi			/* save interrupt thread */
967	movl	$autovect, %esi		/* get autovect structure */
968	sti				/* enable interrupts */
969
970	/ Fast event tracing.
971	LOADCPU(%ebx)
972	movl	CPU_FTRACE_STATE(%ebx), %ebx
973	testl	$FTRACE_ENABLED, %ebx
974	jz	1f
975
976	movl	8(%esp), %ebx
977	pushl	%ebx			/* ipl */
978	pushl	%ecx			/* int vector */
979	movl	T_SP(%edx), %ebx
980	pushl	%ebx			/* &regs */
981	pushl	$_ftrace_intr_thread_fmt
982	call	ftrace_3_notick
983	addl	$8, %esp
984	popl	%ecx			/* restore int vector */
985	addl	$4, %esp
9861:
987pre_loop2:
988	movl	AVH_LINK(%esi, %ecx, 8), %esi
989	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
990				/* bl is DDI_INTR_CLAIMED status of chain */
991	testl	%esi, %esi	/* if pointer is null */
992	jz	loop_done2	/* we're done */
993loop2:
994	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
995	testl	%edx, %edx		/* if pointer is null */
996	jz	loop_done2		/* we're done */
997	incb	%bh
998	pushl	$0
999	pushl	AV_INTARG2(%esi)
1000	pushl	AV_INTARG1(%esi)
1001	pushl	AV_VECTOR(%esi)
1002	pushl	AV_DIP(%esi)
1003	call	__dtrace_probe_interrupt__start
1004	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
1005	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
1006	call	*%edx			/* call interrupt routine with arg */
1007	addl	$8, %esp
1008	movl	%eax, 16(%esp)
1009	call	__dtrace_probe_interrupt__complete
1010	addl	$20, %esp
1011	orb	%al, %bl		/* see if anyone claims intpt. */
1012	movl	AV_TICKSP(%esi), %ecx
1013	testl	%ecx, %ecx
1014	jz	no_time
1015	call	intr_get_time
1016	movl	AV_TICKSP(%esi), %ecx
1017	TSC_ADD_TO(%ecx, 0)
1018no_time:
1019	movl	AV_LINK(%esi), %esi	/* get next routine on list */
1020	testl	%esi, %esi		/* if pointer is non-null */
1021	jnz	loop2			/* continue */
1022loop_done2:
1023	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
1024	je	.loop_done2_1
1025	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
1026	jz	.loop_done2_1
1027	movl	$autovect, %esi		/* else get autovect structure */
1028	movl	4(%esp), %ecx		/* restore intr vector */
1029	jmp	pre_loop2		/* and try again. */
1030.loop_done2_1:
1031	popl	%esi			/* restore intr thread pointer */
1032
1033	LOADCPU(%ebx)
1034
1035	cli		/* protect interrupt thread pool and intr_actv */
1036	movzbl	T_PIL(%esi), %eax
1037
1038	/ Save value in regs
1039	pushl	%eax			/* current pil */
1040	pushl	%edx			/* (huh?) */
1041	pushl	%edi			/* old pil */
1042
1043	/ cpu_stats.sys.intr[PIL]++
1044	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)
1045
1046	/
1047	/ Take timestamp, compute interval, and update cumulative counter.
1048	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
1049	/
1050	movl	%eax, %edi
1051
1052	ASSERT_T_INTR_START_NZ(%esi)
1053
1054_tsc_patch9:
1055	nop; nop			/* patched to rdtsc if available */
1056	TSC_SUB_FROM(%esi, T_INTR_START)
1057	PILBASE_INTRSTAT(%ebx, %edi)
1058	TSC_ADD_TO(%edi, CPU_INTRSTAT)
1059	INTRACCTBASE(%ebx, %edi)
1060	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1061	popl	%edi
1062	popl	%edx
1063	popl	%eax
1064
1065	/
1066	/ Clear bit for this PIL in CPU's interrupt active bitmask.
1067	/
1068
1069	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)
1070
1071	btrl	%eax, CPU_INTR_ACTV(%ebx)
1072
1073	/ if there is still an interrupted thread underneath this one
1074	/ then the interrupt was never blocked and the return is fairly
1075	/ simple.  Otherwise jump to intr_thread_exit
1076	cmpl	$0, T_INTR(%esi)
1077	je	intr_thread_exit
1078
1079	/
1080	/ link the thread back onto the interrupt thread pool
1081	LINK_INTR_THREAD(%ebx, %esi, %edx)
1082
1083	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
1084	/ set the thread state to free so kmdb doesn't see it
1085	movl	$FREE_THREAD, T_STATE(%esi)
1086
1087	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
1088	jae	intr_restore_ipl	/* then use oldipl */
1089	movl	%eax, %edi		/* else use basespl */
1090intr_restore_ipl:
1091	movl	%edi, CPU_PRI(%ebx)
1092					/* intr vector already on stack */
1093	pushl	%edi			/* old ipl */
1094	call	*setlvlx		/* eax contains the current ipl */
1095	/
1096	/ Switch back to the interrupted thread
1097	movl	T_INTR(%esi), %ecx
1098
1099	/ Place starting timestamp in interrupted thread's thread structure.
1100_tsc_patch10:
1101	nop; nop			/* patched to rdtsc if available */
1102	TSC_STORE(%ecx, T_INTR_START)
1103
1104	movl	T_SP(%ecx), %esp	/* restore stack pointer */
1105	movl	%esp, %ebp
1106	movl	%ecx, CPU_THREAD(%ebx)
1107
1108	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
1109	orl	%edx, %edx
1110	jz	_sys_rtt
1111	jmp	dosoftint	/* check for softints before we return. */
1112
1113	/
1114	/ An interrupt returned on what was once (and still might be)
1115	/ an interrupt thread stack, but the interrupted process is no longer
1116	/ there.  This means the interrupt must have blocked.
1117	/
1118	/ There is no longer a thread under this one, so put this thread back
1119	/ on the CPU's free list and resume the idle thread which will dispatch
1120	/ the next thread to run.
1121	/
1122	/ All interrupts are disabled here
1123	/
1124
1125intr_thread_exit:
1126#ifdef DEBUG
1127	incl	intr_thread_cnt
1128#endif
1129	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
1130	/
1131	/ Put thread back on the interrupt thread list.
1132	/ As a reminder, the regs at this point are
1133	/	esi	interrupt thread
1134	/	edi	old ipl
1135	/	ebx	ptr to CPU struct
1136
1137	/ Set CPU's base SPL level based on active interrupts bitmask
1138	call	set_base_spl
1139
1140	movl	CPU_BASE_SPL(%ebx), %edi
1141	movl	%edi, CPU_PRI(%ebx)
1142					/* interrupt vector already on stack */
1143	pushl	%edi
1144	call	*setlvlx
1145	addl	$8, %esp		/* XXX - don't need to pop since */
1146					/* we are ready to switch */
1147	call	splhigh			/* block all intrs below lock level */
1148	/
1149	/ Set the thread state to free so kmdb doesn't see it
1150	/
1151	movl	$FREE_THREAD, T_STATE(%esi)
1152	/
1153	/ Put thread on either the interrupt pool or the free pool and
1154	/ call swtch() to resume another thread.
1155	/
1156	LINK_INTR_THREAD(%ebx, %esi, %edx)
1157	call 	swtch
1158	/ swtch() shouldn't return
1159
1160	SET_SIZE(intr_thread)
1161
1162#endif	/* __lint */
1163#endif	/* __i386 */
1164
1165/*
1166 * Set the CPU's base SPL level, based on which interrupt levels are active.
1167 *	Called at spl7 or above.
1168 */
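/*
 * Equivalent C sketch (the bsrl sequence below just finds the most
 * significant set bit; highbit() here is only for illustration):
 *
 *	uint_t actv = CPU->cpu_intr_actv & 0xffff;
 *	CPU->cpu_base_spl = (actv == 0) ? 0 : highbit(actv) - 1;
 */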
1169
1170#if defined(__lint)
1171
1172void
1173set_base_spl(void)
1174{}
1175
1176#else	/* __lint */
1177
1178	ENTRY_NP(set_base_spl)
1179	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
1180	testl	%eax, %eax		/* is it zero? */
1181	jz	setbase
1182	testl	$0xff00, %eax
1183	jnz	ah_set
1184	shl	$24, %eax		/* shift 'em over so we can find */
1185					/* the 1st bit faster */
1186	bsrl	%eax, %eax
1187	subl	$24, %eax
1188setbase:
1189	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
1190	ret
1191ah_set:
1192	shl	$16, %eax
1193	bsrl	%eax, %eax
1194	subl	$16, %eax
1195	jmp	setbase
1196	SET_SIZE(set_base_spl)
1197
1198#endif	/* __lint */
1199
1200#if defined(__i386)
1201
1202/*
1203 * int
1204 * intr_passivate(from, to)
1205 *      thread_id_t     from;           interrupt thread
1206 *      thread_id_t     to;             interrupted thread
1207 *
1208 *	intr_passivate(from, to) makes the interrupted thread "to" runnable.
1209 *
1210 *	Since t->t_sp has already been saved, t->t_pc is all that needs to be
1211 *	set in this function.
1212 *
1213 *	Returns interrupt level of the thread.
1214 */
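/*
 * The i386 body below, as a C sketch (t_pc stands for the thread's saved
 * program counter, i.e. the T_PC slot; the pil is the first word that was
 * pushed onto the interrupt thread's stack, just below t_stk):
 *
 *	int
 *	intr_passivate(kthread_id_t from, kthread_id_t to)
 *	{
 *		to->t_pc = (uintptr_t)_sys_rtt;
 *		return (*(int *)(from->t_stk - sizeof (int)));
 *	}
 */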
1215
1216#if defined(__lint)
1217
1218/* ARGSUSED */
1219int
1220intr_passivate(kthread_id_t from, kthread_id_t to)
1221{ return (0); }
1222
1223#else	/* __lint */
1224
1225	ENTRY(intr_passivate)
1226	movl	8(%esp), %eax		/* interrupted thread  */
1227	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */
1228
1229	movl	4(%esp), %eax		/* interrupt thread */
1230	movl	T_STACK(%eax), %eax	/* get the pointer to the start of */
1231					/* of the interrupt thread stack */
1232	movl	-4(%eax), %eax		/* interrupt level was the first */
1233					/* thing pushed onto the stack */
1234	ret
1235	SET_SIZE(intr_passivate)
1236
1237#endif	/* __lint */
1238#endif	/* __i386 */
1239
1240#if defined(__lint)
1241
1242void
1243fakesoftint(void)
1244{}
1245
1246#else	/* __lint */
1247
1248	/
1249	/ If we're here, we're being called from splx() to fake a soft
1250	/ interrupt (note that interrupts are still disabled from splx()).
1251	/ We execute this code when a soft interrupt is posted at
1252	/ a level higher than the CPU's current spl; when spl is lowered in
1253	/ splx(), it will see the softint and jump here.  We'll do exactly
1254	/ what a trap would do:  push our flags, %cs, %eip, error code
1255	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
1256	/ and branch to the dosoftint() code.
1257	/
1258#if defined(__amd64)
1259
1260	/*
1261	 * In 64-bit mode, iretq -always- pops all five regs
1262	 * Imitate the 16-byte auto-align of the stack, and the
1263	 * zeroed-out %ss value.
1264	 */
1265	ENTRY_NP(fakesoftint)
1266	movq	%rsp, %r11
1267	andq	$-16, %rsp
1268	pushq	$KDS_SEL	/* %ss */
1269	pushq	%r11		/* %rsp */
1270	pushf			/* rflags */
1271	pushq	$KCS_SEL	/* %cs */
1272	leaq	fakesoftint_return(%rip), %r11
1273	pushq	%r11		/* %rip */
1274	pushq	$0		/* err */
1275	pushq	$T_SOFTINT	/* trap */
1276	jmp	cmnint
1277	SET_SIZE(fakesoftint)
1278
1279#elif defined(__i386)
1280
1281	ENTRY_NP(fakesoftint)
1282	pushf
1283	push	%cs
1284	push	$fakesoftint_return
1285	push	$0
1286	push	$T_SOFTINT
1287	jmp	cmnint
1288	SET_SIZE(fakesoftint)
1289
1290#endif	/* __i386 */
1291
1292	.align	CPTRSIZE
1293	.globl	_fakesoftint_size
1294	.type	_fakesoftint_size, @object
1295_fakesoftint_size:
1296	.NWORD	. - fakesoftint
1297	SET_SIZE(_fakesoftint_size)
1298
1299/*
1300 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
1301 * Process software interrupts
1302 * Interrupts are disabled here.
1303 */
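/*
 * The selection logic at the top of the i386 version below, as a C sketch
 * (atomic_btr32() and highbit() stand in for the lock btrl and bsrl
 * instructions; field spellings simplified):
 *
 *	pil = highbit(st_pending) - 1;		highest pending soft level
 *	if (oldipl >= pil || cpu->cpu_base_spl >= pil)
 *		return;				can't run it at this spl
 *	if (!atomic_btr32(&cpu->cpu_m.mcpu_softinfo.st_pending, pil))
 *		goto check_again;		someone else already took it
 *	cpu->cpu_pri = pil;
 *	(*setspl)(pil);
 *	... switch to an interrupt thread, sti, av_dispatch_softvect(pil) ...
 */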
1304#if defined(__i386)
1305
1306	ENTRY_NP(dosoftint)
1307
1308	bsrl	%edx, %edx		/* find highest pending interrupt */
1309	cmpl 	%edx, %edi		/* if curipl >= pri soft pending intr */
1310	jae	_sys_rtt		/* skip */
1311
1312	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
1313	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
1314	jae	_sys_rtt		/* skip */
1315
1316	lock				/* MP protect */
1317	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
1318	jnc	dosoftint_again
1319
1320	movl	%edx, CPU_PRI(%ebx) /* set IPL to softint level */
1321	pushl	%edx
1322	call	*setspl			/* mask levels up to the softint level */
1323	popl	%eax			/* priority we are at in %eax */
1324
1325	/ Get set to run interrupt thread.
1326	/ There should always be an interrupt thread since we allocate one
1327	/ for each level on the CPU.
1328	UNLINK_INTR_THREAD(%ebx, %esi, %edx)
1329
1330	/
1331	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
1332	/ of events here - in particular that t->t_lwp of the interrupt
1333	/ thread is set to the pinned thread *before* curthread is changed
1334	/
1335	movl	CPU_THREAD(%ebx), %ecx
1336
1337	/ If we are interrupting an interrupt thread, account for it.
1338	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
1339	jz	0f
1340	/
1341	/ We have interrupted an interrupt thread. Account for its time slice
1342	/ only if its time stamp is non-zero. t_intr_start may be zero due to
1343	/ cpu_intr_swtch_enter.
1344	/
1345	cmpl	$0, T_INTR_START+4(%ecx)
1346	jne	1f
1347	cmpl	$0, T_INTR_START(%ecx)
1348	je	0f
13491:
1350	pushl	%eax
1351	movl	%eax, %ebp
1352_tsc_patch11:
1353	nop; nop			/* patched to rdtsc if available */
1354	PILBASE_INTRSTAT(%ebx, %ebp)
1355	TSC_SUB_FROM(%ecx, T_INTR_START)
1356	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
1357	INTRACCTBASE(%ebx, %ebp)
1358	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1359	popl	%eax
13600:
1361	movl	T_LWP(%ecx), %ebp
1362	movl	%ebp, T_LWP(%esi)
1363	/
1364	/ Threads on the interrupt thread free list could have state already
1365	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
1366	/ Could eliminate the next two instructions with a little work.
1367	/
1368	movl	$ONPROC_THREAD, T_STATE(%esi)
1369	/
1370	/ Push interrupted thread onto list from new thread.
1371	/ Set the new thread as the current one.
1372	/ Set interrupted thread's T_SP because if it is the idle thread,
1373	/ Resume() may use that stack between threads.
1374	/
1375	movl	%esp, T_SP(%ecx)		/* mark stack for resume */
1376	movl	%ecx, T_INTR(%esi)		/* push old thread */
1377	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
1378	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
1379	movl	%esp, %ebp
1380
1381	pushl	%eax			/* push ipl as first element in stack */
1382					/* see intr_passivate() */
1383	/
1384	/ Set bit for this PIL in CPU's interrupt active bitmask.
1385	/
1386
1387	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)
1388
1389	btsl	%eax, CPU_INTR_ACTV(%ebx)
1390
1391	/
1392	/ Initialize thread priority level from intr_pri
1393	/
1394	movb	%al, T_PIL(%esi)	/* store pil */
1395	movzwl	intr_pri, %ecx
1396	addl	%eax, %ecx		/* convert level to dispatch priority */
1397	movw	%cx, T_PRI(%esi)
1398
1399	/
1400	/ Store starting timestamp in thread structure.
1401	/ esi = thread, ebx = cpu pointer, eax = PIL
1402	/
1403	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
1404_tsc_patch12:
1405	nop; nop			/* patched to rdtsc if available */
1406	TSC_STORE(%esi, T_INTR_START)
1407
1408	sti				/* enable interrupts */
1409
1410	/
1411	/ Enabling interrupts (above) could raise the current
1412	/ IPL and base SPL. But, we continue processing the current soft
1413	/ interrupt and we will check the base SPL next time in the loop
1414	/ so that blocked interrupt threads get a chance to run.
1415	/
1416
1417	/
1418	/ dispatch soft interrupts
1419	/
1420	pushl	%ecx
1421	call	av_dispatch_softvect
1422	addl	$4, %esp
1423
1424	cli				/* protect interrupt thread pool */
1425					/* and softinfo & sysinfo */
1426	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
1427	movzbl	T_PIL(%esi), %ecx
1428
1429	/ cpu_stats.sys.intr[PIL]++
1430	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)
1431
1432	/
1433	/ Clear bit for this PIL in CPU's interrupt active bitmask.
1434	/
1435
1436	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)
1437
1438	btrl	%ecx, CPU_INTR_ACTV(%ebx)
1439
1440	/
1441	/ Take timestamp, compute interval, update cumulative counter.
1442	/ esi = thread, ebx = cpu, ecx = PIL
1443	/
1444	PILBASE_INTRSTAT(%ebx, %ecx)
1445_tsc_patch13:
1446	nop; nop		/* patched to rdtsc if available */
1447	TSC_SUB_FROM(%esi, T_INTR_START)
1448	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
1449	INTRACCTBASE(%ebx, %ecx)
1450	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1451
1452	/ if there is still an interrupt thread underneath this one
1453	/ then the interrupt was never blocked and the return is fairly
1454	/ simple.  Otherwise jump to softintr_thread_exit.
1455	/ softintr_thread_exit expects esi to be curthread & ebx to be ipl.
1456	cmpl	$0, T_INTR(%esi)
1457	je	softintr_thread_exit
1458
1459	/
1460	/ link the thread back onto the interrupt thread pool
1461	LINK_INTR_THREAD(%ebx, %esi, %edx)
1462
1463	/ set the thread state to free so kmdb doesn't see it
1464	movl	$FREE_THREAD, T_STATE(%esi)
1465	/
1466	/ Switch back to the interrupted thread
1467	movl	T_INTR(%esi), %ecx
1468	movl	%ecx, CPU_THREAD(%ebx)
1469	movl	T_SP(%ecx), %esp	/* restore stack pointer */
1470	movl	%esp, %ebp
1471
1472	/ If we are returning to an interrupt thread, store a starting
1473	/ timestamp in the thread structure.
1474	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
1475	jz	0f
1476_tsc_patch14:
1477	nop; nop			/* patched to rdtsc if available */
1478	TSC_STORE(%ecx, T_INTR_START)
14790:
1480	movl	CPU_BASE_SPL(%ebx), %eax
1481	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
1482	jae	softintr_restore_ipl	/* then use oldipl */
1483	movl	%eax, %edi		/* else use basespl */
1484softintr_restore_ipl:
1485	movl	%edi, CPU_PRI(%ebx) /* set IPL to old level */
1486	pushl	%edi
1487	call	*setspl
1488	popl	%eax
1489dosoftint_again:
1490	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
1491	orl	%edx, %edx
1492	jz	_sys_rtt
1493	jmp	dosoftint		/* process more software interrupts */
1494
1495softintr_thread_exit:
1496	/
1497	/ Put thread back on the interrupt thread list.
1498	/ As a reminder, the regs at this point are
1499	/	%esi	interrupt thread
1500
1501	/
1502	/ This was an interrupt thread, so set CPU's base SPL level
1503	/ set_base_spl only uses %eax.
1504	/
1505	call	set_base_spl		/* interrupt vector already on stack */
1506	/
1507	/ Set the thread state to free so kmdb doesn't see it
1508	/
1509	movl	$FREE_THREAD, T_STATE(%esi)
1510	/
1511	/ Put thread on either the interrupt pool or the free pool and
1512	/ call swtch() to resume another thread.
1513	/
1514	LOADCPU(%ebx)
1515	LINK_INTR_THREAD(%ebx, %esi, %edx)
1516	call	splhigh			/* block all intrs below lock lvl */
1517	call	swtch
1518	/ swtch() shouldn't return
1519	SET_SIZE(dosoftint)
1520
1521#endif	/* __i386 */
1522#endif	/* __lint */
1523
1524#if defined(lint)
1525
1526/*
1527 * intr_get_time() is a resource for interrupt handlers to determine how
1528 * much time has been spent handling the current interrupt. Such a function
1529 * is needed because higher level interrupts can arrive during the
1530 * processing of an interrupt, thus making direct comparisons of the TSC by
1531 * the handler inaccurate. intr_get_time() only returns time spent in the
1532 * current interrupt handler.
1533 *
1534 * The caller must be calling from an interrupt handler running at a pil
1535 * below or at lock level. Timings are not provided for high-level
1536 * interrupts.
1537 *
1538 * The first time intr_get_time() is called while handling an interrupt,
1539 * it returns the time since the interrupt handler was invoked. Subsequent
1540 * calls will return the time since the prior call to intr_get_time(). Time
1541 * is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
1542 *
1543 * Theory Of Intrstat[][]:
1544 *
1545 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1546 * uint64_ts per pil.
1547 *
1548 * intrstat[pil][0] is a cumulative count of the number of ticks spent
1549 * handling all interrupts at the specified pil on this CPU. It is
1550 * exported via kstats to the user.
1551 *
1552 * intrstat[pil][1] is always a count of ticks less than or equal to the
1553 * value in [0]. The difference between [1] and [0] is the value returned
1554 * by a call to intr_get_time(). At the start of interrupt processing,
1555 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1556 * time, [0] will increase, but [1] will remain the same. A call to
1557 * intr_get_time() will return the difference, then update [1] to be the
1558 * same as [0]. Future calls will return the time since the last call.
1559 * Finally, when the interrupt completes, [1] is updated to the same as [0].
1560 *
1561 * Implementation:
1562 *
1563 * intr_get_time() works much like a higher level interrupt arriving. It
1564 * "checkpoints" the timing information by incrementing intrstat[pil][0]
1565 * to include elapsed running time, and by setting t_intr_start to rdtsc.
1566 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1567 * and updates intrstat[pil][1] to be the same as the new value of
1568 * intrstat[pil][0].
1569 *
1570 * In the normal handling of interrupts, after an interrupt handler returns
1571 * and the code in intr_thread() updates intrstat[pil][0], it then sets
1572 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1573 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1574 * is 0.
1575 *
1576 * Whenever interrupts arrive on a CPU which is handling a lower pil
1577 * interrupt, they update the lower pil's [0] to show time spent in the
1578 * handler that they've interrupted. This results in a growing discrepancy
1579 * between [0] and [1], which is returned the next time intr_get_time() is
1580 * called. Time spent in the higher-pil interrupt will not be returned in
1581 * the next intr_get_time() call from the original interrupt, because
1582 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
1583 */
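
/*
 * A C sketch of the checkpoint logic described above (interrupts are blocked
 * around all of this, field spellings simplified; the amd64 version simply
 * calls intr_thread_get_time(), the i386 version is the assembly below):
 *
 *	uint64_t
 *	intr_get_time(void)
 *	{
 *		kthread_t *t = curthread;
 *		uint_t pil = t->t_pil;
 *		uint64_t now = tsc_read();
 *		uint64_t delta;
 *
 *		CPU->cpu_m.intrstat[pil][0] += now - t->t_intr_start;
 *		t->t_intr_start = now;
 *		delta = CPU->cpu_m.intrstat[pil][0] - CPU->cpu_m.intrstat[pil][1];
 *		CPU->cpu_m.intrstat[pil][1] = CPU->cpu_m.intrstat[pil][0];
 *		return (delta);
 *	}
 */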
1584
1585/*ARGSUSED*/
1586uint64_t
1587intr_get_time(void)
1588{ return 0; }
1589#else	/* lint */
1590
1591
1592#if defined(__amd64)
1593	ENTRY_NP(intr_get_time)
1594	cli				/* make this easy -- block intrs */
1595	LOADCPU(%rdi)
1596	call	intr_thread_get_time
1597	sti
1598	ret
1599	SET_SIZE(intr_get_time)
1600
1601#elif defined(__i386)
1602
1603#ifdef DEBUG
1604
1605
1606_intr_get_time_high_pil:
1607	.string	"intr_get_time(): %pil > LOCK_LEVEL"
1608_intr_get_time_not_intr:
1609	.string	"intr_get_time(): not called from an interrupt thread"
1610_intr_get_time_no_start_time:
1611	.string	"intr_get_time(): t_intr_start == 0"
1612
1613/*
1614 * ASSERT(%pil <= LOCK_LEVEL)
1615 */
1616#define	ASSERT_PIL_BELOW_LOCK_LEVEL(cpureg)				\
1617	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, CPU_INTR_ACTV(cpureg);	\
1618	jz	0f;							\
1619	__PANIC(_intr_get_time_high_pil, 0f);				\
16200:
1621
1622/*
1623 * ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
1624 */
1625#define	ASSERT_NO_PIL_0_INTRS(thrreg)			\
1626	testw	$T_INTR_THREAD, T_FLAGS(thrreg);	\
1627	jz	1f;					\
1628	cmpb	$0, T_PIL(thrreg);			\
1629	jne	0f;					\
16301:							\
1631	__PANIC(_intr_get_time_not_intr, 0f);		\
16320:
1633
1634/*
1635 * ASSERT(t_intr_start != 0)
1636 */
1637#define	ASSERT_INTR_START_NOT_0(thrreg)			\
1638	cmpl	$0, T_INTR_START(thrreg);		\
1639	jnz	0f;					\
1640	cmpl	$0, T_INTR_START+4(thrreg);		\
1641	jnz	0f;					\
1642	__PANIC(_intr_get_time_no_start_time, 0f);	\
16430:
1644
1645#endif /* DEBUG */
1646
1647	ENTRY_NP(intr_get_time)
1648
1649	cli				/* make this easy -- block intrs */
1650	pushl	%esi			/* and free up some registers */
1651
1652	LOADCPU(%esi)
1653	movl	CPU_THREAD(%esi), %ecx
1654
1655#ifdef DEBUG
1656	ASSERT_PIL_BELOW_LOCK_LEVEL(%esi)
1657	ASSERT_NO_PIL_0_INTRS(%ecx)
1658	ASSERT_INTR_START_NOT_0(%ecx)
1659#endif /* DEBUG */
1660
1661_tsc_patch17:
1662	nop; nop			/* patched to rdtsc if available */
1663	TSC_SUB_FROM(%ecx, T_INTR_START)	/* get elapsed time */
1664	TSC_ADD_TO(%ecx, T_INTR_START)		/* T_INTR_START = rdtsc */
1665
1666	movzbl	T_PIL(%ecx), %ecx		/* %ecx = pil */
1667	PILBASE_INTRSTAT(%esi, %ecx)		/* %ecx = CPU + pil*16 */
1668	TSC_ADD_TO(%ecx, CPU_INTRSTAT)		/* intrstat[0] += elapsed */
1669	TSC_LOAD(%ecx, CPU_INTRSTAT)		/* get new intrstat[0] */
1670	TSC_SUB_FROM(%ecx, CPU_INTRSTAT+8)	/* diff with intrstat[1] */
1671	TSC_ADD_TO(%ecx, CPU_INTRSTAT+8)	/* intrstat[1] = intrstat[0] */
1672
1673	/* %edx/%eax contain difference between old and new intrstat[1] */
1674
1675	popl	%esi
1676	sti
1677	ret
1678	SET_SIZE(intr_get_time)
1679#endif	/* __i386 */
1680
1681#endif  /* lint */
1682