xref: /titanic_53/usr/src/uts/i86pc/ml/interrupt.s (revision 7a364d25fde47aa82704b12b5251bf7fac37f02e)
17c478bd9Sstevel@tonic-gate/*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate * with the License.
87c478bd9Sstevel@tonic-gate *
97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate * and limitations under the License.
137c478bd9Sstevel@tonic-gate *
147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate *
207c478bd9Sstevel@tonic-gate * CDDL HEADER END
217c478bd9Sstevel@tonic-gate */
227c478bd9Sstevel@tonic-gate/*
237c478bd9Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate */
267c478bd9Sstevel@tonic-gate
277c478bd9Sstevel@tonic-gate/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
287c478bd9Sstevel@tonic-gate/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
297c478bd9Sstevel@tonic-gate/*	  All Rights Reserved					*/
307c478bd9Sstevel@tonic-gate
317c478bd9Sstevel@tonic-gate/*	Copyright (c) 1987, 1988 Microsoft Corporation		*/
327c478bd9Sstevel@tonic-gate/*	  All Rights Reserved					*/
337c478bd9Sstevel@tonic-gate
347c478bd9Sstevel@tonic-gate#pragma ident	"%Z%%M%	%I%	%E% SMI"
357c478bd9Sstevel@tonic-gate
367c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h>
377c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h>
387c478bd9Sstevel@tonic-gate#include <sys/regset.h>
397c478bd9Sstevel@tonic-gate#include <sys/psw.h>
407c478bd9Sstevel@tonic-gate#include <sys/x86_archext.h>
417c478bd9Sstevel@tonic-gate
427c478bd9Sstevel@tonic-gate#if defined(__lint)
437c478bd9Sstevel@tonic-gate
447c478bd9Sstevel@tonic-gate#include <sys/types.h>
457c478bd9Sstevel@tonic-gate#include <sys/thread.h>
467c478bd9Sstevel@tonic-gate#include <sys/systm.h>
477c478bd9Sstevel@tonic-gate
487c478bd9Sstevel@tonic-gate#else   /* __lint */
497c478bd9Sstevel@tonic-gate
507c478bd9Sstevel@tonic-gate#include <sys/segments.h>
517c478bd9Sstevel@tonic-gate#include <sys/pcb.h>
527c478bd9Sstevel@tonic-gate#include <sys/trap.h>
537c478bd9Sstevel@tonic-gate#include <sys/ftrace.h>
547c478bd9Sstevel@tonic-gate#include <sys/traptrace.h>
557c478bd9Sstevel@tonic-gate#include <sys/clock.h>
567c478bd9Sstevel@tonic-gate#include <sys/panic.h>
577c478bd9Sstevel@tonic-gate#include "assym.h"
587c478bd9Sstevel@tonic-gate
/*
 * ftrace format string consumed by the ftrace_3_notick() call in
 * intr_thread below (arguments: &regs, vec, new PIL).
 */
597c478bd9Sstevel@tonic-gate_ftrace_intr_thread_fmt:
607c478bd9Sstevel@tonic-gate	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"
617c478bd9Sstevel@tonic-gate
627c478bd9Sstevel@tonic-gate#endif	/* lint */
637c478bd9Sstevel@tonic-gate
647c478bd9Sstevel@tonic-gate#if defined(__i386)
657c478bd9Sstevel@tonic-gate
667c478bd9Sstevel@tonic-gate#if defined(__lint)
677c478bd9Sstevel@tonic-gate
/* Empty stub seen only by lint; the real patch_tsc is the assembly below. */
687c478bd9Sstevel@tonic-gatevoid
697c478bd9Sstevel@tonic-gatepatch_tsc(void)
707c478bd9Sstevel@tonic-gate{}
717c478bd9Sstevel@tonic-gate
727c478bd9Sstevel@tonic-gate#else	/* __lint */
737c478bd9Sstevel@tonic-gate
747c478bd9Sstevel@tonic-gate/*
757c478bd9Sstevel@tonic-gate * To cope with processors that do not implement the rdtsc instruction,
767c478bd9Sstevel@tonic-gate * we patch the kernel to use rdtsc if that feature is detected on the CPU.
777c478bd9Sstevel@tonic-gate * On an unpatched kernel, all locations requiring rdtsc are nop's.
787c478bd9Sstevel@tonic-gate *
797c478bd9Sstevel@tonic-gate * This function patches the nop's to rdtsc.
807c478bd9Sstevel@tonic-gate */
817c478bd9Sstevel@tonic-gate	ENTRY_NP(patch_tsc)
	/*
	 * Self-patching: each _tsc_patchN site is a two-byte "nop; nop"
	 * pair (see _tsc_patch1 later in this file).  A 16-bit move is
	 * therefore exactly wide enough to overwrite one site with the
	 * two-byte rdtsc encoding stored at _rdtsc_insn below.
	 */
827c478bd9Sstevel@tonic-gate	movw	_rdtsc_insn, %cx
837c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch1
847c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch2
857c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch3
867c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch4
877c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch5
887c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch6
897c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch7
907c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch8
917c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch9
927c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch10
937c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch11
947c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch12
957c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch13
967c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch14
977c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch15
987c478bd9Sstevel@tonic-gate	movw	%cx, _tsc_patch16
99*7a364d25Sschwartz	movw	%cx, _tsc_patch17
1007c478bd9Sstevel@tonic-gate	ret
	/* Template instruction whose 2-byte encoding is copied above. */
1017c478bd9Sstevel@tonic-gate_rdtsc_insn:
1027c478bd9Sstevel@tonic-gate	rdtsc
1037c478bd9Sstevel@tonic-gate	SET_SIZE(patch_tsc)
1047c478bd9Sstevel@tonic-gate
1057c478bd9Sstevel@tonic-gate#endif	/* __lint */
1067c478bd9Sstevel@tonic-gate
1077c478bd9Sstevel@tonic-gate#endif	/* __i386 */
1087c478bd9Sstevel@tonic-gate
1097c478bd9Sstevel@tonic-gate
1107c478bd9Sstevel@tonic-gate#if defined(__lint)
1117c478bd9Sstevel@tonic-gate
/* Empty stub seen only by lint; the real _interrupt is the assembly below. */
1127c478bd9Sstevel@tonic-gatevoid
1137c478bd9Sstevel@tonic-gate_interrupt(void)
1147c478bd9Sstevel@tonic-gate{}
1157c478bd9Sstevel@tonic-gate
1167c478bd9Sstevel@tonic-gate#else	/* __lint */
1177c478bd9Sstevel@tonic-gate
1187c478bd9Sstevel@tonic-gate#if defined(__amd64)
1197c478bd9Sstevel@tonic-gate
1207c478bd9Sstevel@tonic-gate	/*
1217c478bd9Sstevel@tonic-gate	 * Common register usage:
1227c478bd9Sstevel@tonic-gate	 *
1237c478bd9Sstevel@tonic-gate	 * %rbx		cpu pointer
1247c478bd9Sstevel@tonic-gate	 * %r12		trap trace pointer -and- stash of
1257c478bd9Sstevel@tonic-gate	 *		vec across intr_thread dispatch.
1267c478bd9Sstevel@tonic-gate	 * %r13d	ipl of isr
1277c478bd9Sstevel@tonic-gate	 * %r14d	old ipl (ipl level we entered on)
1287c478bd9Sstevel@tonic-gate	 * %r15		interrupted thread stack pointer
1297c478bd9Sstevel@tonic-gate	 */
1307c478bd9Sstevel@tonic-gate	ENTRY_NP2(cmnint, _interrupt)
1317c478bd9Sstevel@tonic-gate
1327c478bd9Sstevel@tonic-gate	INTR_PUSH
1337c478bd9Sstevel@tonic-gate
1347c478bd9Sstevel@tonic-gate	/*
1357c478bd9Sstevel@tonic-gate	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
1367c478bd9Sstevel@tonic-gate	 */
1377c478bd9Sstevel@tonic-gate	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
1387c478bd9Sstevel@tonic-gate						/* Uses labels 8 and 9 */
1397c478bd9Sstevel@tonic-gate	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
1407c478bd9Sstevel@tonic-gate	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */
1417c478bd9Sstevel@tonic-gate
1427c478bd9Sstevel@tonic-gate	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */
1437c478bd9Sstevel@tonic-gate
	/*
	 * %rbp = base of the register save area pushed by INTR_PUSH;
	 * it is used as "&regs" for the remainder of the handler.
	 */
1447c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
1457c478bd9Sstevel@tonic-gate
1467c478bd9Sstevel@tonic-gate	TRACE_STACK(%r12)
1477c478bd9Sstevel@tonic-gate
1487c478bd9Sstevel@tonic-gate	LOADCPU(%rbx)				/* &cpu */
1497c478bd9Sstevel@tonic-gate	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
1507c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
1517c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
1527c478bd9Sstevel@tonic-gate
1537c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
	/* Pre-fill trace fields with sentinels; updated with real values below. */
1547c478bd9Sstevel@tonic-gate	movl	$255, TTR_IPL(%r12)
1557c478bd9Sstevel@tonic-gate	movl	%r14d, %edi
1567c478bd9Sstevel@tonic-gate	movb	%dil, TTR_PRI(%r12)
1577c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%rbx), %edi
1587c478bd9Sstevel@tonic-gate	movb	%dil, TTR_SPL(%r12)
1597c478bd9Sstevel@tonic-gate	movb	$255, TTR_VECTOR(%r12)
1607c478bd9Sstevel@tonic-gate#endif
1617c478bd9Sstevel@tonic-gate
1627c478bd9Sstevel@tonic-gate	/*
1637c478bd9Sstevel@tonic-gate	 * Check to see if the trap number is T_SOFTINT; if it is,
1647c478bd9Sstevel@tonic-gate	 * jump straight to dosoftint now.
1657c478bd9Sstevel@tonic-gate	 */
1667c478bd9Sstevel@tonic-gate	cmpq	$T_SOFTINT, (%rsi)
1677c478bd9Sstevel@tonic-gate	je	dosoftint
1687c478bd9Sstevel@tonic-gate
1697c478bd9Sstevel@tonic-gate	/*
1707c478bd9Sstevel@tonic-gate	 * Raise the interrupt priority level, returns newpil.
1717c478bd9Sstevel@tonic-gate	 * (The vector address is in %rsi so setlvl can update it.)
1727c478bd9Sstevel@tonic-gate	 */
1737c478bd9Sstevel@tonic-gate	movl	%r14d, %edi			/* old ipl */
1747c478bd9Sstevel@tonic-gate						/* &vector */
1757c478bd9Sstevel@tonic-gate	call	*setlvl(%rip)
1767c478bd9Sstevel@tonic-gate
1777c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
1787c478bd9Sstevel@tonic-gate	movb	%al, TTR_IPL(%r12)
1797c478bd9Sstevel@tonic-gate#endif
1807c478bd9Sstevel@tonic-gate	/*
1817c478bd9Sstevel@tonic-gate	 * check for spurious interrupt
1827c478bd9Sstevel@tonic-gate	 */
1837c478bd9Sstevel@tonic-gate	cmpl	$-1, %eax
1847c478bd9Sstevel@tonic-gate	je	_sys_rtt
1857c478bd9Sstevel@tonic-gate
1867c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
1877c478bd9Sstevel@tonic-gate	movl	%r14d, %edx
1887c478bd9Sstevel@tonic-gate	movb	%dl, TTR_PRI(%r12)
1897c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%rbx), %edx
1907c478bd9Sstevel@tonic-gate	movb	%dl, TTR_SPL(%r12)
1917c478bd9Sstevel@tonic-gate#endif
1927c478bd9Sstevel@tonic-gate	movl	%eax, CPU_PRI(%rbx)		/* update ipl */
1937c478bd9Sstevel@tonic-gate
1947c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
1957c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %edx
1967c478bd9Sstevel@tonic-gate	movb	%dl, TTR_VECTOR(%r12)
1977c478bd9Sstevel@tonic-gate#endif
1987c478bd9Sstevel@tonic-gate	movl	%eax, %r13d			/* ipl of isr */
1997c478bd9Sstevel@tonic-gate
2007c478bd9Sstevel@tonic-gate	/*
2017c478bd9Sstevel@tonic-gate	 * At this point we can take one of two paths.
2027c478bd9Sstevel@tonic-gate	 * If the new level is at or below lock level, we will
2037c478bd9Sstevel@tonic-gate	 * run this interrupt in a separate thread.
2047c478bd9Sstevel@tonic-gate	 */
2057c478bd9Sstevel@tonic-gate	cmpl	$LOCK_LEVEL, %eax
2067c478bd9Sstevel@tonic-gate	jbe	intr_thread
2077c478bd9Sstevel@tonic-gate
	/* High-level (above LOCK_LEVEL) interrupt: handled on this CPU's stack. */
2087c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi		/* &cpu */
2097c478bd9Sstevel@tonic-gate	movl	%r13d, %esi		/* ipl */
2107c478bd9Sstevel@tonic-gate	movl	%r14d, %edx		/* old ipl */
2117c478bd9Sstevel@tonic-gate	movq	%rbp, %rcx		/* &regs */
2127c478bd9Sstevel@tonic-gate	call	hilevel_intr_prolog
2137c478bd9Sstevel@tonic-gate	orl	%eax, %eax		/* zero if need to switch stack */
2147c478bd9Sstevel@tonic-gate	jnz	1f
2157c478bd9Sstevel@tonic-gate
2167c478bd9Sstevel@tonic-gate	/*
2177c478bd9Sstevel@tonic-gate	 * Save the thread stack and get on the cpu's interrupt stack
2187c478bd9Sstevel@tonic-gate	 */
2197c478bd9Sstevel@tonic-gate	movq	%rsp, %r15
2207c478bd9Sstevel@tonic-gate	movq	CPU_INTR_STACK(%rbx), %rsp
2217c478bd9Sstevel@tonic-gate1:
2227c478bd9Sstevel@tonic-gate
	/* Handlers run with interrupts re-enabled. */
2237c478bd9Sstevel@tonic-gate	sti
2247c478bd9Sstevel@tonic-gate
2257c478bd9Sstevel@tonic-gate	/*
2267c478bd9Sstevel@tonic-gate	 * Walk the list of handlers for this vector, calling
2277c478bd9Sstevel@tonic-gate	 * them as we go until no more interrupts are claimed.
2287c478bd9Sstevel@tonic-gate	 */
2297c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %edi
2307c478bd9Sstevel@tonic-gate	call	av_dispatch_autovect
2317c478bd9Sstevel@tonic-gate
	/* Mask interrupts again for the epilog bookkeeping. */
2327c478bd9Sstevel@tonic-gate	cli
2337c478bd9Sstevel@tonic-gate
2347c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi			/* &cpu */
2357c478bd9Sstevel@tonic-gate	movl	%r13d, %esi			/* ipl */
2367c478bd9Sstevel@tonic-gate	movl	%r14d, %edx			/* oldipl */
2377c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
2387c478bd9Sstevel@tonic-gate	call	hilevel_intr_epilog
2397c478bd9Sstevel@tonic-gate	orl	%eax, %eax		/* zero if need to switch stack */
2407c478bd9Sstevel@tonic-gate	jnz	2f
2417c478bd9Sstevel@tonic-gate	movq	%r15, %rsp
2427c478bd9Sstevel@tonic-gate2:	/*
2437c478bd9Sstevel@tonic-gate	 * Check for, and execute, softints before we iret.
2447c478bd9Sstevel@tonic-gate	 *
2457c478bd9Sstevel@tonic-gate	 * (dosoftint expects oldipl in %r14d (which is where it is)
2467c478bd9Sstevel@tonic-gate	 * the cpu pointer in %rbx (which is where it is) and the
2477c478bd9Sstevel@tonic-gate	 * softinfo in %edx (which is where we'll put it right now))
2487c478bd9Sstevel@tonic-gate	 */
2497c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
2507c478bd9Sstevel@tonic-gate	orl	%edx, %edx
2517c478bd9Sstevel@tonic-gate	jz	_sys_rtt
2527c478bd9Sstevel@tonic-gate	jmp	dosoftint
2537c478bd9Sstevel@tonic-gate	/*NOTREACHED*/
2547c478bd9Sstevel@tonic-gate
2557c478bd9Sstevel@tonic-gate	SET_SIZE(cmnint)
2567c478bd9Sstevel@tonic-gate	SET_SIZE(_interrupt)
2577c478bd9Sstevel@tonic-gate
2587c478bd9Sstevel@tonic-gate/*
2597c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread
2607c478bd9Sstevel@tonic-gate *
2617c478bd9Sstevel@tonic-gate * As we branch here, interrupts are still masked,
2627c478bd9Sstevel@tonic-gate * %rbx still contains the cpu pointer,
2637c478bd9Sstevel@tonic-gate * %r14d contains the old ipl that we came in on, and
2647c478bd9Sstevel@tonic-gate * %eax contains the new ipl that we got from the setlvl routine
2657c478bd9Sstevel@tonic-gate */
2667c478bd9Sstevel@tonic-gate
2677c478bd9Sstevel@tonic-gate	ENTRY_NP(intr_thread)
2687c478bd9Sstevel@tonic-gate
2697c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi	/* &cpu */
2707c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
2717c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
2727c478bd9Sstevel@tonic-gate	movl	%eax, %edx	/* new pil from setlvlx() */
2737c478bd9Sstevel@tonic-gate	call	intr_thread_prolog
	/* %r15 = interrupted thread's stack pointer; restored below. */
2747c478bd9Sstevel@tonic-gate	movq	%rsp, %r15
2757c478bd9Sstevel@tonic-gate	movq	%rax, %rsp	/* t_stk from interrupt thread */
2767c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
2777c478bd9Sstevel@tonic-gate
2787c478bd9Sstevel@tonic-gate	sti
2797c478bd9Sstevel@tonic-gate
2807c478bd9Sstevel@tonic-gate	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
2817c478bd9Sstevel@tonic-gate	jz	1f
2827c478bd9Sstevel@tonic-gate	/*
2837c478bd9Sstevel@tonic-gate	 * ftracing support. do we need this on x86?
2847c478bd9Sstevel@tonic-gate	 */
2857c478bd9Sstevel@tonic-gate	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
2867c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi			/* &regs */
2877c478bd9Sstevel@tonic-gate	movl	%r12d, %edx			/* vec */
2887c478bd9Sstevel@tonic-gate	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
2897c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%r11), %ecx		/* newipl */
2907c478bd9Sstevel@tonic-gate	call	ftrace_3_notick
2917c478bd9Sstevel@tonic-gate1:
2927c478bd9Sstevel@tonic-gate	movl	%r12d, %edi			/* vec */
2937c478bd9Sstevel@tonic-gate	call	av_dispatch_autovect
2947c478bd9Sstevel@tonic-gate
	/* Mask interrupts again for the epilog bookkeeping. */
2957c478bd9Sstevel@tonic-gate	cli
2967c478bd9Sstevel@tonic-gate
2977c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi			/* &cpu */
2987c478bd9Sstevel@tonic-gate	movl	%r12d, %esi			/* vec */
2997c478bd9Sstevel@tonic-gate	movl	%r14d, %edx			/* oldpil */
3007c478bd9Sstevel@tonic-gate	call	intr_thread_epilog
3017c478bd9Sstevel@tonic-gate	/*
3027c478bd9Sstevel@tonic-gate	 * If we return from here (we might not if the interrupted thread
3037c478bd9Sstevel@tonic-gate	 * has exited or blocked, in which case we'll have quietly swtch()ed
3047c478bd9Sstevel@tonic-gate	 * away) then we need to switch back to our old %rsp
3057c478bd9Sstevel@tonic-gate	 */
3067c478bd9Sstevel@tonic-gate	movq	%r15, %rsp
3077c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
3087c478bd9Sstevel@tonic-gate	/*
3097c478bd9Sstevel@tonic-gate	 * Check for, and execute, softints before we iret.
3107c478bd9Sstevel@tonic-gate	 *
3117c478bd9Sstevel@tonic-gate	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
3127c478bd9Sstevel@tonic-gate	 * the mcpu_softinfo.st_pending field in %edx.
3137c478bd9Sstevel@tonic-gate	 */
3147c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
3157c478bd9Sstevel@tonic-gate	orl	%edx, %edx
3167c478bd9Sstevel@tonic-gate	jz	_sys_rtt
3177c478bd9Sstevel@tonic-gate	/*FALLTHROUGH*/
3187c478bd9Sstevel@tonic-gate
3197c478bd9Sstevel@tonic-gate/*
3207c478bd9Sstevel@tonic-gate * Process soft interrupts.
3217c478bd9Sstevel@tonic-gate * Interrupts are masked, and we have a minimal frame on the stack.
3227c478bd9Sstevel@tonic-gate * %edx should contain the mcpu_softinfo.st_pending field
3237c478bd9Sstevel@tonic-gate */
3247c478bd9Sstevel@tonic-gate
3257c478bd9Sstevel@tonic-gate	ALTENTRY(dosoftint)
3267c478bd9Sstevel@tonic-gate
3277c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi	/* &cpu */
3287c478bd9Sstevel@tonic-gate	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
3297c478bd9Sstevel@tonic-gate				/* cpu->cpu_m.mcpu_softinfo.st_pending */
3307c478bd9Sstevel@tonic-gate	movl	%r14d, %ecx	/* oldipl */
3317c478bd9Sstevel@tonic-gate	call	dosoftint_prolog
3327c478bd9Sstevel@tonic-gate	/*
3337c478bd9Sstevel@tonic-gate	 * dosoftint_prolog() usually returns a stack pointer for the
3347c478bd9Sstevel@tonic-gate	 * interrupt thread that we must switch to.  However, if the
3357c478bd9Sstevel@tonic-gate	 * returned stack pointer is NULL, then the software interrupt was
3367c478bd9Sstevel@tonic-gate	 * too low in priority to run now; we'll catch it another time.
3377c478bd9Sstevel@tonic-gate	 */
3387c478bd9Sstevel@tonic-gate	orq	%rax, %rax
3397c478bd9Sstevel@tonic-gate	jz	_sys_rtt
	/* %r15 = interrupted stack pointer; switch onto the softint thread's t_stk. */
3407c478bd9Sstevel@tonic-gate	movq	%rsp, %r15
3417c478bd9Sstevel@tonic-gate	movq	%rax, %rsp	/* t_stk from interrupt thread */
3427c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
3437c478bd9Sstevel@tonic-gate
3447c478bd9Sstevel@tonic-gate	sti
3457c478bd9Sstevel@tonic-gate
3467c478bd9Sstevel@tonic-gate	/*
3477c478bd9Sstevel@tonic-gate	 * Enabling interrupts (above) could raise the current ipl
3487c478bd9Sstevel@tonic-gate	 * and base spl.  But, we continue processing the current soft
3497c478bd9Sstevel@tonic-gate	 * interrupt and we will check the base spl next time around
3507c478bd9Sstevel@tonic-gate	 * so that blocked interrupt threads get a chance to run.
3517c478bd9Sstevel@tonic-gate	 */
3527c478bd9Sstevel@tonic-gate	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
3537c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%r11), %edi
3547c478bd9Sstevel@tonic-gate	call	av_dispatch_softvect
3557c478bd9Sstevel@tonic-gate
3567c478bd9Sstevel@tonic-gate	cli
3577c478bd9Sstevel@tonic-gate
3587c478bd9Sstevel@tonic-gate	movq	%rbx, %rdi		/* &cpu */
3597c478bd9Sstevel@tonic-gate	movl	%r14d, %esi		/* oldpil */
3607c478bd9Sstevel@tonic-gate	call	dosoftint_epilog
3617c478bd9Sstevel@tonic-gate	movq	%r15, %rsp		/* back on old stack pointer */
3627c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
	/* More softints may have become pending while we ran; loop if so. */
3637c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%rbx), %edx
3647c478bd9Sstevel@tonic-gate	orl	%edx, %edx
3657c478bd9Sstevel@tonic-gate	jz	_sys_rtt
3667c478bd9Sstevel@tonic-gate	jmp	dosoftint
3677c478bd9Sstevel@tonic-gate
3687c478bd9Sstevel@tonic-gate	SET_SIZE(dosoftint)
3697c478bd9Sstevel@tonic-gate	SET_SIZE(intr_thread)
3707c478bd9Sstevel@tonic-gate
3717c478bd9Sstevel@tonic-gate#elif defined(__i386)
3727c478bd9Sstevel@tonic-gate
3737c478bd9Sstevel@tonic-gate/*
3747c478bd9Sstevel@tonic-gate * One day, this should just invoke the C routines that know how to
3757c478bd9Sstevel@tonic-gate * do all the interrupt bookkeeping.  In the meantime, try
3767c478bd9Sstevel@tonic-gate * and make the assembler a little more comprehensible.
3777c478bd9Sstevel@tonic-gate */
3787c478bd9Sstevel@tonic-gate
/*
 * 64-bit in-memory increment on 32-bit i386: add 1 to the low word,
 * then propagate the carry into the high word.
 */
3797c478bd9Sstevel@tonic-gate#define	INC64(basereg, offset)			\
3807c478bd9Sstevel@tonic-gate	addl	$1, offset(basereg);		\
3817c478bd9Sstevel@tonic-gate	adcl	$0, offset + 4(basereg)
3827c478bd9Sstevel@tonic-gate
/* Zero a 64-bit memory value (both 32-bit halves). */
3837c478bd9Sstevel@tonic-gate#define	TSC_CLR(basereg, offset)		\
3847c478bd9Sstevel@tonic-gate	movl	$0, offset(basereg);		\
3857c478bd9Sstevel@tonic-gate	movl	$0, offset + 4(basereg)
3867c478bd9Sstevel@tonic-gate
3877c478bd9Sstevel@tonic-gate/*
3887c478bd9Sstevel@tonic-gate * The following macros assume the time value is in %edx:%eax
3897c478bd9Sstevel@tonic-gate * e.g. from a rdtsc instruction.
3907c478bd9Sstevel@tonic-gate */
/* Store %edx:%eax to the 64-bit location at offset(reg). */
391*7a364d25Sschwartz#define	TSC_STORE(reg, offset)		\
3927c478bd9Sstevel@tonic-gate	movl	%eax, offset(reg);	\
3937c478bd9Sstevel@tonic-gate	movl	%edx, offset + 4(reg)
3947c478bd9Sstevel@tonic-gate
/* Load the 64-bit value at offset(reg) into %edx:%eax. */
395*7a364d25Sschwartz#define	TSC_LOAD(reg, offset)	\
396*7a364d25Sschwartz	movl	offset(reg), %eax;	\
397*7a364d25Sschwartz	movl	offset + 4(reg), %edx
398*7a364d25Sschwartz
/* 64-bit add of %edx:%eax into memory, with carry propagation. */
3997c478bd9Sstevel@tonic-gate#define	TSC_ADD_TO(reg, offset)		\
4007c478bd9Sstevel@tonic-gate	addl	%eax, offset(reg);	\
4017c478bd9Sstevel@tonic-gate	adcl	%edx, offset + 4(reg)
4027c478bd9Sstevel@tonic-gate
/* %edx:%eax -= 64-bit value at offset(reg), with borrow propagation. */
4037c478bd9Sstevel@tonic-gate#define	TSC_SUB_FROM(reg, offset)	\
4047c478bd9Sstevel@tonic-gate	subl	offset(reg), %eax;	\
4057c478bd9Sstevel@tonic-gate	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */
4067c478bd9Sstevel@tonic-gate
4077c478bd9Sstevel@tonic-gate/*
4087c478bd9Sstevel@tonic-gate * basereg   - pointer to cpu struct
4097c478bd9Sstevel@tonic-gate * pilreg    - pil or converted pil (pil - (LOCK_LEVEL + 1))
4107c478bd9Sstevel@tonic-gate *
4117c478bd9Sstevel@tonic-gate * Returns (base + pil * 8) in pilreg
4127c478bd9Sstevel@tonic-gate */
4137c478bd9Sstevel@tonic-gate#define	PILBASE(basereg, pilreg)	\
4147c478bd9Sstevel@tonic-gate	lea	(basereg, pilreg, 8), pilreg
4157c478bd9Sstevel@tonic-gate
4167c478bd9Sstevel@tonic-gate/*
4177c478bd9Sstevel@tonic-gate * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
4187c478bd9Sstevel@tonic-gate */
419*7a364d25Sschwartz#define	HIGHPILBASE(basereg, pilreg)		\
420*7a364d25Sschwartz	subl	$LOCK_LEVEL + 1, pilreg;	\
4217c478bd9Sstevel@tonic-gate	PILBASE(basereg, pilreg)
4227c478bd9Sstevel@tonic-gate
4237c478bd9Sstevel@tonic-gate/*
424*7a364d25Sschwartz * Returns (base + pil * 16) in pilreg
425*7a364d25Sschwartz */
426*7a364d25Sschwartz#define	PILBASE_INTRSTAT(basereg, pilreg)	\
427*7a364d25Sschwartz	shl	$4, pilreg;			\
428*7a364d25Sschwartz	addl	basereg, pilreg;
429*7a364d25Sschwartz
430*7a364d25Sschwartz/*
431eda89462Sesolom * Returns (cpu + cpu_mstate * 8) in tgt
432eda89462Sesolom */
433eda89462Sesolom#define	INTRACCTBASE(cpureg, tgtreg)		\
434eda89462Sesolom	movzwl	CPU_MSTATE(cpureg), tgtreg;	\
435eda89462Sesolom	lea	(cpureg, tgtreg, 8), tgtreg
436eda89462Sesolom
437eda89462Sesolom/*
4387c478bd9Sstevel@tonic-gate * cpu_stats.sys.intr[PIL]++
4397c478bd9Sstevel@tonic-gate */
4407c478bd9Sstevel@tonic-gate#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
4417c478bd9Sstevel@tonic-gate	movl	pilreg, tmpreg_32;				\
4427c478bd9Sstevel@tonic-gate	PILBASE(basereg, tmpreg);				\
4437c478bd9Sstevel@tonic-gate	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))
4447c478bd9Sstevel@tonic-gate
4457c478bd9Sstevel@tonic-gate/*
4467c478bd9Sstevel@tonic-gate * Unlink thread from CPU's list
4477c478bd9Sstevel@tonic-gate * (pops the list head and advances cpu_intr_thread to its t_link).
4487c478bd9Sstevel@tonic-gate */
4487c478bd9Sstevel@tonic-gate#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
4497c478bd9Sstevel@tonic-gate	mov	CPU_INTR_THREAD(cpureg), ithread;	\
4507c478bd9Sstevel@tonic-gate	mov	T_LINK(ithread), tmpreg;		\
4517c478bd9Sstevel@tonic-gate	mov	tmpreg, CPU_INTR_THREAD(cpureg)
4527c478bd9Sstevel@tonic-gate
4537c478bd9Sstevel@tonic-gate/*
4547c478bd9Sstevel@tonic-gate * Link a thread into CPU's list
4557c478bd9Sstevel@tonic-gate * (pushes ithread at the head; old head becomes its t_link).
4557c478bd9Sstevel@tonic-gate */
4567c478bd9Sstevel@tonic-gate#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
4577c478bd9Sstevel@tonic-gate	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
4587c478bd9Sstevel@tonic-gate	mov	tmpreg, T_LINK(ithread);		\
4597c478bd9Sstevel@tonic-gate	mov	ithread, CPU_INTR_THREAD(cpureg)
4607c478bd9Sstevel@tonic-gate
4617c478bd9Sstevel@tonic-gate#if defined(DEBUG)
4627c478bd9Sstevel@tonic-gate
4637c478bd9Sstevel@tonic-gate/*
4647c478bd9Sstevel@tonic-gate * Do not call panic, if panic is already in progress.
4657c478bd9Sstevel@tonic-gate */
4667c478bd9Sstevel@tonic-gate#define	__PANIC(msg, label)		\
4677c478bd9Sstevel@tonic-gate	cmpl	$0, panic_quiesce;		\
4687c478bd9Sstevel@tonic-gate	jne	label;				\
4697c478bd9Sstevel@tonic-gate	pushl	$msg;				\
4707c478bd9Sstevel@tonic-gate	call	panic
4717c478bd9Sstevel@tonic-gate
/*
 * Branch to label if the 64-bit value at offset(basereg) is nonzero
 * (either 32-bit half set).
 */
4727c478bd9Sstevel@tonic-gate#define	__CMP64_JNE(basereg, offset, label)	\
4737c478bd9Sstevel@tonic-gate	cmpl	$0, offset(basereg);		\
4747c478bd9Sstevel@tonic-gate	jne	label;				\
4757c478bd9Sstevel@tonic-gate	cmpl	$0, offset + 4(basereg);	\
4767c478bd9Sstevel@tonic-gate	jne	label
4777c478bd9Sstevel@tonic-gate
4787c478bd9Sstevel@tonic-gate/*
4797c478bd9Sstevel@tonic-gate * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
4807c478bd9Sstevel@tonic-gate */
4817c478bd9Sstevel@tonic-gate#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
4827c478bd9Sstevel@tonic-gate	btl	pilreg, CPU_INTR_ACTV(basereg);		\
4837c478bd9Sstevel@tonic-gate	jnc	4f;					\
4847c478bd9Sstevel@tonic-gate	__PANIC(msg, 4f);				\
4857c478bd9Sstevel@tonic-gate4:
4867c478bd9Sstevel@tonic-gate
4877c478bd9Sstevel@tonic-gate/*
4887c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
4897c478bd9Sstevel@tonic-gate */
4907c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
4917c478bd9Sstevel@tonic-gate	btl	pilreg, CPU_INTR_ACTV(basereg);		\
4927c478bd9Sstevel@tonic-gate	jc	5f;					\
4937c478bd9Sstevel@tonic-gate	__PANIC(msg, 5f);				\
4947c478bd9Sstevel@tonic-gate5:
4957c478bd9Sstevel@tonic-gate
4967c478bd9Sstevel@tonic-gate/*
4977c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_pil_high_start != 0)
4987c478bd9Sstevel@tonic-gate */
4997c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)			\
5007c478bd9Sstevel@tonic-gate	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);		\
5017c478bd9Sstevel@tonic-gate	__PANIC(_interrupt_timestamp_zero, 6f);		\
5027c478bd9Sstevel@tonic-gate6:
5037c478bd9Sstevel@tonic-gate
5047c478bd9Sstevel@tonic-gate/*
5057c478bd9Sstevel@tonic-gate * ASSERT(t->t_intr_start != 0)
5067c478bd9Sstevel@tonic-gate */
5077c478bd9Sstevel@tonic-gate#define	ASSERT_T_INTR_START_NZ(basereg)				\
5087c478bd9Sstevel@tonic-gate	__CMP64_JNE(basereg, T_INTR_START, 7f);			\
5097c478bd9Sstevel@tonic-gate	__PANIC(_intr_thread_t_intr_start_zero, 7f);	\
5107c478bd9Sstevel@tonic-gate7:
5117c478bd9Sstevel@tonic-gate
/* Panic message strings used by the assertion macros above. */
5127c478bd9Sstevel@tonic-gate_interrupt_actv_bit_set:
5137c478bd9Sstevel@tonic-gate	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
5147c478bd9Sstevel@tonic-gate_interrupt_actv_bit_not_set:
5157c478bd9Sstevel@tonic-gate	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
5167c478bd9Sstevel@tonic-gate_interrupt_timestamp_zero:
5177c478bd9Sstevel@tonic-gate	.string "_interrupt(): timestamp zero upon handler return"
5187c478bd9Sstevel@tonic-gate_intr_thread_actv_bit_not_set:
5197c478bd9Sstevel@tonic-gate	.string	"intr_thread():	cpu_intr_actv bit not set for PIL"
5207c478bd9Sstevel@tonic-gate_intr_thread_t_intr_start_zero:
5217c478bd9Sstevel@tonic-gate	.string	"intr_thread():	t_intr_start zero upon handler return"
5227c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_set:
5237c478bd9Sstevel@tonic-gate	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
5247c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_not_set:
5257c478bd9Sstevel@tonic-gate	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"
5267c478bd9Sstevel@tonic-gate
/*
 * Debug-only global data symbol; presumably a counter maintained by the
 * intr_thread code below this view — TODO confirm where it is incremented.
 */
5277c478bd9Sstevel@tonic-gate	DGDEF(intr_thread_cnt)
5287c478bd9Sstevel@tonic-gate
/* Non-DEBUG build: the assertion macros expand to nothing. */
5297c478bd9Sstevel@tonic-gate#else
5307c478bd9Sstevel@tonic-gate#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
5317c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
5327c478bd9Sstevel@tonic-gate#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
5337c478bd9Sstevel@tonic-gate#define	ASSERT_T_INTR_START_NZ(basereg)
5347c478bd9Sstevel@tonic-gate#endif
5357c478bd9Sstevel@tonic-gate
5367c478bd9Sstevel@tonic-gate	ENTRY_NP2(cmnint, _interrupt)
5377c478bd9Sstevel@tonic-gate
5387c478bd9Sstevel@tonic-gate	INTR_PUSH
5397c478bd9Sstevel@tonic-gate
5407c478bd9Sstevel@tonic-gate	/*
5417c478bd9Sstevel@tonic-gate	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
5427c478bd9Sstevel@tonic-gate	 */
5437c478bd9Sstevel@tonic-gate	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
5447c478bd9Sstevel@tonic-gate						/* Uses labels 8 and 9 */
5457c478bd9Sstevel@tonic-gate	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
5467c478bd9Sstevel@tonic-gate	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */
5477c478bd9Sstevel@tonic-gate
5487c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
5497c478bd9Sstevel@tonic-gate	DISABLE_INTR_FLAGS
5507c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
5517c478bd9Sstevel@tonic-gate	leal    REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
5527c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %edi		/* get ipl */
5537c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx
5547c478bd9Sstevel@tonic-gate
5557c478bd9Sstevel@tonic-gate	/
5567c478bd9Sstevel@tonic-gate	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
5577c478bd9Sstevel@tonic-gate	/ jump straight to dosoftint now.
5587c478bd9Sstevel@tonic-gate	/
5597c478bd9Sstevel@tonic-gate	cmpl	$T_SOFTINT, (%ecx)
5607c478bd9Sstevel@tonic-gate	je	dosoftint
5617c478bd9Sstevel@tonic-gate
5627c478bd9Sstevel@tonic-gate	/ raise interrupt priority level
5637c478bd9Sstevel@tonic-gate	/ oldipl is in %edi, vectorp is in %ecx
5647c478bd9Sstevel@tonic-gate	/ newipl is returned in %eax
5657c478bd9Sstevel@tonic-gate	pushl	%ecx
5667c478bd9Sstevel@tonic-gate	pushl	%edi
5677c478bd9Sstevel@tonic-gate	call    *setlvl
5687c478bd9Sstevel@tonic-gate	popl	%edi			/* save oldpil in %edi */
5697c478bd9Sstevel@tonic-gate	popl	%ecx
5707c478bd9Sstevel@tonic-gate
5717c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
5727c478bd9Sstevel@tonic-gate	movb	%al, TTR_IPL(%esi)
5737c478bd9Sstevel@tonic-gate#endif
5747c478bd9Sstevel@tonic-gate
5757c478bd9Sstevel@tonic-gate	/ check for spurious interrupt
5767c478bd9Sstevel@tonic-gate	cmp	$-1, %eax
5777c478bd9Sstevel@tonic-gate	je	_sys_rtt
5787c478bd9Sstevel@tonic-gate
5797c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
5807c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %edx
5817c478bd9Sstevel@tonic-gate	movb	%dl, TTR_PRI(%esi)
5827c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%ebx), %edx
5837c478bd9Sstevel@tonic-gate	movb	%dl, TTR_SPL(%esi)
5847c478bd9Sstevel@tonic-gate#endif
5857c478bd9Sstevel@tonic-gate
5867c478bd9Sstevel@tonic-gate	movl	%eax, CPU_PRI(%ebx) /* update ipl */
5877c478bd9Sstevel@tonic-gate	movl	REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */
5887c478bd9Sstevel@tonic-gate
5897c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE
5907c478bd9Sstevel@tonic-gate	movb	%cl, TTR_VECTOR(%esi)
5917c478bd9Sstevel@tonic-gate#endif
5927c478bd9Sstevel@tonic-gate
5937c478bd9Sstevel@tonic-gate	/ At this point we can take one of two paths.  If the new priority
5947c478bd9Sstevel@tonic-gate	/ level is less than or equal to LOCK LEVEL then we jump to code that
5957c478bd9Sstevel@tonic-gate	/ will run this interrupt as a separate thread.  Otherwise the
5967c478bd9Sstevel@tonic-gate	/ interrupt is NOT run as a separate thread.
5977c478bd9Sstevel@tonic-gate
5987c478bd9Sstevel@tonic-gate	/ %edi - old priority level
5997c478bd9Sstevel@tonic-gate	/ %ebp - pointer to REGS
6007c478bd9Sstevel@tonic-gate	/ %ecx - translated vector
6017c478bd9Sstevel@tonic-gate	/ %eax - ipl of isr
6027c478bd9Sstevel@tonic-gate	/ %ebx - cpu pointer
6037c478bd9Sstevel@tonic-gate
6047c478bd9Sstevel@tonic-gate	cmpl 	$LOCK_LEVEL, %eax	/* compare to highest thread level */
6057c478bd9Sstevel@tonic-gate	jbe	intr_thread		/* process as a separate thread */
6067c478bd9Sstevel@tonic-gate
6077c478bd9Sstevel@tonic-gate	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
6087c478bd9Sstevel@tonic-gate	jne	2f
6097c478bd9Sstevel@tonic-gate
6107c478bd9Sstevel@tonic-gate	movl	REGOFF_PC(%ebp), %esi
6117c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
6127c478bd9Sstevel@tonic-gate	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
6137c478bd9Sstevel@tonic-gate	jz	1f
6147c478bd9Sstevel@tonic-gate	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
6157c478bd9Sstevel@tonic-gate	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
6167c478bd9Sstevel@tonic-gate	jmp	2f
6177c478bd9Sstevel@tonic-gate
6187c478bd9Sstevel@tonic-gate1:
6197c478bd9Sstevel@tonic-gate	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
6207c478bd9Sstevel@tonic-gate	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */
6217c478bd9Sstevel@tonic-gate
6227c478bd9Sstevel@tonic-gate2:
6237c478bd9Sstevel@tonic-gate	pushl	%ecx				/* vec */
6247c478bd9Sstevel@tonic-gate	pushl	%eax				/* newpil */
6257c478bd9Sstevel@tonic-gate
6267c478bd9Sstevel@tonic-gate	/
6277c478bd9Sstevel@tonic-gate	/ See if we are interrupting another high-level interrupt.
6287c478bd9Sstevel@tonic-gate	/
6297c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %eax
6307c478bd9Sstevel@tonic-gate	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
6317c478bd9Sstevel@tonic-gate	jz	0f
6327c478bd9Sstevel@tonic-gate	/
6337c478bd9Sstevel@tonic-gate	/ We have interrupted another high-level interrupt.
6347c478bd9Sstevel@tonic-gate	/ Load starting timestamp, compute interval, update cumulative counter.
6357c478bd9Sstevel@tonic-gate	/
6367c478bd9Sstevel@tonic-gate	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
637*7a364d25Sschwartz	movl	%ecx, %esi		/* save PIL for later */
638*7a364d25Sschwartz	HIGHPILBASE(%ebx, %ecx)
6397c478bd9Sstevel@tonic-gate_tsc_patch1:
6407c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
6417c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
642*7a364d25Sschwartz
643*7a364d25Sschwartz	PILBASE_INTRSTAT(%ebx, %esi)
644*7a364d25Sschwartz	TSC_ADD_TO(%esi, CPU_INTRSTAT)
645eda89462Sesolom	INTRACCTBASE(%ebx, %ecx)
646eda89462Sesolom	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
6477c478bd9Sstevel@tonic-gate	/
6487c478bd9Sstevel@tonic-gate	/ Another high-level interrupt is active below this one, so
6497c478bd9Sstevel@tonic-gate	/ there is no need to check for an interrupt thread. That will be
6507c478bd9Sstevel@tonic-gate	/ done by the lowest priority high-level interrupt active.
6517c478bd9Sstevel@tonic-gate	/
6527c478bd9Sstevel@tonic-gate	jmp	1f
6537c478bd9Sstevel@tonic-gate0:
6547c478bd9Sstevel@tonic-gate	/
6557c478bd9Sstevel@tonic-gate	/ See if we are interrupting a low-level interrupt thread.
6567c478bd9Sstevel@tonic-gate	/
6577c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi
6587c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%esi)
6597c478bd9Sstevel@tonic-gate	jz	1f
6607c478bd9Sstevel@tonic-gate	/
6617c478bd9Sstevel@tonic-gate	/ We have interrupted an interrupt thread. Account for its time slice
6627c478bd9Sstevel@tonic-gate	/ only if its time stamp is non-zero.
6637c478bd9Sstevel@tonic-gate	/
6647c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR_START+4(%esi)
6657c478bd9Sstevel@tonic-gate	jne	0f
6667c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR_START(%esi)
6677c478bd9Sstevel@tonic-gate	je	1f
6687c478bd9Sstevel@tonic-gate0:
6697c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */
670*7a364d25Sschwartz	PILBASE_INTRSTAT(%ebx, %ecx)
6717c478bd9Sstevel@tonic-gate_tsc_patch2:
6727c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
6737c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, T_INTR_START)
6747c478bd9Sstevel@tonic-gate	TSC_CLR(%esi, T_INTR_START)
6757c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
676eda89462Sesolom	INTRACCTBASE(%ebx, %ecx)
677eda89462Sesolom	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
6787c478bd9Sstevel@tonic-gate1:
6797c478bd9Sstevel@tonic-gate	/ Store starting timestamp in CPU structure for this PIL.
6807c478bd9Sstevel@tonic-gate	popl	%ecx			/* restore new PIL */
6817c478bd9Sstevel@tonic-gate	pushl	%ecx
682*7a364d25Sschwartz	HIGHPILBASE(%ebx, %ecx)
6837c478bd9Sstevel@tonic-gate_tsc_patch3:
6847c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
685*7a364d25Sschwartz	TSC_STORE(%ecx, CPU_PIL_HIGH_START)
6867c478bd9Sstevel@tonic-gate
6877c478bd9Sstevel@tonic-gate	popl	%eax			/* restore new pil */
6887c478bd9Sstevel@tonic-gate	popl	%ecx			/* vec */
6897c478bd9Sstevel@tonic-gate	/
6907c478bd9Sstevel@tonic-gate	/ Set bit for this PIL in CPU's interrupt active bitmask.
6917c478bd9Sstevel@tonic-gate	/
6927c478bd9Sstevel@tonic-gate
6937c478bd9Sstevel@tonic-gate	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)
6947c478bd9Sstevel@tonic-gate
6957c478bd9Sstevel@tonic-gate	/ Save old CPU_INTR_ACTV
6967c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %esi
6977c478bd9Sstevel@tonic-gate
6987c478bd9Sstevel@tonic-gate	cmpl	$15, %eax
6997c478bd9Sstevel@tonic-gate	jne	0f
7007c478bd9Sstevel@tonic-gate	/ PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv
7017c478bd9Sstevel@tonic-gate	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
7027c478bd9Sstevel@tonic-gate0:
7037c478bd9Sstevel@tonic-gate	btsl	%eax, CPU_INTR_ACTV(%ebx)
7047c478bd9Sstevel@tonic-gate	/
7057c478bd9Sstevel@tonic-gate	/ Handle high-level nested interrupt on separate interrupt stack
7067c478bd9Sstevel@tonic-gate	/
7077c478bd9Sstevel@tonic-gate	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
7087c478bd9Sstevel@tonic-gate	jnz	onstack			/* already on interrupt stack */
7097c478bd9Sstevel@tonic-gate	movl	%esp, %eax
7107c478bd9Sstevel@tonic-gate	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
7117c478bd9Sstevel@tonic-gate	pushl	%eax			/* save the thread stack pointer */
7127c478bd9Sstevel@tonic-gateonstack:
7137c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* get autovect structure before */
7147c478bd9Sstevel@tonic-gate					/* sti to save on AGI later */
7157c478bd9Sstevel@tonic-gate	sti				/* enable interrupts */
7167c478bd9Sstevel@tonic-gate	pushl	%ecx			/* save interrupt vector */
7177c478bd9Sstevel@tonic-gate	/
7187c478bd9Sstevel@tonic-gate	/ Get handler address
7197c478bd9Sstevel@tonic-gate	/
7207c478bd9Sstevel@tonic-gatepre_loop1:
7217c478bd9Sstevel@tonic-gate	movl	AVH_LINK(%esi, %ecx, 8), %esi
7227c478bd9Sstevel@tonic-gate	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
7237c478bd9Sstevel@tonic-gate				/* bl is DDI_INTR_CLAIMED status of chain */
7247c478bd9Sstevel@tonic-gate	testl	%esi, %esi		/* if pointer is null */
7257c478bd9Sstevel@tonic-gate	jz	.intr_ret		/* then skip */
7267c478bd9Sstevel@tonic-gateloop1:
7277c478bd9Sstevel@tonic-gate	incb	%bh
7287c478bd9Sstevel@tonic-gate	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
7297c478bd9Sstevel@tonic-gate	testl	%edx, %edx		/* if func is null */
7307c478bd9Sstevel@tonic-gate	jz	.intr_ret		/* then skip */
7317c478bd9Sstevel@tonic-gate	pushl	$0
7327c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)
7337c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)
7347c478bd9Sstevel@tonic-gate	pushl	AV_VECTOR(%esi)
7357c478bd9Sstevel@tonic-gate	pushl	AV_DIP(%esi)
7367c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__start
7377c478bd9Sstevel@tonic-gate	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
7387c478bd9Sstevel@tonic-gate	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
7397c478bd9Sstevel@tonic-gate	call	*%edx			/* call interrupt routine with arg */
7407c478bd9Sstevel@tonic-gate	addl	$8, %esp
7417c478bd9Sstevel@tonic-gate	movl	%eax, 16(%esp)
7427c478bd9Sstevel@tonic-gate	call	__dtrace_probe_interrupt__complete
7437c478bd9Sstevel@tonic-gate	addl	$20, %esp
7447c478bd9Sstevel@tonic-gate	orb	%al, %bl		/* see if anyone claims intpt. */
7457c478bd9Sstevel@tonic-gate	movl	AV_LINK(%esi), %esi	/* get next routine on list */
7467c478bd9Sstevel@tonic-gate	testl	%esi, %esi		/* if pointer is non-null */
7477c478bd9Sstevel@tonic-gate	jnz	loop1			/* then continue */
7487c478bd9Sstevel@tonic-gate
7497c478bd9Sstevel@tonic-gate.intr_ret:
7507c478bd9Sstevel@tonic-gate	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
7517c478bd9Sstevel@tonic-gate	je	.intr_ret1
7527c478bd9Sstevel@tonic-gate	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
7537c478bd9Sstevel@tonic-gate	jz	.intr_ret1
7547c478bd9Sstevel@tonic-gate	movl	(%esp), %ecx		/* else restore intr vector */
7557c478bd9Sstevel@tonic-gate	movl	$autovect, %esi		/* get autovect structure */
7567c478bd9Sstevel@tonic-gate	jmp	pre_loop1		/* and try again. */
7577c478bd9Sstevel@tonic-gate
7587c478bd9Sstevel@tonic-gate.intr_ret1:
7597c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)			/* get pointer to cpu struct */
7607c478bd9Sstevel@tonic-gate
7617c478bd9Sstevel@tonic-gate	cli
7627c478bd9Sstevel@tonic-gate	movl	CPU_PRI(%ebx), %esi
7637c478bd9Sstevel@tonic-gate
7647c478bd9Sstevel@tonic-gate	/ cpu_stats.sys.intr[PIL]++
7657c478bd9Sstevel@tonic-gate	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)
7667c478bd9Sstevel@tonic-gate
7677c478bd9Sstevel@tonic-gate	/
7687c478bd9Sstevel@tonic-gate	/ Clear bit for this PIL in CPU's interrupt active bitmask.
7697c478bd9Sstevel@tonic-gate	/
7707c478bd9Sstevel@tonic-gate
7717c478bd9Sstevel@tonic-gate	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)
7727c478bd9Sstevel@tonic-gate
7737c478bd9Sstevel@tonic-gate	cmpl	$15, %esi
7747c478bd9Sstevel@tonic-gate	jne	0f
7757c478bd9Sstevel@tonic-gate	/ Only clear bit if reference count is now zero.
7767c478bd9Sstevel@tonic-gate	decw	CPU_INTR_ACTV_REF(%ebx)
7777c478bd9Sstevel@tonic-gate	jnz	1f
7787c478bd9Sstevel@tonic-gate0:
7797c478bd9Sstevel@tonic-gate	btrl	%esi, CPU_INTR_ACTV(%ebx)
7807c478bd9Sstevel@tonic-gate1:
7817c478bd9Sstevel@tonic-gate	/
7827c478bd9Sstevel@tonic-gate	/ Take timestamp, compute interval, update cumulative counter.
7837c478bd9Sstevel@tonic-gate	/ esi = PIL
7847c478bd9Sstevel@tonic-gate_tsc_patch4:
7857c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
786*7a364d25Sschwartz	movl	%esi, %ecx		/* save for later */
787*7a364d25Sschwartz	HIGHPILBASE(%ebx, %esi)
7887c478bd9Sstevel@tonic-gate
7897c478bd9Sstevel@tonic-gate	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)
7907c478bd9Sstevel@tonic-gate
7917c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
792*7a364d25Sschwartz
793*7a364d25Sschwartz	PILBASE_INTRSTAT(%ebx, %ecx)
794*7a364d25Sschwartz	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
795eda89462Sesolom	INTRACCTBASE(%ebx, %esi)
796eda89462Sesolom	TSC_ADD_TO(%esi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
7977c478bd9Sstevel@tonic-gate	/
7987c478bd9Sstevel@tonic-gate	/ Check for lower-PIL nested high-level interrupt beneath current one
7997c478bd9Sstevel@tonic-gate	/ If so, place a starting timestamp in its pil_high_start entry.
8007c478bd9Sstevel@tonic-gate	/
8017c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %eax
8027c478bd9Sstevel@tonic-gate	movl	%eax, %esi
8037c478bd9Sstevel@tonic-gate	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
8047c478bd9Sstevel@tonic-gate	jz	0f
8057c478bd9Sstevel@tonic-gate	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
806*7a364d25Sschwartz	HIGHPILBASE(%ebx, %ecx)
8077c478bd9Sstevel@tonic-gate_tsc_patch5:
8087c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
809*7a364d25Sschwartz	TSC_STORE(%ecx, CPU_PIL_HIGH_START)
8107c478bd9Sstevel@tonic-gate	/
8117c478bd9Sstevel@tonic-gate	/ Another high-level interrupt is active below this one, so
8127c478bd9Sstevel@tonic-gate	/ there is no need to check for an interrupt thread. That will be
8137c478bd9Sstevel@tonic-gate	/ done by the lowest priority high-level interrupt active.
8147c478bd9Sstevel@tonic-gate	/
8157c478bd9Sstevel@tonic-gate	jmp	1f
8167c478bd9Sstevel@tonic-gate0:
8177c478bd9Sstevel@tonic-gate	/ Check to see if there is a low-level interrupt active. If so,
8187c478bd9Sstevel@tonic-gate	/ place a starting timestamp in the thread structure.
8197c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi
8207c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%esi)
8217c478bd9Sstevel@tonic-gate	jz	1f
8227c478bd9Sstevel@tonic-gate_tsc_patch6:
8237c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
824*7a364d25Sschwartz	TSC_STORE(%esi, T_INTR_START)
8257c478bd9Sstevel@tonic-gate1:
8267c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PRI(%ebx)
8277c478bd9Sstevel@tonic-gate				/* interrupt vector already on stack */
8287c478bd9Sstevel@tonic-gate	pushl	%edi			/* old ipl */
8297c478bd9Sstevel@tonic-gate	call	*setlvlx
8307c478bd9Sstevel@tonic-gate	addl	$8, %esp		/* eax contains the current ipl */
8317c478bd9Sstevel@tonic-gate
8327c478bd9Sstevel@tonic-gate	movl	CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
8337c478bd9Sstevel@tonic-gate	shrl	$LOCK_LEVEL + 1, %esi	/* HI PRI intrs. */
8347c478bd9Sstevel@tonic-gate	jnz	.intr_ret2
8357c478bd9Sstevel@tonic-gate	popl	%esp			/* restore the thread stack pointer */
8367c478bd9Sstevel@tonic-gate.intr_ret2:
8377c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
8387c478bd9Sstevel@tonic-gate	orl	%edx, %edx
8397c478bd9Sstevel@tonic-gate	jz	_sys_rtt
8407c478bd9Sstevel@tonic-gate	jmp	dosoftint	/* check for softints before we return. */
8417c478bd9Sstevel@tonic-gate	SET_SIZE(cmnint)
8427c478bd9Sstevel@tonic-gate	SET_SIZE(_interrupt)
8437c478bd9Sstevel@tonic-gate
8447c478bd9Sstevel@tonic-gate#endif	/* __i386 */
8457c478bd9Sstevel@tonic-gate
/*
 * Declare a uintptr_t which has the size of _interrupt to enable stack
 * traceback code to know when a regs structure is on the stack.
 */
	.globl	_interrupt_size
	.align	CLONGSIZE
_interrupt_size:
	.NWORD	. - _interrupt		/* sizeof (_interrupt) in bytes */
	.type	_interrupt_size, @object
8557c478bd9Sstevel@tonic-gate
8567c478bd9Sstevel@tonic-gate#endif	/* __lint */
8577c478bd9Sstevel@tonic-gate
8587c478bd9Sstevel@tonic-gate#if defined(__i386)
8597c478bd9Sstevel@tonic-gate
8607c478bd9Sstevel@tonic-gate/*
8617c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread.
8627c478bd9Sstevel@tonic-gate *	Entry:  traps disabled.
8637c478bd9Sstevel@tonic-gate *		%edi - old priority level
8647c478bd9Sstevel@tonic-gate *		%ebp - pointer to REGS
8657c478bd9Sstevel@tonic-gate *		%ecx - translated vector
8667c478bd9Sstevel@tonic-gate *		%eax - ipl of isr.
8677c478bd9Sstevel@tonic-gate *		%ebx - pointer to CPU struct
 *	Uses:	Does not return to its caller; control leaves via
 *		_sys_rtt, dosoftint, or swtch().
8697c478bd9Sstevel@tonic-gate */
8707c478bd9Sstevel@tonic-gate
8717c478bd9Sstevel@tonic-gate#if !defined(__lint)
8727c478bd9Sstevel@tonic-gate
	ENTRY_NP(intr_thread)
	/
	/ Run an interrupt at or below LOCK_LEVEL on a dedicated interrupt
	/ thread (see the register contract in the header comment above:
	/ %edi = old PIL, %ebp = &REGS, %ecx = translated vector,
	/ %eax = ipl of isr, %ebx = CPU pointer).
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */

	/
	/ Are we interrupting an interrupt thread? If so, account for it.
	/
	testw	$T_INTR_THREAD, T_FLAGS(%edx)
	jz	0f
	pushl	%ecx			/* save vector across accounting */
	pushl	%eax			/* save ipl; rdtsc writes %eax */
	movl	%edx, %esi		/* %esi = interrupted intr thread */
_tsc_patch7:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)	/* tsc -= t_intr_start */
	TSC_CLR(%esi, T_INTR_START)	/* zero stamp: slice now accounted */
	movzbl	T_PIL(%esi), %ecx	/* %ecx = PIL of interrupted thread */
	PILBASE_INTRSTAT(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)	/* cpu_intrstat[pil] += interval */
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	movl	%esi, %edx		/* reload thread ptr; rdtsc wrote %edx */
	popl	%eax			/* restore ipl */
	popl	%ecx			/* restore vector */
0:
	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
	pushl	%edi			/* get a temporary register */
	UNLINK_INTR_THREAD(%ebx, %esi, %edi)

	movl	T_LWP(%edx), %edi
	movl	%edx, T_INTR(%esi)		/* push old thread */
	movl	%edi, T_LWP(%esi)	/* intr thread inherits pinned lwp */
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ chain the interrupted thread onto list from the interrupt thread.
	/ Set the new interrupt thread as the current one.
	/
	popl	%edi			/* Don't need a temp reg anymore */
	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
	movl	%esp, %ebp
	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
	pushl	%eax				/* save the ipl */
	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
					/* is loaded on some other cpu. */
	addl	%ebx, %eax		/* convert level to dispatch priority */
	movw	%ax, T_PRI(%esi)

	/
	/ Take timestamp and store it in the thread structure.
	/
	movl	%eax, %ebx		/* save priority over rdtsc */
_tsc_patch8:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)	/* t_intr_start = now */
	movl	%ebx, %eax		/* restore priority */

	/ The following 3 instructions need not be in cli.
	/ Putting them here only to avoid the AGI penalty on Pentiums.

	pushl	%ecx			/* save interrupt vector. */
	pushl	%esi			/* save interrupt thread */
	movl	$autovect, %esi		/* get autovect structure */
	sti				/* enable interrupts */

	/ Fast event tracing.
	LOADCPU(%ebx)			/* %ebx was clobbered above: reload */
	movl	CPU_FTRACE_STATE(%ebx), %ebx
	testl	$FTRACE_ENABLED, %ebx
	jz	1f			/* tracing off: dispatch handlers */

	movl	8(%esp), %ebx		/* saved ipl (pushed before vec/thread) */
	pushl	%ebx			/* ipl */
	pushl	%ecx			/* int vector */
	movl	T_SP(%edx), %ebx	/* interrupted thread's saved stack */
	pushl	%ebx			/* &regs */
	pushl	$_ftrace_intr_thread_fmt
	call	ftrace_3_notick
	addl	$8, %esp		/* pop format and &regs */
	popl	%ecx			/* restore int vector */
	addl	$4, %esp		/* discard ipl copy */
1:
pre_loop2:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi	/* if pointer is null */
	jz	loop_done2	/* we're done */
loop2:
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if pointer is null */
	jz	loop_done2		/* we're done */
	incb	%bh
	/ Push the five dtrace probe args; popped after probe__complete.
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp		/* pop the two handler args */
	movl	%eax, 16(%esp)		/* stash handler return for probe */
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp		/* pop the five probe args */
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_TICKSP(%esi), %ecx	/* per-handler tick counter, if any */
	testl	%ecx, %ecx
	jz	no_time			/* no counter: skip time accounting */
	call	intr_get_time		/* get time delta for this handler */
	movl	AV_TICKSP(%esi), %ecx	/* reload; the call may clobber %ecx */
	TSC_ADD_TO(%ecx, 0)		/* *av_ticksp += delta */
no_time:
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop2			/* continue */
loop_done2:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.loop_done2_1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.loop_done2_1
	/ Shared (chained) vector that was claimed: rescan the chain until
	/ a pass completes with no handler claiming the interrupt.
	movl	$autovect, %esi		/* else get autovect structure */
	movl	4(%esp), %ecx		/* restore intr vector */
	jmp	pre_loop2		/* and try again. */
.loop_done2_1:
	popl	%esi			/* restore intr thread pointer */

	LOADCPU(%ebx)

	cli		/* protect interrupt thread pool and intr_actv */
	movzbl	T_PIL(%esi), %eax	/* %eax = this intr thread's PIL */

	/ Save value in regs
	pushl	%eax			/* current pil */
	pushl	%edx			/* NOTE(review): %edx looks stale */
					/* here; push/pop seems only to */
					/* preserve it -- confirm */
	pushl	%edi			/* old pil */

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)

	/
	/ Take timestamp, compute interval, and update cumulative counter.
	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %edi		/* copy PIL; rdtsc writes %eax */

	ASSERT_T_INTR_START_NZ(%esi)

_tsc_patch9:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)	/* tsc -= t_intr_start */
	PILBASE_INTRSTAT(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRSTAT)	/* cpu_intrstat[pil] += interval */
	INTRACCTBASE(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%edi			/* restore old pil */
	popl	%edx
	popl	%eax			/* restore this thread's PIL */

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)

	btrl	%eax, CPU_INTR_ACTV(%ebx)

	/ if there is still an interrupted thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to intr_thread_exit
	cmpl	$0, T_INTR(%esi)
	je	intr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)

	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	intr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
intr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)
					/* intr vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx		/* eax contains the current ipl */
	/
	/ Switch back to the interrupted thread
	movl	T_INTR(%esi), %ecx

	/ Place starting timestamp in interrupted thread's thread structure.
_tsc_patch10:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, T_INTR_START)	/* pinned thread resumes its slice */

	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp
	movl	%ecx, CPU_THREAD(%ebx)	/* pinned thread is curthread again */

	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */

	/
	/ An interrupt returned on what was once (and still might be)
	/ an interrupt thread stack, but the interrupted process is no longer
	/ there.  This means the interrupt must have blocked.
	/
	/ There is no longer a thread under this one, so put this thread back
	/ on the CPU's free list and resume the idle thread which will dispatch
	/ the next thread to run.
	/
	/ All interrupts are disabled here
	/

intr_thread_exit:
#ifdef DEBUG
	incl	intr_thread_cnt		/* count interrupts that blocked */
#endif
	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	esi	interrupt thread
	/	edi	old ipl
	/	ebx	ptr to CPU struct

	/ Set CPU's base SPL level based on active interrupts bitmask
	call	set_base_spl

	movl	CPU_BASE_SPL(%ebx), %edi
	movl	%edi, CPU_PRI(%ebx)
					/* interrupt vector already on stack */
	pushl	%edi
	call	*setlvlx
	addl	$8, %esp		/* XXX - don't need to pop since */
					/* we are ready to switch */
	call	splhigh			/* block all intrs below lock level */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call 	swtch
	/ swtch() shouldn't return

	SET_SIZE(intr_thread)
11517c478bd9Sstevel@tonic-gate
11527c478bd9Sstevel@tonic-gate#endif	/* __lint */
11537c478bd9Sstevel@tonic-gate#endif	/* __i386 */
11547c478bd9Sstevel@tonic-gate
/*
 * Set the CPU's base SPL level, based on which interrupt levels are active.
 *	Called at spl7 or above.
 */
11597c478bd9Sstevel@tonic-gate
11607c478bd9Sstevel@tonic-gate#if defined(__lint)
11617c478bd9Sstevel@tonic-gate
/* Lint stub; the real implementation is the assembly version below. */
void
set_base_spl(void)
{}
11657c478bd9Sstevel@tonic-gate
11667c478bd9Sstevel@tonic-gate#else	/* __lint */
11677c478bd9Sstevel@tonic-gate
	ENTRY_NP(set_base_spl)
	/
	/ Compute the CPU's base SPL: the PIL of the highest-priority
	/ active interrupt, or zero if none is active.
	/
	/ Only the low 16 bits of cpu_intr_actv are PIL bits; the upper
	/ 16 bits hold the PIL-15 nest count (see the CPU_INTR_ACTV_REF
	/ updates in the handler above), so the shifts below also serve
	/ to discard the nest count before the bit scan.
	/
	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
	testl	%eax, %eax		/* is it zero? */
	jz	setbase			/* no active levels: base SPL = 0 */
	testl	$0xff00, %eax		/* any PIL in 8..15 active? */
	jnz	ah_set
	shl	$24, %eax		/* shift 'em over so we can find */
					/* the 1st bit faster */
	bsrl	%eax, %eax		/* index of highest set bit */
	subl	$24, %eax		/* undo shift: back to PIL 0..7 */
setbase:
	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
	ret
ah_set:
	shl	$16, %eax		/* drop nest count in upper 16 bits */
	bsrl	%eax, %eax		/* index of highest set bit */
	subl	$16, %eax		/* undo shift: back to PIL 8..15 */
	jmp	setbase
	SET_SIZE(set_base_spl)
11877c478bd9Sstevel@tonic-gate
11887c478bd9Sstevel@tonic-gate#endif	/* __lint */
11897c478bd9Sstevel@tonic-gate
11907c478bd9Sstevel@tonic-gate#if defined(__i386)
11917c478bd9Sstevel@tonic-gate
11927c478bd9Sstevel@tonic-gate/*
11937c478bd9Sstevel@tonic-gate * int
11947c478bd9Sstevel@tonic-gate * intr_passivate(from, to)
11957c478bd9Sstevel@tonic-gate *      thread_id_t     from;           interrupt thread
11967c478bd9Sstevel@tonic-gate *      thread_id_t     to;             interrupted thread
11977c478bd9Sstevel@tonic-gate *
11987c478bd9Sstevel@tonic-gate *	intr_passivate(t, itp) makes the interrupted thread "t" runnable.
11997c478bd9Sstevel@tonic-gate *
12007c478bd9Sstevel@tonic-gate *	Since t->t_sp has already been saved, t->t_pc is all that needs
12017c478bd9Sstevel@tonic-gate *	set in this function.
12027c478bd9Sstevel@tonic-gate *
12037c478bd9Sstevel@tonic-gate *	Returns interrupt level of the thread.
12047c478bd9Sstevel@tonic-gate */
12057c478bd9Sstevel@tonic-gate
12067c478bd9Sstevel@tonic-gate#if defined(__lint)
12077c478bd9Sstevel@tonic-gate
12087c478bd9Sstevel@tonic-gate/* ARGSUSED */
12097c478bd9Sstevel@tonic-gateint
12107c478bd9Sstevel@tonic-gateintr_passivate(kthread_id_t from, kthread_id_t to)
12117c478bd9Sstevel@tonic-gate{ return (0); }
12127c478bd9Sstevel@tonic-gate
12137c478bd9Sstevel@tonic-gate#else	/* __lint */
12147c478bd9Sstevel@tonic-gate
12157c478bd9Sstevel@tonic-gate	ENTRY(intr_passivate)
12167c478bd9Sstevel@tonic-gate	movl	8(%esp), %eax		/* interrupted thread  */
12177c478bd9Sstevel@tonic-gate	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */
12187c478bd9Sstevel@tonic-gate
12197c478bd9Sstevel@tonic-gate	movl	4(%esp), %eax		/* interrupt thread */
12207c478bd9Sstevel@tonic-gate	movl	T_STACK(%eax), %eax	/* get the pointer to the start of */
12217c478bd9Sstevel@tonic-gate					/* of the interrupt thread stack */
12227c478bd9Sstevel@tonic-gate	movl	-4(%eax), %eax		/* interrupt level was the first */
12237c478bd9Sstevel@tonic-gate					/* thing pushed onto the stack */
12247c478bd9Sstevel@tonic-gate	ret
12257c478bd9Sstevel@tonic-gate	SET_SIZE(intr_passivate)
12267c478bd9Sstevel@tonic-gate
12277c478bd9Sstevel@tonic-gate#endif	/* __lint */
12287c478bd9Sstevel@tonic-gate#endif	/* __i386 */
12297c478bd9Sstevel@tonic-gate
12307c478bd9Sstevel@tonic-gate#if defined(__lint)
12317c478bd9Sstevel@tonic-gate
12327c478bd9Sstevel@tonic-gatevoid
12337c478bd9Sstevel@tonic-gatefakesoftint(void)
12347c478bd9Sstevel@tonic-gate{}
12357c478bd9Sstevel@tonic-gate
12367c478bd9Sstevel@tonic-gate#else	/* __lint */
12377c478bd9Sstevel@tonic-gate
12387c478bd9Sstevel@tonic-gate	/
12397c478bd9Sstevel@tonic-gate	/ If we're here, we're being called from splx() to fake a soft
12407c478bd9Sstevel@tonic-gate	/ interrupt (note that interrupts are still disabled from splx()).
12417c478bd9Sstevel@tonic-gate	/ We execute this code when a soft interrupt is posted at
12427c478bd9Sstevel@tonic-gate	/ level higher than the CPU's current spl; when spl is lowered in
12437c478bd9Sstevel@tonic-gate	/ splx(), it will see the softint and jump here.  We'll do exactly
12447c478bd9Sstevel@tonic-gate	/ what a trap would do:  push our flags, %cs, %eip, error code
12457c478bd9Sstevel@tonic-gate	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
12467c478bd9Sstevel@tonic-gate	/ and branch to the dosoftint() code.
12477c478bd9Sstevel@tonic-gate	/
12487c478bd9Sstevel@tonic-gate#if defined(__amd64)
12497c478bd9Sstevel@tonic-gate
12507c478bd9Sstevel@tonic-gate	/*
12517c478bd9Sstevel@tonic-gate	 * In 64-bit mode, iretq -always- pops all five regs
12527c478bd9Sstevel@tonic-gate	 * Imitate the 16-byte auto-align of the stack, and the
12537c478bd9Sstevel@tonic-gate	 * zero-ed out %ss value.
12547c478bd9Sstevel@tonic-gate	 */
12557c478bd9Sstevel@tonic-gate	ENTRY_NP(fakesoftint)
12567c478bd9Sstevel@tonic-gate	movq	%rsp, %r11
12577c478bd9Sstevel@tonic-gate	andq	$-16, %rsp
12587c478bd9Sstevel@tonic-gate	pushq	$KDS_SEL	/* %ss */
12597c478bd9Sstevel@tonic-gate	pushq	%r11		/* %rsp */
12607c478bd9Sstevel@tonic-gate	pushf			/* rflags */
12617c478bd9Sstevel@tonic-gate	pushq	$KCS_SEL	/* %cs */
12627c478bd9Sstevel@tonic-gate	leaq	fakesoftint_return(%rip), %r11
12637c478bd9Sstevel@tonic-gate	pushq	%r11		/* %rip */
12647c478bd9Sstevel@tonic-gate	pushq	$0		/* err */
12657c478bd9Sstevel@tonic-gate	pushq	$T_SOFTINT	/* trap */
12667c478bd9Sstevel@tonic-gate	jmp	cmnint
12677c478bd9Sstevel@tonic-gate	SET_SIZE(fakesoftint)
12687c478bd9Sstevel@tonic-gate
12697c478bd9Sstevel@tonic-gate#elif defined(__i386)
12707c478bd9Sstevel@tonic-gate
12717c478bd9Sstevel@tonic-gate	ENTRY_NP(fakesoftint)
12727c478bd9Sstevel@tonic-gate	pushf
12737c478bd9Sstevel@tonic-gate	push	%cs
12747c478bd9Sstevel@tonic-gate	push	$fakesoftint_return
12757c478bd9Sstevel@tonic-gate	push	$0
12767c478bd9Sstevel@tonic-gate	push	$T_SOFTINT
12777c478bd9Sstevel@tonic-gate	jmp	cmnint
12787c478bd9Sstevel@tonic-gate	SET_SIZE(fakesoftint)
12797c478bd9Sstevel@tonic-gate
12807c478bd9Sstevel@tonic-gate#endif	/* __i386 */
12817c478bd9Sstevel@tonic-gate
12827c478bd9Sstevel@tonic-gate	.align	CPTRSIZE
12837c478bd9Sstevel@tonic-gate	.globl	_fakesoftint_size
12847c478bd9Sstevel@tonic-gate	.type	_fakesoftint_size, @object
12857c478bd9Sstevel@tonic-gate_fakesoftint_size:
12867c478bd9Sstevel@tonic-gate	.NWORD	. - fakesoftint
12877c478bd9Sstevel@tonic-gate	SET_SIZE(_fakesoftint_size)
12887c478bd9Sstevel@tonic-gate
12897c478bd9Sstevel@tonic-gate/*
12907c478bd9Sstevel@tonic-gate * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
12917c478bd9Sstevel@tonic-gate * Process software interrupts
12927c478bd9Sstevel@tonic-gate * Interrupts are disabled here.
12937c478bd9Sstevel@tonic-gate */
12947c478bd9Sstevel@tonic-gate#if defined(__i386)
12957c478bd9Sstevel@tonic-gate
12967c478bd9Sstevel@tonic-gate	ENTRY_NP(dosoftint)
12977c478bd9Sstevel@tonic-gate
12987c478bd9Sstevel@tonic-gate	bsrl	%edx, %edx		/* find highest pending interrupt */
12997c478bd9Sstevel@tonic-gate	cmpl 	%edx, %edi		/* if curipl >= pri soft pending intr */
13007c478bd9Sstevel@tonic-gate	jae	_sys_rtt		/* skip */
13017c478bd9Sstevel@tonic-gate
13027c478bd9Sstevel@tonic-gate	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
13037c478bd9Sstevel@tonic-gate	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
13047c478bd9Sstevel@tonic-gate	jae	_sys_rtt		/* skip */
13057c478bd9Sstevel@tonic-gate
13067c478bd9Sstevel@tonic-gate	lock				/* MP protect */
13077c478bd9Sstevel@tonic-gate	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
13087c478bd9Sstevel@tonic-gate	jnc	dosoftint_again
13097c478bd9Sstevel@tonic-gate
13107c478bd9Sstevel@tonic-gate	movl	%edx, CPU_PRI(%ebx) /* set IPL to sofint level */
13117c478bd9Sstevel@tonic-gate	pushl	%edx
13127c478bd9Sstevel@tonic-gate	call	*setspl			/* mask levels upto the softint level */
13137c478bd9Sstevel@tonic-gate	popl	%eax			/* priority we are at in %eax */
13147c478bd9Sstevel@tonic-gate
13157c478bd9Sstevel@tonic-gate	/ Get set to run interrupt thread.
13167c478bd9Sstevel@tonic-gate	/ There should always be an interrupt thread since we allocate one
13177c478bd9Sstevel@tonic-gate	/ for each level on the CPU.
13187c478bd9Sstevel@tonic-gate	UNLINK_INTR_THREAD(%ebx, %esi, %edx)
13197c478bd9Sstevel@tonic-gate
13207c478bd9Sstevel@tonic-gate	/
13217c478bd9Sstevel@tonic-gate	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
13227c478bd9Sstevel@tonic-gate	/ of events here - in particular that t->t_lwp of the interrupt
13237c478bd9Sstevel@tonic-gate	/ thread is set to the pinned thread *before* curthread is changed
13247c478bd9Sstevel@tonic-gate	/
13257c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %ecx
13267c478bd9Sstevel@tonic-gate
13277c478bd9Sstevel@tonic-gate	/ If we are interrupting an interrupt thread, account for it.
13287c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
13297c478bd9Sstevel@tonic-gate	jz	0f
13307c478bd9Sstevel@tonic-gate	pushl	%eax
13317c478bd9Sstevel@tonic-gate	movl	%eax, %ebp
13327c478bd9Sstevel@tonic-gate_tsc_patch11:
13337c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
1334*7a364d25Sschwartz	PILBASE_INTRSTAT(%ebx, %ebp)
13357c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%ecx, T_INTR_START)
13367c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
1337eda89462Sesolom	INTRACCTBASE(%ebx, %ebp)
1338eda89462Sesolom	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
13397c478bd9Sstevel@tonic-gate	popl	%eax
13407c478bd9Sstevel@tonic-gate0:
13417c478bd9Sstevel@tonic-gate	movl	T_LWP(%ecx), %ebp
13427c478bd9Sstevel@tonic-gate	movl	%ebp, T_LWP(%esi)
13437c478bd9Sstevel@tonic-gate	/
13447c478bd9Sstevel@tonic-gate	/ Threads on the interrupt thread free list could have state already
13457c478bd9Sstevel@tonic-gate	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
13467c478bd9Sstevel@tonic-gate	/ Could eliminate the next two instructions with a little work.
13477c478bd9Sstevel@tonic-gate	/
13487c478bd9Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%esi)
13497c478bd9Sstevel@tonic-gate	/
13507c478bd9Sstevel@tonic-gate	/ Push interrupted thread onto list from new thread.
13517c478bd9Sstevel@tonic-gate	/ Set the new thread as the current one.
13527c478bd9Sstevel@tonic-gate	/ Set interrupted thread's T_SP because if it is the idle thread,
13537c478bd9Sstevel@tonic-gate	/ Resume() may use that stack between threads.
13547c478bd9Sstevel@tonic-gate	/
13557c478bd9Sstevel@tonic-gate	movl	%esp, T_SP(%ecx)		/* mark stack for resume */
13567c478bd9Sstevel@tonic-gate	movl	%ecx, T_INTR(%esi)		/* push old thread */
13577c478bd9Sstevel@tonic-gate	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
13587c478bd9Sstevel@tonic-gate	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
13597c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
13607c478bd9Sstevel@tonic-gate
13617c478bd9Sstevel@tonic-gate	pushl	%eax			/* push ipl as first element in stack */
13627c478bd9Sstevel@tonic-gate					/* see intr_passivate() */
13637c478bd9Sstevel@tonic-gate	/
13647c478bd9Sstevel@tonic-gate	/ Set bit for this PIL in CPU's interrupt active bitmask.
13657c478bd9Sstevel@tonic-gate	/
13667c478bd9Sstevel@tonic-gate
13677c478bd9Sstevel@tonic-gate	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)
13687c478bd9Sstevel@tonic-gate
13697c478bd9Sstevel@tonic-gate	btsl	%eax, CPU_INTR_ACTV(%ebx)
13707c478bd9Sstevel@tonic-gate
13717c478bd9Sstevel@tonic-gate	/
13727c478bd9Sstevel@tonic-gate	/ Initialize thread priority level from intr_pri
13737c478bd9Sstevel@tonic-gate	/
13747c478bd9Sstevel@tonic-gate	movb	%al, T_PIL(%esi)	/* store pil */
13757c478bd9Sstevel@tonic-gate	movzwl	intr_pri, %ecx
13767c478bd9Sstevel@tonic-gate	addl	%eax, %ecx		/* convert level to dispatch priority */
13777c478bd9Sstevel@tonic-gate	movw	%cx, T_PRI(%esi)
13787c478bd9Sstevel@tonic-gate
13797c478bd9Sstevel@tonic-gate	/
13807c478bd9Sstevel@tonic-gate	/ Store starting timestamp in thread structure.
13817c478bd9Sstevel@tonic-gate	/ esi = thread, ebx = cpu pointer, eax = PIL
13827c478bd9Sstevel@tonic-gate	/
13837c478bd9Sstevel@tonic-gate	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
13847c478bd9Sstevel@tonic-gate_tsc_patch12:
13857c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
1386*7a364d25Sschwartz	TSC_STORE(%esi, T_INTR_START)
13877c478bd9Sstevel@tonic-gate
13887c478bd9Sstevel@tonic-gate	sti				/* enable interrupts */
13897c478bd9Sstevel@tonic-gate
13907c478bd9Sstevel@tonic-gate	/
13917c478bd9Sstevel@tonic-gate	/ Enabling interrupts (above) could raise the current
13927c478bd9Sstevel@tonic-gate	/ IPL and base SPL. But, we continue processing the current soft
13937c478bd9Sstevel@tonic-gate	/ interrupt and we will check the base SPL next time in the loop
13947c478bd9Sstevel@tonic-gate	/ so that blocked interrupt thread would get a chance to run.
13957c478bd9Sstevel@tonic-gate	/
13967c478bd9Sstevel@tonic-gate
13977c478bd9Sstevel@tonic-gate	/
13987c478bd9Sstevel@tonic-gate	/ dispatch soft interrupts
13997c478bd9Sstevel@tonic-gate	/
14007c478bd9Sstevel@tonic-gate	pushl	%ecx
14017c478bd9Sstevel@tonic-gate	call	av_dispatch_softvect
14027c478bd9Sstevel@tonic-gate	addl	$4, %esp
14037c478bd9Sstevel@tonic-gate
14047c478bd9Sstevel@tonic-gate	cli				/* protect interrupt thread pool */
14057c478bd9Sstevel@tonic-gate					/* and softinfo & sysinfo */
14067c478bd9Sstevel@tonic-gate	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
14077c478bd9Sstevel@tonic-gate	movzbl	T_PIL(%esi), %ecx
14087c478bd9Sstevel@tonic-gate
14097c478bd9Sstevel@tonic-gate	/ cpu_stats.sys.intr[PIL]++
14107c478bd9Sstevel@tonic-gate	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)
14117c478bd9Sstevel@tonic-gate
14127c478bd9Sstevel@tonic-gate	/
14137c478bd9Sstevel@tonic-gate	/ Clear bit for this PIL in CPU's interrupt active bitmask.
14147c478bd9Sstevel@tonic-gate	/
14157c478bd9Sstevel@tonic-gate
14167c478bd9Sstevel@tonic-gate	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)
14177c478bd9Sstevel@tonic-gate
14187c478bd9Sstevel@tonic-gate	btrl	%ecx, CPU_INTR_ACTV(%ebx)
14197c478bd9Sstevel@tonic-gate
14207c478bd9Sstevel@tonic-gate	/
14217c478bd9Sstevel@tonic-gate	/ Take timestamp, compute interval, update cumulative counter.
14227c478bd9Sstevel@tonic-gate	/ esi = thread, ebx = cpu, ecx = PIL
14237c478bd9Sstevel@tonic-gate	/
1424*7a364d25Sschwartz	PILBASE_INTRSTAT(%ebx, %ecx)
14257c478bd9Sstevel@tonic-gate_tsc_patch13:
14267c478bd9Sstevel@tonic-gate	nop; nop		/* patched to rdtsc if available */
14277c478bd9Sstevel@tonic-gate	TSC_SUB_FROM(%esi, T_INTR_START)
14287c478bd9Sstevel@tonic-gate	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
1429eda89462Sesolom	INTRACCTBASE(%ebx, %ecx)
1430eda89462Sesolom	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
14317c478bd9Sstevel@tonic-gate
14327c478bd9Sstevel@tonic-gate	/ if there is still an interrupt thread underneath this one
14337c478bd9Sstevel@tonic-gate	/ then the interrupt was never blocked and the return is fairly
14347c478bd9Sstevel@tonic-gate	/ simple.  Otherwise jump to softintr_thread_exit.
14357c478bd9Sstevel@tonic-gate	/ softintr_thread_exit expect esi to be curthread & ebx to be ipl.
14367c478bd9Sstevel@tonic-gate	cmpl	$0, T_INTR(%esi)
14377c478bd9Sstevel@tonic-gate	je	softintr_thread_exit
14387c478bd9Sstevel@tonic-gate
14397c478bd9Sstevel@tonic-gate	/
14407c478bd9Sstevel@tonic-gate	/ link the thread back onto the interrupt thread pool
14417c478bd9Sstevel@tonic-gate	LINK_INTR_THREAD(%ebx, %esi, %edx)
14427c478bd9Sstevel@tonic-gate
14437c478bd9Sstevel@tonic-gate	/ set the thread state to free so kmdb doesn't see it
14447c478bd9Sstevel@tonic-gate	movl	$FREE_THREAD, T_STATE(%esi)
14457c478bd9Sstevel@tonic-gate	/
14467c478bd9Sstevel@tonic-gate	/ Switch back to the interrupted thread
14477c478bd9Sstevel@tonic-gate	movl	T_INTR(%esi), %ecx
14487c478bd9Sstevel@tonic-gate	movl	%ecx, CPU_THREAD(%ebx)
14497c478bd9Sstevel@tonic-gate	movl	T_SP(%ecx), %esp	/* restore stack pointer */
14507c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
14517c478bd9Sstevel@tonic-gate
14527c478bd9Sstevel@tonic-gate	/ If we are returning to an interrupt thread, store a starting
14537c478bd9Sstevel@tonic-gate	/ timestamp in the thread structure.
14547c478bd9Sstevel@tonic-gate	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
14557c478bd9Sstevel@tonic-gate	jz	0f
14567c478bd9Sstevel@tonic-gate_tsc_patch14:
14577c478bd9Sstevel@tonic-gate	nop; nop			/* patched to rdtsc if available */
1458*7a364d25Sschwartz	TSC_STORE(%ecx, T_INTR_START)
14597c478bd9Sstevel@tonic-gate0:
14607c478bd9Sstevel@tonic-gate	movl	CPU_BASE_SPL(%ebx), %eax
14617c478bd9Sstevel@tonic-gate	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
14627c478bd9Sstevel@tonic-gate	jae	softintr_restore_ipl	/* then use oldipl */
14637c478bd9Sstevel@tonic-gate	movl	%eax, %edi		/* else use basespl */
14647c478bd9Sstevel@tonic-gatesoftintr_restore_ipl:
14657c478bd9Sstevel@tonic-gate	movl	%edi, CPU_PRI(%ebx) /* set IPL to old level */
14667c478bd9Sstevel@tonic-gate	pushl	%edi
14677c478bd9Sstevel@tonic-gate	call	*setspl
14687c478bd9Sstevel@tonic-gate	popl	%eax
14697c478bd9Sstevel@tonic-gatedosoftint_again:
14707c478bd9Sstevel@tonic-gate	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
14717c478bd9Sstevel@tonic-gate	orl	%edx, %edx
14727c478bd9Sstevel@tonic-gate	jz	_sys_rtt
14737c478bd9Sstevel@tonic-gate	jmp	dosoftint		/* process more software interrupts */
14747c478bd9Sstevel@tonic-gate
14757c478bd9Sstevel@tonic-gatesoftintr_thread_exit:
14767c478bd9Sstevel@tonic-gate	/
14777c478bd9Sstevel@tonic-gate	/ Put thread back on the interrupt thread list.
14787c478bd9Sstevel@tonic-gate	/ As a reminder, the regs at this point are
14797c478bd9Sstevel@tonic-gate	/	%esi	interrupt thread
14807c478bd9Sstevel@tonic-gate
14817c478bd9Sstevel@tonic-gate	/
14827c478bd9Sstevel@tonic-gate	/ This was an interrupt thread, so set CPU's base SPL level
14837c478bd9Sstevel@tonic-gate	/ set_base_spl only uses %eax.
14847c478bd9Sstevel@tonic-gate	/
14857c478bd9Sstevel@tonic-gate	call	set_base_spl		/* interrupt vector already on stack */
14867c478bd9Sstevel@tonic-gate	/
14877c478bd9Sstevel@tonic-gate	/ Set the thread state to free so kmdb doesn't see it
14887c478bd9Sstevel@tonic-gate	/
14897c478bd9Sstevel@tonic-gate	movl	$FREE_THREAD, T_STATE(%esi)
14907c478bd9Sstevel@tonic-gate	/
14917c478bd9Sstevel@tonic-gate	/ Put thread on either the interrupt pool or the free pool and
14927c478bd9Sstevel@tonic-gate	/ call swtch() to resume another thread.
14937c478bd9Sstevel@tonic-gate	/
14947c478bd9Sstevel@tonic-gate	LOADCPU(%ebx)
14957c478bd9Sstevel@tonic-gate	LINK_INTR_THREAD(%ebx, %esi, %edx)
14967c478bd9Sstevel@tonic-gate	call	splhigh			/* block all intrs below lock lvl */
14977c478bd9Sstevel@tonic-gate	call	swtch
14987c478bd9Sstevel@tonic-gate	/ swtch() shouldn't return
14997c478bd9Sstevel@tonic-gate	SET_SIZE(dosoftint)
15007c478bd9Sstevel@tonic-gate
15017c478bd9Sstevel@tonic-gate#endif	/* __i386 */
15027c478bd9Sstevel@tonic-gate#endif	/* __lint */
1503*7a364d25Sschwartz
1504*7a364d25Sschwartz#if defined(lint)
1505*7a364d25Sschwartz
1506*7a364d25Sschwartz/*
1507*7a364d25Sschwartz * intr_get_time() is a resource for interrupt handlers to determine how
1508*7a364d25Sschwartz * much time has been spent handling the current interrupt. Such a function
1509*7a364d25Sschwartz * is needed because higher level interrupts can arrive during the
1510*7a364d25Sschwartz * processing of an interrupt, thus making direct comparisons of %tick by
1511*7a364d25Sschwartz * the handler inaccurate. intr_get_time() only returns time spent in the
1512*7a364d25Sschwartz * current interrupt handler.
1513*7a364d25Sschwartz *
1514*7a364d25Sschwartz * The caller must be calling from an interrupt handler running at a pil
1515*7a364d25Sschwartz * below or at lock level. Timings are not provided for high-level
1516*7a364d25Sschwartz * interrupts.
1517*7a364d25Sschwartz *
1518*7a364d25Sschwartz * The first time intr_get_time() is called while handling an interrupt,
1519*7a364d25Sschwartz * it returns the time since the interrupt handler was invoked. Subsequent
1520*7a364d25Sschwartz * calls will return the time since the prior call to intr_get_time(). Time
1521*7a364d25Sschwartz * is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
1522*7a364d25Sschwartz *
1523*7a364d25Sschwartz * Theory Of Intrstat[][]:
1524*7a364d25Sschwartz *
1525*7a364d25Sschwartz * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
1526*7a364d25Sschwartz * uint64_ts per pil.
1527*7a364d25Sschwartz *
1528*7a364d25Sschwartz * intrstat[pil][0] is a cumulative count of the number of ticks spent
1529*7a364d25Sschwartz * handling all interrupts at the specified pil on this CPU. It is
1530*7a364d25Sschwartz * exported via kstats to the user.
1531*7a364d25Sschwartz *
1532*7a364d25Sschwartz * intrstat[pil][1] is always a count of ticks less than or equal to the
1533*7a364d25Sschwartz * value in [0]. The difference between [1] and [0] is the value returned
1534*7a364d25Sschwartz * by a call to intr_get_time(). At the start of interrupt processing,
1535*7a364d25Sschwartz * [0] and [1] will be equal (or nearly so). As the interrupt consumes
1536*7a364d25Sschwartz * time, [0] will increase, but [1] will remain the same. A call to
1537*7a364d25Sschwartz * intr_get_time() will return the difference, then update [1] to be the
1538*7a364d25Sschwartz * same as [0]. Future calls will return the time since the last call.
1539*7a364d25Sschwartz * Finally, when the interrupt completes, [1] is updated to the same as [0].
1540*7a364d25Sschwartz *
1541*7a364d25Sschwartz * Implementation:
1542*7a364d25Sschwartz *
1543*7a364d25Sschwartz * intr_get_time() works much like a higher level interrupt arriving. It
1544*7a364d25Sschwartz * "checkpoints" the timing information by incrementing intrstat[pil][0]
1545*7a364d25Sschwartz * to include elapsed running time, and by setting t_intr_start to rdtsc.
1546*7a364d25Sschwartz * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
1547*7a364d25Sschwartz * and updates intrstat[pil][1] to be the same as the new value of
1548*7a364d25Sschwartz * intrstat[pil][0].
1549*7a364d25Sschwartz *
1550*7a364d25Sschwartz * In the normal handling of interrupts, after an interrupt handler returns
1551*7a364d25Sschwartz * and the code in intr_thread() updates intrstat[pil][0], it then sets
1552*7a364d25Sschwartz * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
1553*7a364d25Sschwartz * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
1554*7a364d25Sschwartz * is 0.
1555*7a364d25Sschwartz *
1556*7a364d25Sschwartz * Whenever interrupts arrive on a CPU which is handling a lower pil
1557*7a364d25Sschwartz * interrupt, they update the lower pil's [0] to show time spent in the
1558*7a364d25Sschwartz * handler that they've interrupted. This results in a growing discrepancy
1559*7a364d25Sschwartz * between [0] and [1], which is returned the next time intr_get_time() is
1560*7a364d25Sschwartz * called. Time spent in the higher-pil interrupt will not be returned in
1561*7a364d25Sschwartz * the next intr_get_time() call from the original interrupt, because
1562*7a364d25Sschwartz * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
1563*7a364d25Sschwartz */
1564*7a364d25Sschwartz
1565*7a364d25Sschwartz/*ARGSUSED*/
1566*7a364d25Sschwartzuint64_t
1567*7a364d25Sschwartzintr_get_time(void)
1568*7a364d25Sschwartz{ return 0; }
1569*7a364d25Sschwartz#else	/* lint */
1570*7a364d25Sschwartz
1571*7a364d25Sschwartz
1572*7a364d25Sschwartz#if defined(__amd64)
1573*7a364d25Sschwartz	ENTRY_NP(intr_get_time)
1574*7a364d25Sschwartz	cli				/* make this easy -- block intrs */
1575*7a364d25Sschwartz	LOADCPU(%rdi)
1576*7a364d25Sschwartz	call	intr_thread_get_time
1577*7a364d25Sschwartz	sti
1578*7a364d25Sschwartz	ret
1579*7a364d25Sschwartz	SET_SIZE(intr_get_time)
1580*7a364d25Sschwartz
1581*7a364d25Sschwartz#elif defined(__i386)
1582*7a364d25Sschwartz
1583*7a364d25Sschwartz#ifdef DEBUG
1584*7a364d25Sschwartz
1585*7a364d25Sschwartz
1586*7a364d25Sschwartz_intr_get_time_high_pil:
1587*7a364d25Sschwartz	.string	"intr_get_time(): %pil > LOCK_LEVEL"
1588*7a364d25Sschwartz_intr_get_time_not_intr:
1589*7a364d25Sschwartz	.string	"intr_get_time(): not called from an interrupt thread"
1590*7a364d25Sschwartz_intr_get_time_no_start_time:
1591*7a364d25Sschwartz	.string	"intr_get_time(): t_intr_start == 0"
1592*7a364d25Sschwartz
1593*7a364d25Sschwartz/*
1594*7a364d25Sschwartz * ASSERT(%pil <= LOCK_LEVEL)
1595*7a364d25Sschwartz */
1596*7a364d25Sschwartz#define	ASSERT_PIL_BELOW_LOCK_LEVEL(cpureg)				\
1597*7a364d25Sschwartz	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, CPU_INTR_ACTV(cpureg);	\
1598*7a364d25Sschwartz	jz	0f;							\
1599*7a364d25Sschwartz	__PANIC(_intr_get_time_high_pil, 0f);				\
1600*7a364d25Sschwartz0:
1601*7a364d25Sschwartz
1602*7a364d25Sschwartz/*
1603*7a364d25Sschwartz * ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
1604*7a364d25Sschwartz */
1605*7a364d25Sschwartz#define	ASSERT_NO_PIL_0_INTRS(thrreg)			\
1606*7a364d25Sschwartz	testw	$T_INTR_THREAD, T_FLAGS(thrreg);	\
1607*7a364d25Sschwartz	jz	1f;					\
1608*7a364d25Sschwartz	cmpb	$0, T_PIL(thrreg);			\
1609*7a364d25Sschwartz	jne	0f;					\
1610*7a364d25Sschwartz1:							\
1611*7a364d25Sschwartz	__PANIC(_intr_get_time_not_intr, 0f);		\
1612*7a364d25Sschwartz0:
1613*7a364d25Sschwartz
1614*7a364d25Sschwartz/*
1615*7a364d25Sschwartz * ASSERT(t_intr_start != 0)
1616*7a364d25Sschwartz */
1617*7a364d25Sschwartz#define	ASSERT_INTR_START_NOT_0(thrreg)			\
1618*7a364d25Sschwartz	cmpl	$0, T_INTR_START(thrreg);		\
1619*7a364d25Sschwartz	jnz	0f;					\
1620*7a364d25Sschwartz	cmpl	$0, T_INTR_START+4(thrreg);		\
1621*7a364d25Sschwartz	jnz	0f;					\
1622*7a364d25Sschwartz	__PANIC(_intr_get_time_no_start_time, 0f);	\
1623*7a364d25Sschwartz0:
1624*7a364d25Sschwartz
1625*7a364d25Sschwartz#endif /* DEBUG */
1626*7a364d25Sschwartz
1627*7a364d25Sschwartz	ENTRY_NP(intr_get_time)
1628*7a364d25Sschwartz
1629*7a364d25Sschwartz	cli				/* make this easy -- block intrs */
1630*7a364d25Sschwartz	pushl	%esi			/* and free up some registers */
1631*7a364d25Sschwartz
1632*7a364d25Sschwartz	LOADCPU(%esi)
1633*7a364d25Sschwartz	movl	CPU_THREAD(%esi), %ecx
1634*7a364d25Sschwartz
1635*7a364d25Sschwartz#ifdef DEBUG
1636*7a364d25Sschwartz	ASSERT_PIL_BELOW_LOCK_LEVEL(%esi)
1637*7a364d25Sschwartz	ASSERT_NO_PIL_0_INTRS(%ecx)
1638*7a364d25Sschwartz	ASSERT_INTR_START_NOT_0(%ecx)
1639*7a364d25Sschwartz#endif /* DEBUG */
1640*7a364d25Sschwartz
1641*7a364d25Sschwartz_tsc_patch17:
1642*7a364d25Sschwartz	nop; nop			/* patched to rdtsc if available */
1643*7a364d25Sschwartz	TSC_SUB_FROM(%ecx, T_INTR_START)	/* get elapsed time */
1644*7a364d25Sschwartz	TSC_ADD_TO(%ecx, T_INTR_START)		/* T_INTR_START = rdtsc */
1645*7a364d25Sschwartz
1646*7a364d25Sschwartz	movzbl	T_PIL(%ecx), %ecx		/* %ecx = pil */
1647*7a364d25Sschwartz	PILBASE_INTRSTAT(%esi, %ecx)		/* %ecx = CPU + pil*16 */
1648*7a364d25Sschwartz	TSC_ADD_TO(%ecx, CPU_INTRSTAT)		/* intrstat[0] += elapsed */
1649*7a364d25Sschwartz	TSC_LOAD(%ecx, CPU_INTRSTAT)		/* get new intrstat[0] */
1650*7a364d25Sschwartz	TSC_SUB_FROM(%ecx, CPU_INTRSTAT+8)	/* diff with intrstat[1] */
1651*7a364d25Sschwartz	TSC_ADD_TO(%ecx, CPU_INTRSTAT+8)	/* intrstat[1] = intrstat[0] */
1652*7a364d25Sschwartz
1653*7a364d25Sschwartz	/* %edx/%eax contain difference between old and new intrstat[1] */
1654*7a364d25Sschwartz
1655*7a364d25Sschwartz	popl	%esi
1656*7a364d25Sschwartz	sti
1657*7a364d25Sschwartz	ret
1658*7a364d25Sschwartz	SET_SIZE(intr_get_time)
1659*7a364d25Sschwartz#endif	/* __i386 */
1660*7a364d25Sschwartz
1661*7a364d25Sschwartz#endif  /* lint */
1662