/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
/*   All Rights Reserved   */

/* Copyright (c) 1987, 1988 Microsoft Corporation */
/*   All Rights Reserved   */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#if defined(__lint)

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>

#else	/* __lint */

#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include "assym.h"

_ftrace_intr_thread_fmt:
	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"

#endif	/* lint */

#if defined(__i386)

#if defined(__lint)

void
patch_tsc(void)
{}

#else	/* __lint */

/*
 * To cope with processors that do not implement the rdtsc instruction,
 * we patch the kernel to use rdtsc if that feature is detected on the CPU.
 * On an unpatched kernel, all locations requiring rdtsc are nop's.
 *
 * This function patches the nop's to rdtsc.
 */
	ENTRY_NP(patch_tsc)
	movw	_rdtsc_insn, %cx
	movw	%cx, _tsc_patch1
	movw	%cx, _tsc_patch2
	movw	%cx, _tsc_patch3
	movw	%cx, _tsc_patch4
	movw	%cx, _tsc_patch5
	movw	%cx, _tsc_patch6
	movw	%cx, _tsc_patch7
	movw	%cx, _tsc_patch8
	movw	%cx, _tsc_patch9
	movw	%cx, _tsc_patch10
	movw	%cx, _tsc_patch11
	movw	%cx, _tsc_patch12
	movw	%cx, _tsc_patch13
	movw	%cx, _tsc_patch14
	movw	%cx, _tsc_patch15
	movw	%cx, _tsc_patch16
	ret
_rdtsc_insn:
	rdtsc
	SET_SIZE(patch_tsc)
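
/*
 * Illustrative note (not original to this file): each _tsc_patchN site in
 * this file is assembled as two one-byte nops (0x90 0x90; see the "nop; nop"
 * pairs in the i386 code below).  Since rdtsc encodes in exactly two bytes
 * (0x0f 0x31), patch_tsc() simply copies that 16-bit word from _rdtsc_insn
 * over each site.  A rough C sketch of the same idea, using hypothetical
 * names for the word and the list of patch-site addresses:
 *
 *	extern uint16_t _rdtsc_insn_word;	(0x310f when read little-endian)
 *
 *	void
 *	patch_tsc_sketch(uint16_t *site[], int nsites)
 *	{
 *		for (int i = 0; i < nsites; i++)
 *			*site[i] = _rdtsc_insn_word;	(nop; nop -> rdtsc)
 *	}
 */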

#endif	/* __lint */

#endif	/* __i386 */


#if defined(__lint)

void
_interrupt(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Common register usage:
	 *
	 * %rbx		cpu pointer
	 * %r12		trap trace pointer -and- stash of
	 *		vec across intr_thread dispatch.
	 * %r13d	ipl of isr
	 * %r14d	old ipl (ipl level we entered on)
	 * %r15		interrupted thread stack pointer
	 */
	ENTRY_NP2(cmnint, _interrupt)

	INTR_PUSH

	/*
	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
	 */
	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
						/* Uses labels 8 and 9 */
	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */

	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */

	movq	%rsp, %rbp

	TRACE_STACK(%r12)

	LOADCPU(%rbx)				/* &cpu */
	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
	movl	CPU_SOFTINFO(%rbx), %edx

#ifdef TRAPTRACE
	movl	$255, TTR_IPL(%r12)
	movl	%r14d, %edi
	movb	%dil, TTR_PRI(%r12)
	movl	CPU_BASE_SPL(%rbx), %edi
	movb	%dil, TTR_SPL(%r12)
	movb	$255, TTR_VECTOR(%r12)
#endif

	/*
	 * Check to see if the trap number is T_SOFTINT; if it is,
	 * jump straight to dosoftint now.
	 */
	cmpq	$T_SOFTINT, (%rsi)
	je	dosoftint

	/*
	 * Raise the interrupt priority level, returns newpil.
	 * (The vector address is in %rsi so setlvl can update it.)
	 */
	movl	%r14d, %edi			/* old ipl */
						/* &vector */
	call	*setlvl(%rip)

#ifdef TRAPTRACE
	movb	%al, TTR_IPL(%r12)
#endif
	/*
	 * check for spurious interrupt
	 */
	cmpl	$-1, %eax
	je	_sys_rtt

#ifdef TRAPTRACE
	movl	%r14d, %edx
	movb	%dl, TTR_PRI(%r12)
	movl	CPU_BASE_SPL(%rbx), %edx
	movb	%dl, TTR_SPL(%r12)
#endif
	movl	%eax, CPU_PRI(%rbx)		/* update ipl */

#ifdef TRAPTRACE
	movl	REGOFF_TRAPNO(%rbp), %edx
	movb	%dl, TTR_VECTOR(%r12)
#endif
	movl	%eax, %r13d			/* ipl of isr */

	/*
	 * At this point we can take one of two paths.
	 * If the new level is at or below lock level, we will
	 * run this interrupt in a separate thread.
	 */
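	/*
	 * Illustrative sketch (not original to this file) of the two paths,
	 * in rough C.  The function names match the calls made below; the
	 * stack handling is a simplification:
	 *
	 *	if (newpil <= LOCK_LEVEL) {
	 *		intr_thread();	(dispatch on an interrupt thread)
	 *	} else {
	 *		if (hilevel_intr_prolog(cpu, newpil, oldpil, &regs) == 0)
	 *			switch to cpu->cpu_intr_stack;
	 *		sti(); av_dispatch_autovect(vec); cli();
	 *		if (hilevel_intr_epilog(cpu, newpil, oldpil, vec) == 0)
	 *			switch back to the pinned thread's stack;
	 *	}
	 */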
	cmpl	$LOCK_LEVEL, %eax
	jbe	intr_thread

	movq	%rbx, %rdi		/* &cpu */
	movl	%r13d, %esi		/* ipl */
	movl	%r14d, %edx		/* old ipl */
	movq	%rbp, %rcx		/* &regs */
	call	hilevel_intr_prolog
	orl	%eax, %eax		/* zero if need to switch stack */
	jnz	1f

	/*
	 * Save the thread stack and get on the cpu's interrupt stack
	 */
	movq	%rsp, %r15
	movq	CPU_INTR_STACK(%rbx), %rsp
1:

	sti

	/*
	 * Walk the list of handlers for this vector, calling
	 * them as we go until no more interrupts are claimed.
	 */
	movl	REGOFF_TRAPNO(%rbp), %edi
	call	av_dispatch_autovect

	cli

	movq	%rbx, %rdi			/* &cpu */
	movl	%r13d, %esi			/* ipl */
	movl	%r14d, %edx			/* oldipl */
	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
	call	hilevel_intr_epilog
	orl	%eax, %eax		/* zero if need to switch stack */
	jnz	2f
	movq	%r15, %rsp
2:	/*
	 * Check for, and execute, softints before we iret.
	 *
	 * (dosoftint expects oldipl in %r14d (which is where it is)
	 * the cpu pointer in %rbx (which is where it is) and the
	 * softinfo in %edx (which is where we'll put it right now))
	 */
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint
	/*NOTREACHED*/

	SET_SIZE(cmnint)
	SET_SIZE(_interrupt)

/*
 * Handle an interrupt in a new thread
 *
 * As we branch here, interrupts are still masked,
 * %rbx still contains the cpu pointer,
 * %r14d contains the old ipl that we came in on, and
 * %eax contains the new ipl that we got from the setlvl routine
 */

	ENTRY_NP(intr_thread)

	movq	%rbx, %rdi	/* &cpu */
	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
	movl	%eax, %edx	/* new pil from setlvlx() */
	call	intr_thread_prolog
	movq	%rsp, %r15
	movq	%rax, %rsp	/* t_stk from interrupt thread */
	movq	%rsp, %rbp

	sti

	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
	jz	1f
	/*
	 * ftracing support. do we need this on x86?
	 */
	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
	movq	%rbp, %rsi			/* &regs */
	movl	%r12d, %edx			/* vec */
	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
	movzbl	T_PIL(%r11), %ecx		/* newipl */
	call	ftrace_3_notick
1:
	movl	%r12d, %edi			/* vec */
	call	av_dispatch_autovect

	cli

	movq	%rbx, %rdi			/* &cpu */
	movl	%r12d, %esi			/* vec */
	movl	%r14d, %edx			/* oldpil */
	call	intr_thread_epilog
	/*
	 * If we return from here (we might not if the interrupted thread
	 * has exited or blocked, in which case we'll have quietly swtch()ed
	 * away) then we need to switch back to our old %rsp
	 */
	movq	%r15, %rsp
	movq	%rsp, %rbp
	/*
	 * Check for, and execute, softints before we iret.
	 *
	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
	 * the mcpu_softinfo.st_pending field in %edx.)
	 */
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	/*FALLTHROUGH*/

/*
 * Process soft interrupts.
 * Interrupts are masked, and we have a minimal frame on the stack.
 * %edx should contain the mcpu_softinfo.st_pending field
 */

	ALTENTRY(dosoftint)

	movq	%rbx, %rdi			/* &cpu */
	movq	%rbp, %rsi			/* &regs = stack pointer for _sys_rtt */
						/* cpu->cpu_m.mcpu_softinfo.st_pending */
	movl	%r14d, %ecx			/* oldipl */
	call	dosoftint_prolog
	/*
	 * dosoftint_prolog() usually returns a stack pointer for the
	 * interrupt thread that we must switch to.  However, if the
	 * returned stack pointer is NULL, then the software interrupt was
	 * too low in priority to run now; we'll catch it another time.
	 */
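	/*
	 * Rough C sketch of the loop below (not original to this file);
	 * dosoftint_prolog/dosoftint_epilog are the helpers actually called
	 * here, the stack handling is simplified:
	 *
	 *	while ((pending = cpu->cpu_m.mcpu_softinfo.st_pending) != 0) {
	 *		t_stk = dosoftint_prolog(cpu, &regs, pending, oldipl);
	 *		if (t_stk == NULL)
	 *			break;		(too low in priority to run now)
	 *		switch to t_stk; sti();
	 *		av_dispatch_softvect(curthread->t_pil);
	 *		cli();
	 *		dosoftint_epilog(cpu, oldipl);
	 *		switch back to the interrupted stack;
	 *	}
	 */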
	orq	%rax, %rax
	jz	_sys_rtt
	movq	%rsp, %r15
	movq	%rax, %rsp		/* t_stk from interrupt thread */
	movq	%rsp, %rbp

	sti

	/*
	 * Enabling interrupts (above) could raise the current ipl
	 * and base spl.  But, we continue processing the current soft
	 * interrupt and we will check the base spl next time around
	 * so that blocked interrupt threads get a chance to run.
	 */
	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
	movzbl	T_PIL(%r11), %edi
	call	av_dispatch_softvect

	cli

	movq	%rbx, %rdi		/* &cpu */
	movl	%r14d, %esi		/* oldpil */
	call	dosoftint_epilog
	movq	%r15, %rsp		/* back on old stack pointer */
	movq	%rsp, %rbp
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint

	SET_SIZE(dosoftint)
	SET_SIZE(intr_thread)

#elif defined(__i386)

/*
 * One day, this should just invoke the C routines that know how to
 * do all the interrupt bookkeeping.  In the meantime, try
 * and make the assembler a little more comprehensible.
 */

#define	INC64(basereg, offset)			\
	addl	$1, offset(basereg);		\
	adcl	$0, offset + 4(basereg)

#define	TSC_CLR(basereg, offset)		\
	movl	$0, offset(basereg);		\
	movl	$0, offset + 4(basereg)

/*
 * The following macros assume the time value is in %edx:%eax
 * e.g. from a rdtsc instruction.
 */
#define	TSC_MOV(reg, offset)		\
	movl	%eax, offset(reg);	\
	movl	%edx, offset + 4(reg)

#define	TSC_ADD_TO(reg, offset)		\
	addl	%eax, offset(reg);	\
	adcl	%edx, offset + 4(reg)

#define	TSC_SUB_FROM(reg, offset)	\
	subl	offset(reg), %eax;	\
	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */

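/*
 * Illustrative note (not original to this file): the macros above keep
 * 64-bit cycle counts in two 32-bit halves, using add/adc and sub/sbb to
 * propagate the carry or borrow.  Roughly, in C, with "tsc" standing for
 * the %edx:%eax pair produced by rdtsc:
 *
 *	INC64(p, off)		*(uint64_t *)((char *)p + off) += 1;
 *	TSC_CLR(p, off)		*(uint64_t *)((char *)p + off) = 0;
 *	TSC_MOV(p, off)		*(uint64_t *)((char *)p + off) = tsc;
 *	TSC_ADD_TO(p, off)	*(uint64_t *)((char *)p + off) += tsc;
 *	TSC_SUB_FROM(p, off)	tsc -= *(uint64_t *)((char *)p + off);
 */
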
/*
 * basereg   - pointer to cpu struct
 * pilreg    - pil or converted pil (pil - (LOCK_LEVEL + 1))
 * pilreg_32 - 32-bit version of pilreg
 *
 * Returns (base + pil * 8) in pilreg
 */
#define	PILBASE(basereg, pilreg)	\
	lea	(basereg, pilreg, 8), pilreg

/*
 * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
 */
#define	HIGHPILBASE(basereg, pilreg, pilreg_32)		\
	subl	$LOCK_LEVEL + 1, pilreg_32;		\
	PILBASE(basereg, pilreg)

/*
 * Returns (cpu + cpu_mstate * 8) in tgt
 */
#define	INTRACCTBASE(cpureg, tgtreg)		\
	movzwl	CPU_MSTATE(cpureg), tgtreg;	\
	lea	(cpureg, tgtreg, 8), tgtreg

/*
 * cpu_stats.sys.intr[PIL]++
 */
#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
	movl	pilreg, tmpreg_32;				\
	PILBASE(basereg, tmpreg);				\
	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))

/*
 * Unlink thread from CPU's list
 */
#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
	mov	CPU_INTR_THREAD(cpureg), ithread;	\
	mov	T_LINK(ithread), tmpreg;		\
	mov	tmpreg, CPU_INTR_THREAD(cpureg)

/*
 * Link a thread into CPU's list
 */
#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
	mov	tmpreg, T_LINK(ithread);		\
	mov	ithread, CPU_INTR_THREAD(cpureg)
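
/*
 * Illustrative note (not original to this file): the *PILBASE macros above
 * scale an index by 8 (sizeof (uint64_t)) to address the per-PIL and
 * per-mstate 64-bit counters, and the (UN)LINK_INTR_THREAD macros treat
 * cpu_intr_thread as the head of a LIFO free list linked through t_link.
 * Roughly, in C (field names follow the assym offsets used above):
 *
 *	cpu->cpu_intrstat[pil] += delta;		(PILBASE users)
 *	cpu->cpu_intracct[cpu->cpu_mstate] += delta;	(INTRACCTBASE users)
 *
 *	it = cpu->cpu_intr_thread;			(UNLINK: pop)
 *	cpu->cpu_intr_thread = it->t_link;
 *
 *	it->t_link = cpu->cpu_intr_thread;		(LINK: push)
 *	cpu->cpu_intr_thread = it;
 */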

#if defined(DEBUG)

/*
 * Do not call panic, if panic is already in progress.
 */
#define	__PANIC(msg, label)		\
	cmpl	$0, panic_quiesce;	\
	jne	label;			\
	pushl	$msg;			\
	call	panic

#define	__CMP64_JNE(basereg, offset, label)	\
	cmpl	$0, offset(basereg);		\
	jne	label;				\
	cmpl	$0, offset + 4(basereg);	\
	jne	label

/*
 * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
 */
#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
	btl	pilreg, CPU_INTR_ACTV(basereg);		\
	jnc	4f;					\
	__PANIC(msg, 4f);				\
4:

/*
 * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
 */
#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
	btl	pilreg, CPU_INTR_ACTV(basereg);		\
	jc	5f;					\
	__PANIC(msg, 5f);				\
5:

/*
 * ASSERT(CPU->cpu_pil_high_start != 0)
 */
#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)			\
	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);		\
	__PANIC(_interrupt_timestamp_zero, 6f);			\
6:

/*
 * ASSERT(t->t_intr_start != 0)
 */
#define	ASSERT_T_INTR_START_NZ(basereg)					\
	__CMP64_JNE(basereg, T_INTR_START, 7f);				\
	__PANIC(_intr_thread_t_intr_start_zero, 7f);			\
7:

_interrupt_actv_bit_set:
	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
_interrupt_actv_bit_not_set:
	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
_interrupt_timestamp_zero:
	.string	"_interrupt(): timestamp zero upon handler return"
_intr_thread_actv_bit_not_set:
	.string	"intr_thread(): cpu_intr_actv bit not set for PIL"
_intr_thread_t_intr_start_zero:
	.string	"intr_thread(): t_intr_start zero upon handler return"
_dosoftint_actv_bit_set:
	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
_dosoftint_actv_bit_not_set:
	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"

	DGDEF(intr_thread_cnt)

#else
#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
#define	ASSERT_T_INTR_START_NZ(basereg)
#endif

	ENTRY_NP2(cmnint, _interrupt)

	INTR_PUSH

	/*
	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
	 */
	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
						/* Uses labels 8 and 9 */
	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */

	movl	%esp, %ebp
	DISABLE_INTR_FLAGS
	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
	leal	REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
	movl	CPU_PRI(%ebx), %edi		/* get ipl */
	movl	CPU_SOFTINFO(%ebx), %edx

	/
	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
	/ jump straight to dosoftint now.
	/
	cmpl	$T_SOFTINT, (%ecx)
	je	dosoftint

	/ raise interrupt priority level
	/ oldipl is in %edi, vectorp is in %ecx
	/ newipl is returned in %eax
	pushl	%ecx
	pushl	%edi
	call	*setlvl
	popl	%edi			/* save oldpil in %edi */
	popl	%ecx

#ifdef TRAPTRACE
	movb	%al, TTR_IPL(%esi)
#endif

	/ check for spurious interrupt
	cmp	$-1, %eax
	je	_sys_rtt

#ifdef TRAPTRACE
	movl	CPU_PRI(%ebx), %edx
	movb	%dl, TTR_PRI(%esi)
	movl	CPU_BASE_SPL(%ebx), %edx
	movb	%dl, TTR_SPL(%esi)
#endif

	movl	%eax, CPU_PRI(%ebx)		/* update ipl */
	movl	REGOFF_TRAPNO(%ebp), %ecx	/* reload the interrupt vector */

#ifdef TRAPTRACE
	movb	%cl, TTR_VECTOR(%esi)
#endif

	/ At this point we can take one of two paths.  If the new priority
	/ level is less than or equal to LOCK LEVEL then we jump to code that
	/ will run this interrupt as a separate thread.  Otherwise the
	/ interrupt is NOT run as a separate thread.

	/ %edi - old priority level
	/ %ebp - pointer to REGS
	/ %ecx - translated vector
	/ %eax - ipl of isr
	/ %ebx - cpu pointer

	cmpl	$LOCK_LEVEL, %eax	/* compare to highest thread level */
	jbe	intr_thread		/* process as a separate thread */

	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
	jne	2f

	movl	REGOFF_PC(%ebp), %esi
	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
	jz	1f
	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
	jmp	2f

1:
	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */

2:
	pushl	%ecx				/* vec */
	pushl	%eax				/* newpil */

	/
	/ See if we are interrupting another high-level interrupt.
	/
	movl	CPU_INTR_ACTV(%ebx), %eax
	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
	jz	0f
	/
	/ We have interrupted another high-level interrupt.
	/ Load starting timestamp, compute interval, update cumulative counter.
	/
	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
	HIGHPILBASE(%ebx, %ecx, %ecx)
_tsc_patch1:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)
	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %ecx	/* offset PILs 0-10 */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
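	/
	/ (Illustrative note, not original:) the macro sequence above is,
	/ roughly,
	/	delta = rdtsc() - cpu_pil_high_start[pil - (LOCK_LEVEL + 1)];
	/	cpu_intrstat[pil] += delta;
	/	cpu_intracct[cpu_mstate] += delta;
	/ i.e. the time the interrupted high-level handler has run so far is
	/ charged to its PIL before this higher-priority handler starts.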
	/
	/ Another high-level interrupt is active below this one, so
	/ there is no need to check for an interrupt thread.  That will be
	/ done by the lowest priority high-level interrupt active.
	/
	jmp	1f
0:
	/
	/ See if we are interrupting a low-level interrupt thread.
	/
	movl	CPU_THREAD(%ebx), %esi
	testw	$T_INTR_THREAD, T_FLAGS(%esi)
	jz	1f
	/
	/ We have interrupted an interrupt thread. Account for its time slice
	/ only if its time stamp is non-zero.
	/
	cmpl	$0, T_INTR_START+4(%esi)
	jne	0f
	cmpl	$0, T_INTR_START(%esi)
	je	1f
0:
	movzbl	T_PIL(%esi), %ecx	/* %ecx has PIL of interrupted handler */
	PILBASE(%ebx, %ecx)
_tsc_patch2:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_CLR(%esi, T_INTR_START)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1:
	/ Store starting timestamp in CPU structure for this PIL.
	popl	%ecx			/* restore new PIL */
	pushl	%ecx
	HIGHPILBASE(%ebx, %ecx, %ecx)
_tsc_patch3:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%ecx, CPU_PIL_HIGH_START)

	popl	%eax			/* restore new pil */
	popl	%ecx			/* vec */
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	/ Save old CPU_INTR_ACTV
	movl	CPU_INTR_ACTV(%ebx), %esi

	cmpl	$15, %eax
	jne	0f
	/ PIL-15 interrupt.  Increment nest-count in upper 16 bits of intr_actv
	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
0:
	btsl	%eax, CPU_INTR_ACTV(%ebx)
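	/
	/ (Illustrative note, not original:) cpu_intr_actv keeps one bit per
	/ active PIL in its low 16 bits; because PIL 15 can nest on itself,
	/ the upper 16 bits (CPU_INTR_ACTV_REF above) hold a nesting count,
	/ and bit 15 is only cleared when that count drops back to zero.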
	/
	/ Handle high-level nested interrupt on separate interrupt stack
	/
	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
	jnz	onstack			/* already on interrupt stack */
	movl	%esp, %eax
	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
	pushl	%eax			/* save the thread stack pointer */
onstack:
	movl	$autovect, %esi		/* get autovect structure before */
					/* sti to save on AGI later */
	sti				/* enable interrupts */
	pushl	%ecx			/* save interrupt vector */
	/
	/ Get handler address
	/
pre_loop1:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi		/* if pointer is null */
	jz	.intr_ret		/* then skip */
loop1:
	incb	%bh
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if func is null */
	jz	.intr_ret		/* then skip */
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp
	movl	%eax, 16(%esp)
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop1			/* then continue */

.intr_ret:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.intr_ret1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.intr_ret1
	movl	(%esp), %ecx		/* else restore intr vector */
	movl	$autovect, %esi		/* get autovect structure */
	jmp	pre_loop1		/* and try again. */
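	/
	/ (Illustrative sketch, not original:) the pre_loop1/loop1 code above
	/ is roughly the following C, with DTrace probes around each call:
	/
	/	do {
	/		claimed = 0; count = 0;
	/		for (av = autovect[vec].avh_link; av != NULL &&
	/		    av->av_vector != NULL; av = av->av_link) {
	/			count++;
	/			claimed |= (*av->av_vector)(av->av_intarg1,
	/			    av->av_intarg2);
	/		}
	/	} while (count > 1 && claimed != 0);
	/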

.intr_ret1:
	LOADCPU(%ebx)			/* get pointer to cpu struct */

	cli
	movl	CPU_PRI(%ebx), %esi

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)

	cmpl	$15, %esi
	jne	0f
	/ Only clear bit if reference count is now zero.
	decw	CPU_INTR_ACTV_REF(%ebx)
	jnz	1f
0:
	btrl	%esi, CPU_INTR_ACTV(%ebx)
1:
	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = PIL
_tsc_patch4:
	nop; nop			/* patched to rdtsc if available */
	HIGHPILBASE(%ebx, %esi, %esi)

	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)

	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)
	addl	$CPU_INTRSTAT_LOW_PIL_OFFSET, %esi	/* offset PILs 0-10 */
	TSC_ADD_TO(%esi, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %esi)
	TSC_ADD_TO(%esi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	/
	/ Check for lower-PIL nested high-level interrupt beneath current one
	/ If so, place a starting timestamp in its pil_high_start entry.
	/
	movl	CPU_INTR_ACTV(%ebx), %eax
	movl	%eax, %esi
	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
	jz	0f
	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
	HIGHPILBASE(%ebx, %ecx, %ecx)
_tsc_patch5:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%ecx, CPU_PIL_HIGH_START)
	/
	/ Another high-level interrupt is active below this one, so
	/ there is no need to check for an interrupt thread.  That will be
	/ done by the lowest priority high-level interrupt active.
	/
	jmp	1f
0:
	/ Check to see if there is a low-level interrupt active. If so,
	/ place a starting timestamp in the thread structure.
	movl	CPU_THREAD(%ebx), %esi
	testw	$T_INTR_THREAD, T_FLAGS(%esi)
	jz	1f
_tsc_patch6:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%esi, T_INTR_START)
1:
	movl	%edi, CPU_PRI(%ebx)
				/* interrupt vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx
	addl	$8, %esp		/* eax contains the current ipl */

	movl	CPU_INTR_ACTV(%ebx), %esi	/* reset stack pointer if no more */
	shrl	$LOCK_LEVEL + 1, %esi		/* HI PRI intrs. */
	jnz	.intr_ret2
	popl	%esp			/* restore the thread stack pointer */
.intr_ret2:
	movl	CPU_SOFTINFO(%ebx), %edx	/* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */
	SET_SIZE(cmnint)
	SET_SIZE(_interrupt)

#endif	/* __i386 */

/*
 * Declare a uintptr_t which has the size of _interrupt to enable stack
 * traceback code to know when a regs structure is on the stack.
 */
	.globl	_interrupt_size
	.align	CLONGSIZE
_interrupt_size:
	.NWORD	. - _interrupt
	.type	_interrupt_size, @object

#endif	/* __lint */

#if defined(__i386)

/*
 * Handle an interrupt in a new thread.
 *	Entry: traps disabled.
 *	%edi - old priority level
 *	%ebp - pointer to REGS
 *	%ecx - translated vector
 *	%eax - ipl of isr.
 *	%ebx - pointer to CPU struct
 * Uses:
 */
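
/*
 * Illustrative overview (not original to this file) of the code below,
 * in rough C; tracing and DEBUG checks are omitted:
 *
 *	it = cpu->cpu_intr_thread;	(pop a free interrupt thread)
 *	cpu->cpu_intr_thread = it->t_link;
 *	it->t_lwp = t->t_lwp;		(pin the interrupted thread t)
 *	it->t_intr = t;
 *	it->t_state = TS_ONPROC;
 *	it->t_pil = newpil;
 *	it->t_pri = intr_pri + newpil;
 *	cpu->cpu_thread = it;		(switch to its stack, then sti)
 *	... walk the autovector chain for this vector ...
 *	if (it->t_intr != NULL)
 *		unpin t, push "it" back on the free list and return;
 *	else
 *		the handler blocked: intr_thread_exit frees it and swtch()s.
 */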

#if !defined(__lint)

	ENTRY_NP(intr_thread)
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */

	/
	/ Are we interrupting an interrupt thread? If so, account for it.
	/
	testw	$T_INTR_THREAD, T_FLAGS(%edx)
	jz	0f
	pushl	%ecx
	pushl	%eax
	movl	%edx, %esi
_tsc_patch7:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_CLR(%esi, T_INTR_START)
	movzbl	T_PIL(%esi), %ecx
	PILBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	movl	%esi, %edx
	popl	%eax
	popl	%ecx
0:
	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
	pushl	%edi			/* get a temporary register */
	UNLINK_INTR_THREAD(%ebx, %esi, %edi)

	movl	T_LWP(%edx), %edi
	movl	%edx, T_INTR(%esi)		/* push old thread */
	movl	%edi, T_LWP(%esi)
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ chain the interrupted thread onto list from the interrupt thread.
	/ Set the new interrupt thread as the current one.
	/
	popl	%edi			/* Don't need a temp reg anymore */
	movl	T_STACK(%esi), %esp		/* interrupt stack pointer */
	movl	%esp, %ebp
	movl	%esi, CPU_THREAD(%ebx)		/* set new thread */
	pushl	%eax				/* save the ipl */
	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
					/* is loaded on some other cpu. */
	addl	%ebx, %eax		/* convert level to dispatch priority */
	movw	%ax, T_PRI(%esi)

	/
	/ Take timestamp and store it in the thread structure.
	/
	movl	%eax, %ebx		/* save priority over rdtsc */
_tsc_patch8:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%esi, T_INTR_START)
	movl	%ebx, %eax		/* restore priority */

	/ The following 3 instructions need not be in cli.
	/ Putting them here only to avoid the AGI penalty on Pentiums.

	pushl	%ecx			/* save interrupt vector. */
	pushl	%esi			/* save interrupt thread */
	movl	$autovect, %esi		/* get autovect structure */
	sti				/* enable interrupts */

	/ Fast event tracing.
	LOADCPU(%ebx)
	movl	CPU_FTRACE_STATE(%ebx), %ebx
	testl	$FTRACE_ENABLED, %ebx
	jz	1f

	movl	8(%esp), %ebx
	pushl	%ebx			/* ipl */
	pushl	%ecx			/* int vector */
	movl	T_SP(%edx), %ebx
	pushl	%ebx			/* &regs */
	pushl	$_ftrace_intr_thread_fmt
	call	ftrace_3_notick
	addl	$8, %esp
	popl	%ecx			/* restore int vector */
	addl	$4, %esp
1:
pre_loop2:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi		/* if pointer is null */
	jz	loop_done2		/* we're done */
loop2:
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if pointer is null */
	jz	loop_done2		/* we're done */
	incb	%bh
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp
	movl	%eax, 16(%esp)
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop2			/* continue */
loop_done2:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.loop_done2_1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.loop_done2_1
	movl	$autovect, %esi		/* else get autovect structure */
	movl	4(%esp), %ecx		/* restore intr vector */
	jmp	pre_loop2		/* and try again. */
.loop_done2_1:
	popl	%esi			/* restore intr thread pointer */

	LOADCPU(%ebx)

	cli		/* protect interrupt thread pool and intr_actv */
	movzbl	T_PIL(%esi), %eax

	/ Save value in regs
	pushl	%eax			/* current pil */
	pushl	%edx			/* (huh?) */
	pushl	%edi			/* old pil */

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)

	/
	/ Take timestamp, compute interval, and update cumulative counter.
	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %edi

	ASSERT_T_INTR_START_NZ(%esi)

_tsc_patch9:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	PILBASE(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%edi
	popl	%edx
	popl	%eax

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)

	btrl	%eax, CPU_INTR_ACTV(%ebx)

	/ if there is still an interrupted thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to intr_thread_exit
	cmpl	$0, T_INTR(%esi)
	je	intr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)

	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	intr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
intr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)
				/* intr vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx		/* eax contains the current ipl */
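	/
	/ (Illustrative note, not original:) the cmpl/jae/setlvlx sequence
	/ above amounts to
	/	(*setlvlx)(MAX(oldipl, cpu->cpu_base_spl), vec);
	/ so the PIL is never dropped below the CPU's base SPL, which tracks
	/ interrupt levels that are still active (see set_base_spl below).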
	/ Place starting timestamp in interrupted thread's thread structure.
_tsc_patch10:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%ecx, T_INTR_START)

	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp
	movl	%ecx, CPU_THREAD(%ebx)

	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */

	/
	/ An interrupt returned on what was once (and still might be)
	/ an interrupt thread stack, but the interrupted process is no longer
	/ there.  This means the interrupt must have blocked.
	/
	/ There is no longer a thread under this one, so put this thread back
	/ on the CPU's free list and resume the idle thread which will dispatch
	/ the next thread to run.
	/
	/ All interrupts are disabled here
	/

intr_thread_exit:
#ifdef DEBUG
	incl	intr_thread_cnt
#endif
	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	esi	interrupt thread
	/	edi	old ipl
	/	ebx	ptr to CPU struct

	/ Set CPU's base SPL level based on active interrupts bitmask
	call	set_base_spl

	movl	CPU_BASE_SPL(%ebx), %edi
	movl	%edi, CPU_PRI(%ebx)
					/* interrupt vector already on stack */
	pushl	%edi
	call	*setlvlx
	addl	$8, %esp		/* XXX - don't need to pop since */
					/* we are ready to switch */
	call	splhigh			/* block all intrs below lock level */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	swtch
	/ swtch() shouldn't return

	SET_SIZE(intr_thread)

#endif	/* __lint */
#endif	/* __i386 */

/*
 * Set CPU's base SPL level, based on which interrupt levels are active.
 *	Called at spl7 or above.
 */

#if defined(__lint)

void
set_base_spl(void)
{}

#else	/* __lint */

	ENTRY_NP(set_base_spl)
	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
	testl	%eax, %eax		/* is it zero? */
	jz	setbase
	testl	$0xff00, %eax
	jnz	ah_set
	shl	$24, %eax		/* shift 'em over so we can find */
					/* the 1st bit faster */
	bsrl	%eax, %eax
	subl	$24, %eax
setbase:
	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
	ret
ah_set:
	shl	$16, %eax
	bsrl	%eax, %eax
	subl	$16, %eax
	jmp	setbase
	SET_SIZE(set_base_spl)

#endif	/* __lint */
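/*
 * In rough C, set_base_spl() computes the following (sketch only; the two
 * bsrl paths above are just a fast way of finding the highest set bit):
 *
 *	uint_t actv = CPU->cpu_intr_actv;
 *	uint_t base = 0;
 *
 *	while (actv >>= 1)
 *		base++;
 *	CPU->cpu_base_spl = base;
 *
 * i.e. the base SPL becomes the highest PIL that still has an active
 * (typically blocked) interrupt thread, or zero when none are active.
 */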
#if defined(__i386)

/*
 * int
 * intr_passivate(from, to)
 *	thread_id_t	from;		interrupt thread
 *	thread_id_t	to;		interrupted thread
 *
 *	intr_passivate(t, itp) makes the interrupted thread "t" runnable.
 *
 *	Since t->t_sp has already been saved, t->t_pc is all that needs to
 *	be set in this function.
 *
 *	Returns interrupt level of the thread.
 */

#if defined(__lint)

/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }

#else	/* __lint */

	ENTRY(intr_passivate)
	movl	8(%esp), %eax		/* interrupted thread */
	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */

	movl	4(%esp), %eax		/* interrupt thread */
	movl	T_STACK(%eax), %eax	/* get the pointer to the start */
					/* of the interrupt thread stack */
	movl	-4(%eax), %eax		/* interrupt level was the first */
					/* thing pushed onto the stack */
	ret
	SET_SIZE(intr_passivate)

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__lint)

void
fakesoftint(void)
{}

#else	/* __lint */

	/
	/ If we're here, we're being called from splx() to fake a soft
	/ interrupt (note that interrupts are still disabled from splx()).
	/ We execute this code when a soft interrupt is posted at a
	/ level higher than the CPU's current spl; when spl is lowered in
	/ splx(), it will see the softint and jump here.  We'll do exactly
	/ what a trap would do:  push our flags, %cs, %eip, error code
	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
	/ and branch to the dosoftint() code.
	/
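	/*
	 * Both variants below hand-build the frame that a hardware
	 * interrupt would have produced, so cmnint can treat the fake
	 * softint exactly like a trap.  In push order (amd64 shown; the
	 * i386 variant omits %ss/%rsp and uses the 32-bit registers):
	 *
	 *	%ss	KDS_SEL
	 *	%rsp	original stack pointer (before the 16-byte align)
	 *	rflags
	 *	%cs	KCS_SEL
	 *	%rip	fakesoftint_return
	 *	err	0
	 *	trapno	T_SOFTINT
	 */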
#if defined(__amd64)

	/*
	 * In 64-bit mode, iretq -always- pops all five regs.
	 * Imitate the 16-byte auto-align of the stack, and the
	 * zeroed-out %ss value.
	 */
	ENTRY_NP(fakesoftint)
	movq	%rsp, %r11
	andq	$-16, %rsp
	pushq	$KDS_SEL	/* %ss */
	pushq	%r11		/* %rsp */
	pushf			/* rflags */
	pushq	$KCS_SEL	/* %cs */
	leaq	fakesoftint_return(%rip), %r11
	pushq	%r11		/* %rip */
	pushq	$0		/* err */
	pushq	$T_SOFTINT	/* trap */
	jmp	cmnint
	SET_SIZE(fakesoftint)

#elif defined(__i386)

	ENTRY_NP(fakesoftint)
	pushf
	push	%cs
	push	$fakesoftint_return
	push	$0
	push	$T_SOFTINT
	jmp	cmnint
	SET_SIZE(fakesoftint)

#endif	/* __i386 */

	.align	CPTRSIZE
	.globl	_fakesoftint_size
	.type	_fakesoftint_size, @object
_fakesoftint_size:
	.NWORD	. - fakesoftint
	SET_SIZE(_fakesoftint_size)

/*
 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
 * Process software interrupts.
 * Interrupts are disabled here.
 */
#if defined(__i386)

	ENTRY_NP(dosoftint)

	bsrl	%edx, %edx		/* find highest pending interrupt */
	cmpl	%edx, %edi		/* if curipl >= pri soft pending intr */
	jae	_sys_rtt		/* skip */

	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
	jae	_sys_rtt		/* skip */

	lock				/* MP protect */
	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
	jnc	dosoftint_again

	movl	%edx, CPU_PRI(%ebx)	/* set IPL to softint level */
	pushl	%edx
	call	*setspl			/* mask levels up to the softint level */
	popl	%eax			/* priority we are at in %eax */
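	/*
	 * The checks above can be read as the following C sketch, where
	 * "pending" is the CPU_SOFTINFO word passed in via %edx, and
	 * highest_set_bit() and atomic_btr() are illustrative stand-ins
	 * for bsrl and the lock/btrl pair:
	 *
	 *	pil = highest_set_bit(pending);
	 *	if (pil <= oldipl || pil <= CPU->cpu_base_spl)
	 *		return;			(leave it for later)
	 *	if (!atomic_btr(&pending, pil))
	 *		goto dosoftint_again;	(someone else already took it)
	 *	CPU->cpu_pri = pil;
	 *	(*setspl)(pil);
	 */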
	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	UNLINK_INTR_THREAD(%ebx, %esi, %edx)

	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed.
	/
	movl	CPU_THREAD(%ebx), %ecx

	/ If we are interrupting an interrupt thread, account for it.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
	pushl	%eax
	movl	%eax, %ebp
_tsc_patch11:
	nop; nop			/* patched to rdtsc if available */
	PILBASE(%ebx, %ebp)
	TSC_SUB_FROM(%ecx, T_INTR_START)
	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ebp)
	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%eax
0:
	movl	T_LWP(%ecx), %ebp
	movl	%ebp, T_LWP(%esi)
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE.
	/ Could eliminate the next two instructions with a little work.
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ Push interrupted thread onto list from new thread.
	/ Set the new thread as the current one.
	/ Set interrupted thread's T_SP because if it is the idle thread,
	/ Resume() may use that stack between threads.
	/
	movl	%esp, T_SP(%ecx)	/* mark stack for resume */
	movl	%ecx, T_INTR(%esi)	/* push old thread */
	movl	%esi, CPU_THREAD(%ebx)	/* set new thread */
	movl	T_STACK(%esi), %esp	/* interrupt stack pointer */
	movl	%esp, %ebp

	pushl	%eax			/* push ipl as first element in stack */
					/* see intr_passivate() */
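	/*
	 * Hand-off bookkeeping above, in outline ("it" is the softint
	 * thread pulled from the pool, "pinned" is the thread we just
	 * interrupted; this is a sketch, not the kernel's C source):
	 *
	 *	it->t_lwp = pinned->t_lwp;	(before cpu_thread changes,
	 *					 as kcpc_overflow_intr needs)
	 *	it->t_state = TS_ONPROC;
	 *	pinned->t_sp = %esp;		(so resume() can use the stack)
	 *	it->t_intr = pinned;		(pin the old thread beneath us)
	 *	cpu->cpu_thread = it;
	 *	%esp = it->t_stack;
	 *	push(pil);			(first word on the new stack;
	 *					 intr_passivate() reads it back)
	 */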
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ecx
	addl	%eax, %ecx		/* convert level to dispatch priority */
	movw	%cx, T_PRI(%esi)

	/
	/ Store starting timestamp in thread structure.
	/ esi = thread, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
_tsc_patch12:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%esi, T_INTR_START)

	sti				/* enable interrupts */

	/
	/ Enabling interrupts (above) could raise the current
	/ IPL and base SPL.  But we continue processing the current soft
	/ interrupt and will check the base SPL on the next pass through the
	/ loop, so that a blocked interrupt thread gets a chance to run.
	/

	/
	/ Dispatch soft interrupts
	/
	pushl	%ecx
	call	av_dispatch_softvect
	addl	$4, %esp

	cli				/* protect interrupt thread pool */
					/* and softinfo & sysinfo */
	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
	movzbl	T_PIL(%esi), %ecx

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)

	btrl	%ecx, CPU_INTR_ACTV(%ebx)

	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = thread, ebx = cpu, ecx = PIL
	/
	PILBASE(%ebx, %ecx)
_tsc_patch13:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */

	/ If there is still an interrupted thread underneath this one,
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to softintr_thread_exit.
	/ softintr_thread_exit expects esi to be curthread & ebx to be ipl.
	cmpl	$0, T_INTR(%esi)
	je	softintr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Switch back to the interrupted thread
	movl	T_INTR(%esi), %ecx
	movl	%ecx, CPU_THREAD(%ebx)
	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp

	/ If we are returning to an interrupt thread, store a starting
	/ timestamp in the thread structure.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
_tsc_patch14:
	nop; nop			/* patched to rdtsc if available */
	TSC_MOV(%ecx, T_INTR_START)
0:
	movl	CPU_BASE_SPL(%ebx), %eax
	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	softintr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
softintr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)	/* set IPL to old level */
	pushl	%edi
	call	*setspl
	popl	%eax
dosoftint_again:
	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* process more software interrupts */

softintr_thread_exit:
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	%esi	interrupt thread

	/
	/ This was an interrupt thread, so set CPU's base SPL level.
	/ set_base_spl only uses %eax.
	/
	call	set_base_spl		/* interrupt vector already on stack */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LOADCPU(%ebx)
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	splhigh			/* block all intrs below lock lvl */
	call	swtch
	/ swtch() shouldn't return
	SET_SIZE(dosoftint)

#endif	/* __i386 */
#endif	/* __lint */