/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
/*	  All Rights Reserved	*/

/*	Copyright (c) 1987, 1988 Microsoft Corporation	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#if defined(__lint)

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>

#else	/* __lint */

#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include "assym.h"

_ftrace_intr_thread_fmt:
	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"

#endif	/* lint */

#if defined(__i386)

#if defined(__lint)

void
patch_tsc(void)
707c478bd9Sstevel@tonic-gate{} 717c478bd9Sstevel@tonic-gate 727c478bd9Sstevel@tonic-gate#else /* __lint */ 737c478bd9Sstevel@tonic-gate 747c478bd9Sstevel@tonic-gate/* 757c478bd9Sstevel@tonic-gate * To cope with processors that do not implement the rdtsc instruction, 767c478bd9Sstevel@tonic-gate * we patch the kernel to use rdtsc if that feature is detected on the CPU. 777c478bd9Sstevel@tonic-gate * On an unpatched kernel, all locations requiring rdtsc are nop's. 787c478bd9Sstevel@tonic-gate * 797c478bd9Sstevel@tonic-gate * This function patches the nop's to rdtsc. 807c478bd9Sstevel@tonic-gate */ 817c478bd9Sstevel@tonic-gate ENTRY_NP(patch_tsc) 827c478bd9Sstevel@tonic-gate movw _rdtsc_insn, %cx 837c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch1 847c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch2 857c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch3 867c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch4 877c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch5 887c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch6 897c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch7 907c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch8 917c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch9 927c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch10 937c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch11 947c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch12 957c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch13 967c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch14 977c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch15 987c478bd9Sstevel@tonic-gate movw %cx, _tsc_patch16 997a364d25Sschwartz movw %cx, _tsc_patch17 1007c478bd9Sstevel@tonic-gate ret 1017c478bd9Sstevel@tonic-gate_rdtsc_insn: 1027c478bd9Sstevel@tonic-gate rdtsc 1037c478bd9Sstevel@tonic-gate SET_SIZE(patch_tsc) 1047c478bd9Sstevel@tonic-gate 1057c478bd9Sstevel@tonic-gate#endif /* __lint */ 1067c478bd9Sstevel@tonic-gate 1077c478bd9Sstevel@tonic-gate#endif /* __i386 */ 1087c478bd9Sstevel@tonic-gate 1097c478bd9Sstevel@tonic-gate 1107c478bd9Sstevel@tonic-gate#if defined(__lint) 
1117c478bd9Sstevel@tonic-gate 1127c478bd9Sstevel@tonic-gatevoid 1137c478bd9Sstevel@tonic-gate_interrupt(void) 1147c478bd9Sstevel@tonic-gate{} 1157c478bd9Sstevel@tonic-gate 1167c478bd9Sstevel@tonic-gate#else /* __lint */ 1177c478bd9Sstevel@tonic-gate 1187c478bd9Sstevel@tonic-gate#if defined(__amd64) 1197c478bd9Sstevel@tonic-gate 1207c478bd9Sstevel@tonic-gate /* 1217c478bd9Sstevel@tonic-gate * Common register usage: 1227c478bd9Sstevel@tonic-gate * 1237c478bd9Sstevel@tonic-gate * %rbx cpu pointer 1247c478bd9Sstevel@tonic-gate * %r12 trap trace pointer -and- stash of 1257c478bd9Sstevel@tonic-gate * vec across intr_thread dispatch. 1267c478bd9Sstevel@tonic-gate * %r13d ipl of isr 1277c478bd9Sstevel@tonic-gate * %r14d old ipl (ipl level we entered on) 1287c478bd9Sstevel@tonic-gate * %r15 interrupted thread stack pointer 1297c478bd9Sstevel@tonic-gate */ 1307c478bd9Sstevel@tonic-gate ENTRY_NP2(cmnint, _interrupt) 1317c478bd9Sstevel@tonic-gate 1327c478bd9Sstevel@tonic-gate INTR_PUSH 1337c478bd9Sstevel@tonic-gate 1347c478bd9Sstevel@tonic-gate /* 1357c478bd9Sstevel@tonic-gate * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry 1367c478bd9Sstevel@tonic-gate */ 1377c478bd9Sstevel@tonic-gate TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT) 1387c478bd9Sstevel@tonic-gate /* Uses labels 8 and 9 */ 1397c478bd9Sstevel@tonic-gate TRACE_REGS(%r12, %rsp, %rax, %rbx) /* Uses label 9 */ 1407c478bd9Sstevel@tonic-gate TRACE_STAMP(%r12) /* Clobbers %eax, %edx, uses 9 */ 1417c478bd9Sstevel@tonic-gate 1427c478bd9Sstevel@tonic-gate DISABLE_INTR_FLAGS /* (and set kernel flag values) */ 1437c478bd9Sstevel@tonic-gate 1447c478bd9Sstevel@tonic-gate movq %rsp, %rbp 1457c478bd9Sstevel@tonic-gate 1467c478bd9Sstevel@tonic-gate TRACE_STACK(%r12) 1477c478bd9Sstevel@tonic-gate 1487c478bd9Sstevel@tonic-gate LOADCPU(%rbx) /* &cpu */ 1497c478bd9Sstevel@tonic-gate leaq REGOFF_TRAPNO(%rbp), %rsi /* &vector */ 1507c478bd9Sstevel@tonic-gate movl CPU_PRI(%rbx), %r14d /* old ipl */ 
1517c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%rbx), %edx 1527c478bd9Sstevel@tonic-gate 1537c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 1547c478bd9Sstevel@tonic-gate movl $255, TTR_IPL(%r12) 1557c478bd9Sstevel@tonic-gate movl %r14d, %edi 1567c478bd9Sstevel@tonic-gate movb %dil, TTR_PRI(%r12) 1577c478bd9Sstevel@tonic-gate movl CPU_BASE_SPL(%rbx), %edi 1587c478bd9Sstevel@tonic-gate movb %dil, TTR_SPL(%r12) 1597c478bd9Sstevel@tonic-gate movb $255, TTR_VECTOR(%r12) 1607c478bd9Sstevel@tonic-gate#endif 1617c478bd9Sstevel@tonic-gate 1627c478bd9Sstevel@tonic-gate /* 1637c478bd9Sstevel@tonic-gate * Check to see if the trap number is T_SOFTINT; if it is, 1647c478bd9Sstevel@tonic-gate * jump straight to dosoftint now. 1657c478bd9Sstevel@tonic-gate */ 1667c478bd9Sstevel@tonic-gate cmpq $T_SOFTINT, (%rsi) 1677c478bd9Sstevel@tonic-gate je dosoftint 1687c478bd9Sstevel@tonic-gate 1697c478bd9Sstevel@tonic-gate /* 1707c478bd9Sstevel@tonic-gate * Raise the interrupt priority level, returns newpil. 1717c478bd9Sstevel@tonic-gate * (The vector address is in %rsi so setlvl can update it.) 
1727c478bd9Sstevel@tonic-gate */ 1737c478bd9Sstevel@tonic-gate movl %r14d, %edi /* old ipl */ 1747c478bd9Sstevel@tonic-gate /* &vector */ 1757c478bd9Sstevel@tonic-gate call *setlvl(%rip) 1767c478bd9Sstevel@tonic-gate 1777c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 1787c478bd9Sstevel@tonic-gate movb %al, TTR_IPL(%r12) 1797c478bd9Sstevel@tonic-gate#endif 1807c478bd9Sstevel@tonic-gate /* 1817c478bd9Sstevel@tonic-gate * check for spurious interrupt 1827c478bd9Sstevel@tonic-gate */ 1837c478bd9Sstevel@tonic-gate cmpl $-1, %eax 1847c478bd9Sstevel@tonic-gate je _sys_rtt 1857c478bd9Sstevel@tonic-gate 1867c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 1877c478bd9Sstevel@tonic-gate movl %r14d, %edx 1887c478bd9Sstevel@tonic-gate movb %dl, TTR_PRI(%r12) 1897c478bd9Sstevel@tonic-gate movl CPU_BASE_SPL(%rbx), %edx 1907c478bd9Sstevel@tonic-gate movb %dl, TTR_SPL(%r12) 1917c478bd9Sstevel@tonic-gate#endif 1927c478bd9Sstevel@tonic-gate movl %eax, CPU_PRI(%rbx) /* update ipl */ 1937c478bd9Sstevel@tonic-gate 1947c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 1957c478bd9Sstevel@tonic-gate movl REGOFF_TRAPNO(%rbp), %edx 1967c478bd9Sstevel@tonic-gate movb %dl, TTR_VECTOR(%r12) 1977c478bd9Sstevel@tonic-gate#endif 1987c478bd9Sstevel@tonic-gate movl %eax, %r13d /* ipl of isr */ 1997c478bd9Sstevel@tonic-gate 2007c478bd9Sstevel@tonic-gate /* 2017c478bd9Sstevel@tonic-gate * At this point we can take one of two paths. 2027c478bd9Sstevel@tonic-gate * If the new level is at or below lock level, we will 2037c478bd9Sstevel@tonic-gate * run this interrupt in a separate thread. 
2047c478bd9Sstevel@tonic-gate */ 2057c478bd9Sstevel@tonic-gate cmpl $LOCK_LEVEL, %eax 2067c478bd9Sstevel@tonic-gate jbe intr_thread 2077c478bd9Sstevel@tonic-gate 2087c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 2097c478bd9Sstevel@tonic-gate movl %r13d, %esi /* ipl */ 2107c478bd9Sstevel@tonic-gate movl %r14d, %edx /* old ipl */ 2117c478bd9Sstevel@tonic-gate movq %rbp, %rcx /* ®s */ 2127c478bd9Sstevel@tonic-gate call hilevel_intr_prolog 2137c478bd9Sstevel@tonic-gate orl %eax, %eax /* zero if need to switch stack */ 2147c478bd9Sstevel@tonic-gate jnz 1f 2157c478bd9Sstevel@tonic-gate 2167c478bd9Sstevel@tonic-gate /* 2177c478bd9Sstevel@tonic-gate * Save the thread stack and get on the cpu's interrupt stack 2187c478bd9Sstevel@tonic-gate */ 2197c478bd9Sstevel@tonic-gate movq %rsp, %r15 2207c478bd9Sstevel@tonic-gate movq CPU_INTR_STACK(%rbx), %rsp 2217c478bd9Sstevel@tonic-gate1: 2227c478bd9Sstevel@tonic-gate 2237c478bd9Sstevel@tonic-gate sti 2247c478bd9Sstevel@tonic-gate 2257c478bd9Sstevel@tonic-gate /* 2267c478bd9Sstevel@tonic-gate * Walk the list of handlers for this vector, calling 2277c478bd9Sstevel@tonic-gate * them as we go until no more interrupts are claimed. 
2287c478bd9Sstevel@tonic-gate */ 2297c478bd9Sstevel@tonic-gate movl REGOFF_TRAPNO(%rbp), %edi 2307c478bd9Sstevel@tonic-gate call av_dispatch_autovect 2317c478bd9Sstevel@tonic-gate 2327c478bd9Sstevel@tonic-gate cli 2337c478bd9Sstevel@tonic-gate 2347c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 2357c478bd9Sstevel@tonic-gate movl %r13d, %esi /* ipl */ 2367c478bd9Sstevel@tonic-gate movl %r14d, %edx /* oldipl */ 2377c478bd9Sstevel@tonic-gate movl REGOFF_TRAPNO(%rbp), %ecx /* vec */ 2387c478bd9Sstevel@tonic-gate call hilevel_intr_epilog 2397c478bd9Sstevel@tonic-gate orl %eax, %eax /* zero if need to switch stack */ 2407c478bd9Sstevel@tonic-gate jnz 2f 2417c478bd9Sstevel@tonic-gate movq %r15, %rsp 2427c478bd9Sstevel@tonic-gate2: /* 2437c478bd9Sstevel@tonic-gate * Check for, and execute, softints before we iret. 2447c478bd9Sstevel@tonic-gate * 2457c478bd9Sstevel@tonic-gate * (dosoftint expects oldipl in %r14d (which is where it is) 2467c478bd9Sstevel@tonic-gate * the cpu pointer in %rbx (which is where it is) and the 2477c478bd9Sstevel@tonic-gate * softinfo in %edx (which is where we'll put it right now)) 2487c478bd9Sstevel@tonic-gate */ 2497c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%rbx), %edx 2507c478bd9Sstevel@tonic-gate orl %edx, %edx 2517c478bd9Sstevel@tonic-gate jz _sys_rtt 2527c478bd9Sstevel@tonic-gate jmp dosoftint 2537c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 2547c478bd9Sstevel@tonic-gate 2557c478bd9Sstevel@tonic-gate SET_SIZE(cmnint) 2567c478bd9Sstevel@tonic-gate SET_SIZE(_interrupt) 2577c478bd9Sstevel@tonic-gate 2587c478bd9Sstevel@tonic-gate/* 2597c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread 2607c478bd9Sstevel@tonic-gate * 2617c478bd9Sstevel@tonic-gate * As we branch here, interrupts are still masked, 2627c478bd9Sstevel@tonic-gate * %rbx still contains the cpu pointer, 2637c478bd9Sstevel@tonic-gate * %r14d contains the old ipl that we came in on, and 2647c478bd9Sstevel@tonic-gate * %eax contains the new ipl that we got from the setlvl 
routine 2657c478bd9Sstevel@tonic-gate */ 2667c478bd9Sstevel@tonic-gate 2677c478bd9Sstevel@tonic-gate ENTRY_NP(intr_thread) 2687c478bd9Sstevel@tonic-gate 2697c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 2707c478bd9Sstevel@tonic-gate movq %rbp, %rsi /* ®s = stack pointer for _sys_rtt */ 2717c478bd9Sstevel@tonic-gate movl REGOFF_TRAPNO(%rbp), %r12d /* stash the vec */ 2727c478bd9Sstevel@tonic-gate movl %eax, %edx /* new pil from setlvlx() */ 2737c478bd9Sstevel@tonic-gate call intr_thread_prolog 2747c478bd9Sstevel@tonic-gate movq %rsp, %r15 2757c478bd9Sstevel@tonic-gate movq %rax, %rsp /* t_stk from interrupt thread */ 2767c478bd9Sstevel@tonic-gate movq %rsp, %rbp 2777c478bd9Sstevel@tonic-gate 2787c478bd9Sstevel@tonic-gate sti 2797c478bd9Sstevel@tonic-gate 2807c478bd9Sstevel@tonic-gate testl $FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx) 2817c478bd9Sstevel@tonic-gate jz 1f 2827c478bd9Sstevel@tonic-gate /* 2837c478bd9Sstevel@tonic-gate * ftracing support. do we need this on x86? 2847c478bd9Sstevel@tonic-gate */ 2857c478bd9Sstevel@tonic-gate leaq _ftrace_intr_thread_fmt(%rip), %rdi 2867c478bd9Sstevel@tonic-gate movq %rbp, %rsi /* ®s */ 2877c478bd9Sstevel@tonic-gate movl %r12d, %edx /* vec */ 2887c478bd9Sstevel@tonic-gate movq CPU_THREAD(%rbx), %r11 /* (the interrupt thread) */ 2897c478bd9Sstevel@tonic-gate movzbl T_PIL(%r11), %ecx /* newipl */ 2907c478bd9Sstevel@tonic-gate call ftrace_3_notick 2917c478bd9Sstevel@tonic-gate1: 2927c478bd9Sstevel@tonic-gate movl %r12d, %edi /* vec */ 2937c478bd9Sstevel@tonic-gate call av_dispatch_autovect 2947c478bd9Sstevel@tonic-gate 2957c478bd9Sstevel@tonic-gate cli 2967c478bd9Sstevel@tonic-gate 2977c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 2987c478bd9Sstevel@tonic-gate movl %r12d, %esi /* vec */ 2997c478bd9Sstevel@tonic-gate movl %r14d, %edx /* oldpil */ 3007c478bd9Sstevel@tonic-gate call intr_thread_epilog 3017c478bd9Sstevel@tonic-gate /* 3027c478bd9Sstevel@tonic-gate * If we return from here (we might not if the interrupted 
thread 3037c478bd9Sstevel@tonic-gate * has exited or blocked, in which case we'll have quietly swtch()ed 3047c478bd9Sstevel@tonic-gate * away) then we need to switch back to our old %rsp 3057c478bd9Sstevel@tonic-gate */ 3067c478bd9Sstevel@tonic-gate movq %r15, %rsp 3077c478bd9Sstevel@tonic-gate movq %rsp, %rbp 3087c478bd9Sstevel@tonic-gate /* 3097c478bd9Sstevel@tonic-gate * Check for, and execute, softints before we iret. 3107c478bd9Sstevel@tonic-gate * 3117c478bd9Sstevel@tonic-gate * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and 3127c478bd9Sstevel@tonic-gate * the mcpu_softinfo.st_pending field in %edx. 3137c478bd9Sstevel@tonic-gate */ 3147c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%rbx), %edx 3157c478bd9Sstevel@tonic-gate orl %edx, %edx 3167c478bd9Sstevel@tonic-gate jz _sys_rtt 3177c478bd9Sstevel@tonic-gate /*FALLTHROUGH*/ 3187c478bd9Sstevel@tonic-gate 3197c478bd9Sstevel@tonic-gate/* 3207c478bd9Sstevel@tonic-gate * Process soft interrupts. 3217c478bd9Sstevel@tonic-gate * Interrupts are masked, and we have a minimal frame on the stack. 3227c478bd9Sstevel@tonic-gate * %edx should contain the mcpu_softinfo.st_pending field 3237c478bd9Sstevel@tonic-gate */ 3247c478bd9Sstevel@tonic-gate 3257c478bd9Sstevel@tonic-gate ALTENTRY(dosoftint) 3267c478bd9Sstevel@tonic-gate 3277c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 3287c478bd9Sstevel@tonic-gate movq %rbp, %rsi /* ®s = stack pointer for _sys_rtt */ 3297c478bd9Sstevel@tonic-gate /* cpu->cpu_m.mcpu_softinfo.st_pending */ 3307c478bd9Sstevel@tonic-gate movl %r14d, %ecx /* oldipl */ 3317c478bd9Sstevel@tonic-gate call dosoftint_prolog 3327c478bd9Sstevel@tonic-gate /* 3337c478bd9Sstevel@tonic-gate * dosoftint_prolog() usually returns a stack pointer for the 3347c478bd9Sstevel@tonic-gate * interrupt thread that we must switch to. 
However, if the 3357c478bd9Sstevel@tonic-gate * returned stack pointer is NULL, then the software interrupt was 3367c478bd9Sstevel@tonic-gate * too low in priority to run now; we'll catch it another time. 3377c478bd9Sstevel@tonic-gate */ 3387c478bd9Sstevel@tonic-gate orq %rax, %rax 3397c478bd9Sstevel@tonic-gate jz _sys_rtt 3407c478bd9Sstevel@tonic-gate movq %rsp, %r15 3417c478bd9Sstevel@tonic-gate movq %rax, %rsp /* t_stk from interrupt thread */ 3427c478bd9Sstevel@tonic-gate movq %rsp, %rbp 3437c478bd9Sstevel@tonic-gate 3447c478bd9Sstevel@tonic-gate sti 3457c478bd9Sstevel@tonic-gate 3467c478bd9Sstevel@tonic-gate /* 3477c478bd9Sstevel@tonic-gate * Enabling interrupts (above) could raise the current ipl 3487c478bd9Sstevel@tonic-gate * and base spl. But, we continue processing the current soft 3497c478bd9Sstevel@tonic-gate * interrupt and we will check the base spl next time around 3507c478bd9Sstevel@tonic-gate * so that blocked interrupt threads get a chance to run. 3517c478bd9Sstevel@tonic-gate */ 3527c478bd9Sstevel@tonic-gate movq CPU_THREAD(%rbx), %r11 /* now an interrupt thread */ 3537c478bd9Sstevel@tonic-gate movzbl T_PIL(%r11), %edi 3547c478bd9Sstevel@tonic-gate call av_dispatch_softvect 3557c478bd9Sstevel@tonic-gate 3567c478bd9Sstevel@tonic-gate cli 3577c478bd9Sstevel@tonic-gate 3587c478bd9Sstevel@tonic-gate movq %rbx, %rdi /* &cpu */ 3597c478bd9Sstevel@tonic-gate movl %r14d, %esi /* oldpil */ 3607c478bd9Sstevel@tonic-gate call dosoftint_epilog 3617c478bd9Sstevel@tonic-gate movq %r15, %rsp /* back on old stack pointer */ 3627c478bd9Sstevel@tonic-gate movq %rsp, %rbp 3637c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%rbx), %edx 3647c478bd9Sstevel@tonic-gate orl %edx, %edx 3657c478bd9Sstevel@tonic-gate jz _sys_rtt 3667c478bd9Sstevel@tonic-gate jmp dosoftint 3677c478bd9Sstevel@tonic-gate 3687c478bd9Sstevel@tonic-gate SET_SIZE(dosoftint) 3697c478bd9Sstevel@tonic-gate SET_SIZE(intr_thread) 3707c478bd9Sstevel@tonic-gate 3717c478bd9Sstevel@tonic-gate#elif 
defined(__i386) 3727c478bd9Sstevel@tonic-gate 3737c478bd9Sstevel@tonic-gate/* 3747c478bd9Sstevel@tonic-gate * One day, this should just invoke the C routines that know how to 3757c478bd9Sstevel@tonic-gate * do all the interrupt bookkeeping. In the meantime, try 3767c478bd9Sstevel@tonic-gate * and make the assembler a little more comprehensible. 3777c478bd9Sstevel@tonic-gate */ 3787c478bd9Sstevel@tonic-gate 3797c478bd9Sstevel@tonic-gate#define INC64(basereg, offset) \ 3807c478bd9Sstevel@tonic-gate addl $1, offset(basereg); \ 3817c478bd9Sstevel@tonic-gate adcl $0, offset + 4(basereg) 3827c478bd9Sstevel@tonic-gate 3837c478bd9Sstevel@tonic-gate#define TSC_CLR(basereg, offset) \ 3847c478bd9Sstevel@tonic-gate movl $0, offset(basereg); \ 3857c478bd9Sstevel@tonic-gate movl $0, offset + 4(basereg) 3867c478bd9Sstevel@tonic-gate 3877c478bd9Sstevel@tonic-gate/* 3887c478bd9Sstevel@tonic-gate * The following macros assume the time value is in %edx:%eax 3897c478bd9Sstevel@tonic-gate * e.g. from a rdtsc instruction. 
3907c478bd9Sstevel@tonic-gate */ 3917a364d25Sschwartz#define TSC_STORE(reg, offset) \ 3927c478bd9Sstevel@tonic-gate movl %eax, offset(reg); \ 3937c478bd9Sstevel@tonic-gate movl %edx, offset + 4(reg) 3947c478bd9Sstevel@tonic-gate 3957a364d25Sschwartz#define TSC_LOAD(reg, offset) \ 3967a364d25Sschwartz movl offset(reg), %eax; \ 3977a364d25Sschwartz movl offset + 4(reg), %edx 3987a364d25Sschwartz 3997c478bd9Sstevel@tonic-gate#define TSC_ADD_TO(reg, offset) \ 4007c478bd9Sstevel@tonic-gate addl %eax, offset(reg); \ 4017c478bd9Sstevel@tonic-gate adcl %edx, offset + 4(reg) 4027c478bd9Sstevel@tonic-gate 4037c478bd9Sstevel@tonic-gate#define TSC_SUB_FROM(reg, offset) \ 4047c478bd9Sstevel@tonic-gate subl offset(reg), %eax; \ 4057c478bd9Sstevel@tonic-gate sbbl offset + 4(reg), %edx /* interval in edx:eax */ 4067c478bd9Sstevel@tonic-gate 4077c478bd9Sstevel@tonic-gate/* 4087c478bd9Sstevel@tonic-gate * basereg - pointer to cpu struct 4097c478bd9Sstevel@tonic-gate * pilreg - pil or converted pil (pil - (LOCK_LEVEL + 1)) 4107c478bd9Sstevel@tonic-gate * 4117c478bd9Sstevel@tonic-gate * Returns (base + pil * 8) in pilreg 4127c478bd9Sstevel@tonic-gate */ 4137c478bd9Sstevel@tonic-gate#define PILBASE(basereg, pilreg) \ 4147c478bd9Sstevel@tonic-gate lea (basereg, pilreg, 8), pilreg 4157c478bd9Sstevel@tonic-gate 4167c478bd9Sstevel@tonic-gate/* 4177c478bd9Sstevel@tonic-gate * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg 4187c478bd9Sstevel@tonic-gate */ 4197a364d25Sschwartz#define HIGHPILBASE(basereg, pilreg) \ 4207a364d25Sschwartz subl $LOCK_LEVEL + 1, pilreg; \ 4217c478bd9Sstevel@tonic-gate PILBASE(basereg, pilreg) 4227c478bd9Sstevel@tonic-gate 4237c478bd9Sstevel@tonic-gate/* 4247a364d25Sschwartz * Returns (base + pil * 16) in pilreg 4257a364d25Sschwartz */ 4267a364d25Sschwartz#define PILBASE_INTRSTAT(basereg, pilreg) \ 4277a364d25Sschwartz shl $4, pilreg; \ 4287a364d25Sschwartz addl basereg, pilreg; 4297a364d25Sschwartz 4307a364d25Sschwartz/* 431eda89462Sesolom * Returns (cpu + 
cpu_mstate * 8) in tgt 432eda89462Sesolom */ 433eda89462Sesolom#define INTRACCTBASE(cpureg, tgtreg) \ 434eda89462Sesolom movzwl CPU_MSTATE(cpureg), tgtreg; \ 435eda89462Sesolom lea (cpureg, tgtreg, 8), tgtreg 436eda89462Sesolom 437eda89462Sesolom/* 4387c478bd9Sstevel@tonic-gate * cpu_stats.sys.intr[PIL]++ 4397c478bd9Sstevel@tonic-gate */ 4407c478bd9Sstevel@tonic-gate#define INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg) \ 4417c478bd9Sstevel@tonic-gate movl pilreg, tmpreg_32; \ 4427c478bd9Sstevel@tonic-gate PILBASE(basereg, tmpreg); \ 4437c478bd9Sstevel@tonic-gate INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8)) 4447c478bd9Sstevel@tonic-gate 4457c478bd9Sstevel@tonic-gate/* 4467c478bd9Sstevel@tonic-gate * Unlink thread from CPU's list 4477c478bd9Sstevel@tonic-gate */ 4487c478bd9Sstevel@tonic-gate#define UNLINK_INTR_THREAD(cpureg, ithread, tmpreg) \ 4497c478bd9Sstevel@tonic-gate mov CPU_INTR_THREAD(cpureg), ithread; \ 4507c478bd9Sstevel@tonic-gate mov T_LINK(ithread), tmpreg; \ 4517c478bd9Sstevel@tonic-gate mov tmpreg, CPU_INTR_THREAD(cpureg) 4527c478bd9Sstevel@tonic-gate 4537c478bd9Sstevel@tonic-gate/* 4547c478bd9Sstevel@tonic-gate * Link a thread into CPU's list 4557c478bd9Sstevel@tonic-gate */ 4567c478bd9Sstevel@tonic-gate#define LINK_INTR_THREAD(cpureg, ithread, tmpreg) \ 4577c478bd9Sstevel@tonic-gate mov CPU_INTR_THREAD(cpureg), tmpreg; \ 4587c478bd9Sstevel@tonic-gate mov tmpreg, T_LINK(ithread); \ 4597c478bd9Sstevel@tonic-gate mov ithread, CPU_INTR_THREAD(cpureg) 4607c478bd9Sstevel@tonic-gate 4617c478bd9Sstevel@tonic-gate#if defined(DEBUG) 4627c478bd9Sstevel@tonic-gate 4637c478bd9Sstevel@tonic-gate/* 4647c478bd9Sstevel@tonic-gate * Do not call panic, if panic is already in progress. 
4657c478bd9Sstevel@tonic-gate */ 4667c478bd9Sstevel@tonic-gate#define __PANIC(msg, label) \ 4677c478bd9Sstevel@tonic-gate cmpl $0, panic_quiesce; \ 4687c478bd9Sstevel@tonic-gate jne label; \ 4697c478bd9Sstevel@tonic-gate pushl $msg; \ 4707c478bd9Sstevel@tonic-gate call panic 4717c478bd9Sstevel@tonic-gate 4727c478bd9Sstevel@tonic-gate#define __CMP64_JNE(basereg, offset, label) \ 4737c478bd9Sstevel@tonic-gate cmpl $0, offset(basereg); \ 4747c478bd9Sstevel@tonic-gate jne label; \ 4757c478bd9Sstevel@tonic-gate cmpl $0, offset + 4(basereg); \ 4767c478bd9Sstevel@tonic-gate jne label 4777c478bd9Sstevel@tonic-gate 4787c478bd9Sstevel@tonic-gate/* 4797c478bd9Sstevel@tonic-gate * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL))) 4807c478bd9Sstevel@tonic-gate */ 4817c478bd9Sstevel@tonic-gate#define ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg) \ 4827c478bd9Sstevel@tonic-gate btl pilreg, CPU_INTR_ACTV(basereg); \ 4837c478bd9Sstevel@tonic-gate jnc 4f; \ 4847c478bd9Sstevel@tonic-gate __PANIC(msg, 4f); \ 4857c478bd9Sstevel@tonic-gate4: 4867c478bd9Sstevel@tonic-gate 4877c478bd9Sstevel@tonic-gate/* 4887c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_intr_actv & (1 << PIL)) 4897c478bd9Sstevel@tonic-gate */ 4907c478bd9Sstevel@tonic-gate#define ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg) \ 4917c478bd9Sstevel@tonic-gate btl pilreg, CPU_INTR_ACTV(basereg); \ 4927c478bd9Sstevel@tonic-gate jc 5f; \ 4937c478bd9Sstevel@tonic-gate __PANIC(msg, 5f); \ 4947c478bd9Sstevel@tonic-gate5: 4957c478bd9Sstevel@tonic-gate 4967c478bd9Sstevel@tonic-gate/* 4977c478bd9Sstevel@tonic-gate * ASSERT(CPU->cpu_pil_high_start != 0) 4987c478bd9Sstevel@tonic-gate */ 4997c478bd9Sstevel@tonic-gate#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg) \ 5007c478bd9Sstevel@tonic-gate __CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f); \ 5017c478bd9Sstevel@tonic-gate __PANIC(_interrupt_timestamp_zero, 6f); \ 5027c478bd9Sstevel@tonic-gate6: 5037c478bd9Sstevel@tonic-gate 5047c478bd9Sstevel@tonic-gate/* 5057c478bd9Sstevel@tonic-gate * 
ASSERT(t->t_intr_start != 0) 5067c478bd9Sstevel@tonic-gate */ 5077c478bd9Sstevel@tonic-gate#define ASSERT_T_INTR_START_NZ(basereg) \ 5087c478bd9Sstevel@tonic-gate __CMP64_JNE(basereg, T_INTR_START, 7f); \ 5097c478bd9Sstevel@tonic-gate __PANIC(_intr_thread_t_intr_start_zero, 7f); \ 5107c478bd9Sstevel@tonic-gate7: 5117c478bd9Sstevel@tonic-gate 5127c478bd9Sstevel@tonic-gate_interrupt_actv_bit_set: 5137c478bd9Sstevel@tonic-gate .string "_interrupt(): cpu_intr_actv bit already set for PIL" 5147c478bd9Sstevel@tonic-gate_interrupt_actv_bit_not_set: 5157c478bd9Sstevel@tonic-gate .string "_interrupt(): cpu_intr_actv bit not set for PIL" 5167c478bd9Sstevel@tonic-gate_interrupt_timestamp_zero: 5177c478bd9Sstevel@tonic-gate .string "_interrupt(): timestamp zero upon handler return" 5187c478bd9Sstevel@tonic-gate_intr_thread_actv_bit_not_set: 5197c478bd9Sstevel@tonic-gate .string "intr_thread(): cpu_intr_actv bit not set for PIL" 5207c478bd9Sstevel@tonic-gate_intr_thread_t_intr_start_zero: 5217c478bd9Sstevel@tonic-gate .string "intr_thread(): t_intr_start zero upon handler return" 5227c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_set: 5237c478bd9Sstevel@tonic-gate .string "dosoftint(): cpu_intr_actv bit already set for PIL" 5247c478bd9Sstevel@tonic-gate_dosoftint_actv_bit_not_set: 5257c478bd9Sstevel@tonic-gate .string "dosoftint(): cpu_intr_actv bit not set for PIL" 5267c478bd9Sstevel@tonic-gate 5277c478bd9Sstevel@tonic-gate DGDEF(intr_thread_cnt) 5287c478bd9Sstevel@tonic-gate 5297c478bd9Sstevel@tonic-gate#else 5307c478bd9Sstevel@tonic-gate#define ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg) 5317c478bd9Sstevel@tonic-gate#define ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg) 5327c478bd9Sstevel@tonic-gate#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg) 5337c478bd9Sstevel@tonic-gate#define ASSERT_T_INTR_START_NZ(basereg) 5347c478bd9Sstevel@tonic-gate#endif 5357c478bd9Sstevel@tonic-gate 5367c478bd9Sstevel@tonic-gate ENTRY_NP2(cmnint, _interrupt) 5377c478bd9Sstevel@tonic-gate 
5387c478bd9Sstevel@tonic-gate INTR_PUSH 5397c478bd9Sstevel@tonic-gate 5407c478bd9Sstevel@tonic-gate /* 5417c478bd9Sstevel@tonic-gate * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry 5427c478bd9Sstevel@tonic-gate */ 5437c478bd9Sstevel@tonic-gate TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT) 5447c478bd9Sstevel@tonic-gate /* Uses labels 8 and 9 */ 5457c478bd9Sstevel@tonic-gate TRACE_REGS(%esi, %esp, %eax, %ebx) /* Uses label 9 */ 5467c478bd9Sstevel@tonic-gate TRACE_STAMP(%esi) /* Clobbers %eax, %edx, uses 9 */ 5477c478bd9Sstevel@tonic-gate 5487c478bd9Sstevel@tonic-gate movl %esp, %ebp 5497c478bd9Sstevel@tonic-gate DISABLE_INTR_FLAGS 5507c478bd9Sstevel@tonic-gate LOADCPU(%ebx) /* get pointer to CPU struct. Avoid gs refs */ 5517c478bd9Sstevel@tonic-gate leal REGOFF_TRAPNO(%ebp), %ecx /* get address of vector */ 5527c478bd9Sstevel@tonic-gate movl CPU_PRI(%ebx), %edi /* get ipl */ 5537c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%ebx), %edx 5547c478bd9Sstevel@tonic-gate 5557c478bd9Sstevel@tonic-gate / 5567c478bd9Sstevel@tonic-gate / Check to see if the trap number is T_SOFTINT; if it is, we'll 5577c478bd9Sstevel@tonic-gate / jump straight to dosoftint now. 
5587c478bd9Sstevel@tonic-gate / 5597c478bd9Sstevel@tonic-gate cmpl $T_SOFTINT, (%ecx) 5607c478bd9Sstevel@tonic-gate je dosoftint 5617c478bd9Sstevel@tonic-gate 5627c478bd9Sstevel@tonic-gate / raise interrupt priority level 5637c478bd9Sstevel@tonic-gate / oldipl is in %edi, vectorp is in %ecx 5647c478bd9Sstevel@tonic-gate / newipl is returned in %eax 5657c478bd9Sstevel@tonic-gate pushl %ecx 5667c478bd9Sstevel@tonic-gate pushl %edi 5677c478bd9Sstevel@tonic-gate call *setlvl 5687c478bd9Sstevel@tonic-gate popl %edi /* save oldpil in %edi */ 5697c478bd9Sstevel@tonic-gate popl %ecx 5707c478bd9Sstevel@tonic-gate 5717c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 5727c478bd9Sstevel@tonic-gate movb %al, TTR_IPL(%esi) 5737c478bd9Sstevel@tonic-gate#endif 5747c478bd9Sstevel@tonic-gate 5757c478bd9Sstevel@tonic-gate / check for spurious interrupt 5767c478bd9Sstevel@tonic-gate cmp $-1, %eax 5777c478bd9Sstevel@tonic-gate je _sys_rtt 5787c478bd9Sstevel@tonic-gate 5797c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 5807c478bd9Sstevel@tonic-gate movl CPU_PRI(%ebx), %edx 5817c478bd9Sstevel@tonic-gate movb %dl, TTR_PRI(%esi) 5827c478bd9Sstevel@tonic-gate movl CPU_BASE_SPL(%ebx), %edx 5837c478bd9Sstevel@tonic-gate movb %dl, TTR_SPL(%esi) 5847c478bd9Sstevel@tonic-gate#endif 5857c478bd9Sstevel@tonic-gate 5867c478bd9Sstevel@tonic-gate movl %eax, CPU_PRI(%ebx) /* update ipl */ 5877c478bd9Sstevel@tonic-gate movl REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */ 5887c478bd9Sstevel@tonic-gate 5897c478bd9Sstevel@tonic-gate#ifdef TRAPTRACE 5907c478bd9Sstevel@tonic-gate movb %cl, TTR_VECTOR(%esi) 5917c478bd9Sstevel@tonic-gate#endif 5927c478bd9Sstevel@tonic-gate 5937c478bd9Sstevel@tonic-gate / At this point we can take one of two paths. If the new priority 5947c478bd9Sstevel@tonic-gate / level is less than or equal to LOCK LEVEL then we jump to code that 5957c478bd9Sstevel@tonic-gate / will run this interrupt as a separate thread. 
Otherwise the 5967c478bd9Sstevel@tonic-gate / interrupt is NOT run as a separate thread. 5977c478bd9Sstevel@tonic-gate 5987c478bd9Sstevel@tonic-gate / %edi - old priority level 5997c478bd9Sstevel@tonic-gate / %ebp - pointer to REGS 6007c478bd9Sstevel@tonic-gate / %ecx - translated vector 6017c478bd9Sstevel@tonic-gate / %eax - ipl of isr 6027c478bd9Sstevel@tonic-gate / %ebx - cpu pointer 6037c478bd9Sstevel@tonic-gate 6047c478bd9Sstevel@tonic-gate cmpl $LOCK_LEVEL, %eax /* compare to highest thread level */ 6057c478bd9Sstevel@tonic-gate jbe intr_thread /* process as a separate thread */ 6067c478bd9Sstevel@tonic-gate 6077c478bd9Sstevel@tonic-gate cmpl $CBE_HIGH_PIL, %eax /* Is this a CY_HIGH_LEVEL interrupt? */ 6087c478bd9Sstevel@tonic-gate jne 2f 6097c478bd9Sstevel@tonic-gate 6107c478bd9Sstevel@tonic-gate movl REGOFF_PC(%ebp), %esi 6117c478bd9Sstevel@tonic-gate movl %edi, CPU_PROFILE_PIL(%ebx) /* record interrupted PIL */ 6127c478bd9Sstevel@tonic-gate testw $CPL_MASK, REGOFF_CS(%ebp) /* trap from supervisor mode? */ 6137c478bd9Sstevel@tonic-gate jz 1f 6147c478bd9Sstevel@tonic-gate movl %esi, CPU_PROFILE_UPC(%ebx) /* record user PC */ 6157c478bd9Sstevel@tonic-gate movl $0, CPU_PROFILE_PC(%ebx) /* zero kernel PC */ 6167c478bd9Sstevel@tonic-gate jmp 2f 6177c478bd9Sstevel@tonic-gate 6187c478bd9Sstevel@tonic-gate1: 6197c478bd9Sstevel@tonic-gate movl %esi, CPU_PROFILE_PC(%ebx) /* record kernel PC */ 6207c478bd9Sstevel@tonic-gate movl $0, CPU_PROFILE_UPC(%ebx) /* zero user PC */ 6217c478bd9Sstevel@tonic-gate 6227c478bd9Sstevel@tonic-gate2: 6237c478bd9Sstevel@tonic-gate pushl %ecx /* vec */ 6247c478bd9Sstevel@tonic-gate pushl %eax /* newpil */ 6257c478bd9Sstevel@tonic-gate 6267c478bd9Sstevel@tonic-gate / 6277c478bd9Sstevel@tonic-gate / See if we are interrupting another high-level interrupt. 
6287c478bd9Sstevel@tonic-gate / 6297c478bd9Sstevel@tonic-gate movl CPU_INTR_ACTV(%ebx), %eax 6307c478bd9Sstevel@tonic-gate andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax 6317c478bd9Sstevel@tonic-gate jz 0f 6327c478bd9Sstevel@tonic-gate / 6337c478bd9Sstevel@tonic-gate / We have interrupted another high-level interrupt. 6347c478bd9Sstevel@tonic-gate / Load starting timestamp, compute interval, update cumulative counter. 6357c478bd9Sstevel@tonic-gate / 6367c478bd9Sstevel@tonic-gate bsrl %eax, %ecx /* find PIL of interrupted handler */ 6377a364d25Sschwartz movl %ecx, %esi /* save PIL for later */ 6387a364d25Sschwartz HIGHPILBASE(%ebx, %ecx) 6397c478bd9Sstevel@tonic-gate_tsc_patch1: 6407c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 6417c478bd9Sstevel@tonic-gate TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START) 6427a364d25Sschwartz 6437a364d25Sschwartz PILBASE_INTRSTAT(%ebx, %esi) 6447a364d25Sschwartz TSC_ADD_TO(%esi, CPU_INTRSTAT) 645eda89462Sesolom INTRACCTBASE(%ebx, %ecx) 646eda89462Sesolom TSC_ADD_TO(%ecx, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */ 6477c478bd9Sstevel@tonic-gate / 6487c478bd9Sstevel@tonic-gate / Another high-level interrupt is active below this one, so 6497c478bd9Sstevel@tonic-gate / there is no need to check for an interrupt thread. That will be 6507c478bd9Sstevel@tonic-gate / done by the lowest priority high-level interrupt active. 6517c478bd9Sstevel@tonic-gate / 6527c478bd9Sstevel@tonic-gate jmp 1f 6537c478bd9Sstevel@tonic-gate0: 6547c478bd9Sstevel@tonic-gate / 6557c478bd9Sstevel@tonic-gate / See if we are interrupting a low-level interrupt thread. 6567c478bd9Sstevel@tonic-gate / 6577c478bd9Sstevel@tonic-gate movl CPU_THREAD(%ebx), %esi 6587c478bd9Sstevel@tonic-gate testw $T_INTR_THREAD, T_FLAGS(%esi) 6597c478bd9Sstevel@tonic-gate jz 1f 6607c478bd9Sstevel@tonic-gate / 6617c478bd9Sstevel@tonic-gate / We have interrupted an interrupt thread. 
Account for its time slice 6627c478bd9Sstevel@tonic-gate / only if its time stamp is non-zero. 6637c478bd9Sstevel@tonic-gate / 6647c478bd9Sstevel@tonic-gate cmpl $0, T_INTR_START+4(%esi) 6657c478bd9Sstevel@tonic-gate jne 0f 6667c478bd9Sstevel@tonic-gate cmpl $0, T_INTR_START(%esi) 6677c478bd9Sstevel@tonic-gate je 1f 6687c478bd9Sstevel@tonic-gate0: 6697c478bd9Sstevel@tonic-gate movzbl T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */ 6707a364d25Sschwartz PILBASE_INTRSTAT(%ebx, %ecx) 6717c478bd9Sstevel@tonic-gate_tsc_patch2: 6727c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 6737c478bd9Sstevel@tonic-gate TSC_SUB_FROM(%esi, T_INTR_START) 6747c478bd9Sstevel@tonic-gate TSC_CLR(%esi, T_INTR_START) 6757c478bd9Sstevel@tonic-gate TSC_ADD_TO(%ecx, CPU_INTRSTAT) 676eda89462Sesolom INTRACCTBASE(%ebx, %ecx) 677eda89462Sesolom TSC_ADD_TO(%ecx, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */ 6787c478bd9Sstevel@tonic-gate1: 6797c478bd9Sstevel@tonic-gate / Store starting timestamp in CPU structure for this PIL. 6807c478bd9Sstevel@tonic-gate popl %ecx /* restore new PIL */ 6817c478bd9Sstevel@tonic-gate pushl %ecx 6827a364d25Sschwartz HIGHPILBASE(%ebx, %ecx) 6837c478bd9Sstevel@tonic-gate_tsc_patch3: 6847c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 6857a364d25Sschwartz TSC_STORE(%ecx, CPU_PIL_HIGH_START) 6867c478bd9Sstevel@tonic-gate 6877c478bd9Sstevel@tonic-gate popl %eax /* restore new pil */ 6887c478bd9Sstevel@tonic-gate popl %ecx /* vec */ 6897c478bd9Sstevel@tonic-gate / 6907c478bd9Sstevel@tonic-gate / Set bit for this PIL in CPU's interrupt active bitmask. 
6917c478bd9Sstevel@tonic-gate / 6927c478bd9Sstevel@tonic-gate 6937c478bd9Sstevel@tonic-gate ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set) 6947c478bd9Sstevel@tonic-gate 6957c478bd9Sstevel@tonic-gate / Save old CPU_INTR_ACTV 6967c478bd9Sstevel@tonic-gate movl CPU_INTR_ACTV(%ebx), %esi 6977c478bd9Sstevel@tonic-gate 6987c478bd9Sstevel@tonic-gate cmpl $15, %eax 6997c478bd9Sstevel@tonic-gate jne 0f 7007c478bd9Sstevel@tonic-gate / PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv 7017c478bd9Sstevel@tonic-gate incw CPU_INTR_ACTV_REF(%ebx) /* increment ref count */ 7027c478bd9Sstevel@tonic-gate0: 7037c478bd9Sstevel@tonic-gate btsl %eax, CPU_INTR_ACTV(%ebx) 7047c478bd9Sstevel@tonic-gate / 7057c478bd9Sstevel@tonic-gate / Handle high-level nested interrupt on separate interrupt stack 7067c478bd9Sstevel@tonic-gate / 7077c478bd9Sstevel@tonic-gate testl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi 7087c478bd9Sstevel@tonic-gate jnz onstack /* already on interrupt stack */ 7097c478bd9Sstevel@tonic-gate movl %esp, %eax 7107c478bd9Sstevel@tonic-gate movl CPU_INTR_STACK(%ebx), %esp /* get on interrupt stack */ 7117c478bd9Sstevel@tonic-gate pushl %eax /* save the thread stack pointer */ 7127c478bd9Sstevel@tonic-gateonstack: 7137c478bd9Sstevel@tonic-gate movl $autovect, %esi /* get autovect structure before */ 7147c478bd9Sstevel@tonic-gate /* sti to save on AGI later */ 7157c478bd9Sstevel@tonic-gate sti /* enable interrupts */ 7167c478bd9Sstevel@tonic-gate pushl %ecx /* save interrupt vector */ 7177c478bd9Sstevel@tonic-gate / 7187c478bd9Sstevel@tonic-gate / Get handler address 7197c478bd9Sstevel@tonic-gate / 7207c478bd9Sstevel@tonic-gatepre_loop1: 7217c478bd9Sstevel@tonic-gate movl AVH_LINK(%esi, %ecx, 8), %esi 7227c478bd9Sstevel@tonic-gate xorl %ebx, %ebx /* bh is no. 
of intpts in chain */ 7237c478bd9Sstevel@tonic-gate /* bl is DDI_INTR_CLAIMED status of chain */ 7247c478bd9Sstevel@tonic-gate testl %esi, %esi /* if pointer is null */ 7257c478bd9Sstevel@tonic-gate jz .intr_ret /* then skip */ 7267c478bd9Sstevel@tonic-gateloop1: 7277c478bd9Sstevel@tonic-gate incb %bh 7287c478bd9Sstevel@tonic-gate movl AV_VECTOR(%esi), %edx /* get the interrupt routine */ 7297c478bd9Sstevel@tonic-gate testl %edx, %edx /* if func is null */ 7307c478bd9Sstevel@tonic-gate jz .intr_ret /* then skip */ 7317c478bd9Sstevel@tonic-gate pushl $0 7327c478bd9Sstevel@tonic-gate pushl AV_INTARG2(%esi) 7337c478bd9Sstevel@tonic-gate pushl AV_INTARG1(%esi) 7347c478bd9Sstevel@tonic-gate pushl AV_VECTOR(%esi) 7357c478bd9Sstevel@tonic-gate pushl AV_DIP(%esi) 7367c478bd9Sstevel@tonic-gate call __dtrace_probe_interrupt__start 7377c478bd9Sstevel@tonic-gate pushl AV_INTARG2(%esi) /* get 2nd arg to interrupt routine */ 7387c478bd9Sstevel@tonic-gate pushl AV_INTARG1(%esi) /* get first arg to interrupt routine */ 7397c478bd9Sstevel@tonic-gate call *%edx /* call interrupt routine with arg */ 7407c478bd9Sstevel@tonic-gate addl $8, %esp 7417c478bd9Sstevel@tonic-gate movl %eax, 16(%esp) 7427c478bd9Sstevel@tonic-gate call __dtrace_probe_interrupt__complete 7437c478bd9Sstevel@tonic-gate addl $20, %esp 7447c478bd9Sstevel@tonic-gate orb %al, %bl /* see if anyone claims intpt. 
*/ 7457c478bd9Sstevel@tonic-gate movl AV_LINK(%esi), %esi /* get next routine on list */ 7467c478bd9Sstevel@tonic-gate testl %esi, %esi /* if pointer is non-null */ 7477c478bd9Sstevel@tonic-gate jnz loop1 /* then continue */ 7487c478bd9Sstevel@tonic-gate 7497c478bd9Sstevel@tonic-gate.intr_ret: 7507c478bd9Sstevel@tonic-gate cmpb $1, %bh /* if only 1 intpt in chain, it is OK */ 7517c478bd9Sstevel@tonic-gate je .intr_ret1 7527c478bd9Sstevel@tonic-gate orb %bl, %bl /* If no one claims intpt, then it is OK */ 7537c478bd9Sstevel@tonic-gate jz .intr_ret1 7547c478bd9Sstevel@tonic-gate movl (%esp), %ecx /* else restore intr vector */ 7557c478bd9Sstevel@tonic-gate movl $autovect, %esi /* get autovect structure */ 7567c478bd9Sstevel@tonic-gate jmp pre_loop1 /* and try again. */ 7577c478bd9Sstevel@tonic-gate 7587c478bd9Sstevel@tonic-gate.intr_ret1: 7597c478bd9Sstevel@tonic-gate LOADCPU(%ebx) /* get pointer to cpu struct */ 7607c478bd9Sstevel@tonic-gate 7617c478bd9Sstevel@tonic-gate cli 7627c478bd9Sstevel@tonic-gate movl CPU_PRI(%ebx), %esi 7637c478bd9Sstevel@tonic-gate 7647c478bd9Sstevel@tonic-gate / cpu_stats.sys.intr[PIL]++ 7657c478bd9Sstevel@tonic-gate INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx) 7667c478bd9Sstevel@tonic-gate 7677c478bd9Sstevel@tonic-gate / 7687c478bd9Sstevel@tonic-gate / Clear bit for this PIL in CPU's interrupt active bitmask. 7697c478bd9Sstevel@tonic-gate / 7707c478bd9Sstevel@tonic-gate 7717c478bd9Sstevel@tonic-gate ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set) 7727c478bd9Sstevel@tonic-gate 7737c478bd9Sstevel@tonic-gate cmpl $15, %esi 7747c478bd9Sstevel@tonic-gate jne 0f 7757c478bd9Sstevel@tonic-gate / Only clear bit if reference count is now zero. 
7767c478bd9Sstevel@tonic-gate decw CPU_INTR_ACTV_REF(%ebx) 7777c478bd9Sstevel@tonic-gate jnz 1f 7787c478bd9Sstevel@tonic-gate0: 7797c478bd9Sstevel@tonic-gate btrl %esi, CPU_INTR_ACTV(%ebx) 7807c478bd9Sstevel@tonic-gate1: 7817c478bd9Sstevel@tonic-gate / 7827c478bd9Sstevel@tonic-gate / Take timestamp, compute interval, update cumulative counter. 7837c478bd9Sstevel@tonic-gate / esi = PIL 7847c478bd9Sstevel@tonic-gate_tsc_patch4: 7857c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 7867a364d25Sschwartz movl %esi, %ecx /* save for later */ 7877a364d25Sschwartz HIGHPILBASE(%ebx, %esi) 7887c478bd9Sstevel@tonic-gate 7897c478bd9Sstevel@tonic-gate ASSERT_CPU_PIL_HIGH_START_NZ(%esi) 7907c478bd9Sstevel@tonic-gate 7917c478bd9Sstevel@tonic-gate TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START) 7927a364d25Sschwartz 7937a364d25Sschwartz PILBASE_INTRSTAT(%ebx, %ecx) 7947a364d25Sschwartz TSC_ADD_TO(%ecx, CPU_INTRSTAT) 795eda89462Sesolom INTRACCTBASE(%ebx, %esi) 796eda89462Sesolom TSC_ADD_TO(%esi, CPU_INTRACCT) /* cpu_intracct[cpu_mstate] += tsc */ 7977c478bd9Sstevel@tonic-gate / 7987c478bd9Sstevel@tonic-gate / Check for lower-PIL nested high-level interrupt beneath current one 7997c478bd9Sstevel@tonic-gate / If so, place a starting timestamp in its pil_high_start entry. 
8007c478bd9Sstevel@tonic-gate / 8017c478bd9Sstevel@tonic-gate movl CPU_INTR_ACTV(%ebx), %eax 8027c478bd9Sstevel@tonic-gate movl %eax, %esi 8037c478bd9Sstevel@tonic-gate andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax 8047c478bd9Sstevel@tonic-gate jz 0f 8057c478bd9Sstevel@tonic-gate bsrl %eax, %ecx /* find PIL of nested interrupt */ 8067a364d25Sschwartz HIGHPILBASE(%ebx, %ecx) 8077c478bd9Sstevel@tonic-gate_tsc_patch5: 8087c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 8097a364d25Sschwartz TSC_STORE(%ecx, CPU_PIL_HIGH_START) 8107c478bd9Sstevel@tonic-gate / 8117c478bd9Sstevel@tonic-gate / Another high-level interrupt is active below this one, so 8127c478bd9Sstevel@tonic-gate / there is no need to check for an interrupt thread. That will be 8137c478bd9Sstevel@tonic-gate / done by the lowest priority high-level interrupt active. 8147c478bd9Sstevel@tonic-gate / 8157c478bd9Sstevel@tonic-gate jmp 1f 8167c478bd9Sstevel@tonic-gate0: 8177c478bd9Sstevel@tonic-gate / Check to see if there is a low-level interrupt active. If so, 8187c478bd9Sstevel@tonic-gate / place a starting timestamp in the thread structure. 8197c478bd9Sstevel@tonic-gate movl CPU_THREAD(%ebx), %esi 8207c478bd9Sstevel@tonic-gate testw $T_INTR_THREAD, T_FLAGS(%esi) 8217c478bd9Sstevel@tonic-gate jz 1f 8227c478bd9Sstevel@tonic-gate_tsc_patch6: 8237c478bd9Sstevel@tonic-gate nop; nop /* patched to rdtsc if available */ 8247a364d25Sschwartz TSC_STORE(%esi, T_INTR_START) 8257c478bd9Sstevel@tonic-gate1: 8267c478bd9Sstevel@tonic-gate movl %edi, CPU_PRI(%ebx) 8277c478bd9Sstevel@tonic-gate /* interrupt vector already on stack */ 8287c478bd9Sstevel@tonic-gate pushl %edi /* old ipl */ 8297c478bd9Sstevel@tonic-gate call *setlvlx 8307c478bd9Sstevel@tonic-gate addl $8, %esp /* eax contains the current ipl */ 8317c478bd9Sstevel@tonic-gate 8327c478bd9Sstevel@tonic-gate movl CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */ 8337c478bd9Sstevel@tonic-gate shrl $LOCK_LEVEL + 1, %esi /* HI PRI intrs. 
*/ 8347c478bd9Sstevel@tonic-gate jnz .intr_ret2 8357c478bd9Sstevel@tonic-gate popl %esp /* restore the thread stack pointer */ 8367c478bd9Sstevel@tonic-gate.intr_ret2: 8377c478bd9Sstevel@tonic-gate movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */ 8387c478bd9Sstevel@tonic-gate orl %edx, %edx 8397c478bd9Sstevel@tonic-gate jz _sys_rtt 8407c478bd9Sstevel@tonic-gate jmp dosoftint /* check for softints before we return. */ 8417c478bd9Sstevel@tonic-gate SET_SIZE(cmnint) 8427c478bd9Sstevel@tonic-gate SET_SIZE(_interrupt) 8437c478bd9Sstevel@tonic-gate 8447c478bd9Sstevel@tonic-gate#endif /* __i386 */ 8457c478bd9Sstevel@tonic-gate 8467c478bd9Sstevel@tonic-gate/* 8477c478bd9Sstevel@tonic-gate * Declare a uintptr_t which has the size of _interrupt to enable stack 8487c478bd9Sstevel@tonic-gate * traceback code to know when a regs structure is on the stack. 8497c478bd9Sstevel@tonic-gate */ 8507c478bd9Sstevel@tonic-gate .globl _interrupt_size 8517c478bd9Sstevel@tonic-gate .align CLONGSIZE 8527c478bd9Sstevel@tonic-gate_interrupt_size: 8537c478bd9Sstevel@tonic-gate .NWORD . - _interrupt 8547c478bd9Sstevel@tonic-gate .type _interrupt_size, @object 8557c478bd9Sstevel@tonic-gate 8567c478bd9Sstevel@tonic-gate#endif /* __lint */ 8577c478bd9Sstevel@tonic-gate 8587c478bd9Sstevel@tonic-gate#if defined(__i386) 8597c478bd9Sstevel@tonic-gate 8607c478bd9Sstevel@tonic-gate/* 8617c478bd9Sstevel@tonic-gate * Handle an interrupt in a new thread. 8627c478bd9Sstevel@tonic-gate * Entry: traps disabled. 8637c478bd9Sstevel@tonic-gate * %edi - old priority level 8647c478bd9Sstevel@tonic-gate * %ebp - pointer to REGS 8657c478bd9Sstevel@tonic-gate * %ecx - translated vector 8667c478bd9Sstevel@tonic-gate * %eax - ipl of isr. 
 * %ebx - pointer to CPU struct
 * Uses:
 */

#if !defined(__lint)

	ENTRY_NP(intr_thread)
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */

	/
	/ Are we interrupting an interrupt thread? If so, account for it.
	/
	testw	$T_INTR_THREAD, T_FLAGS(%edx)
	jz	0f
	/
	/ We have interrupted an interrupt thread. Account for its time slice
	/ only if its time stamp is non-zero. t_intr_start may be zero due to
	/ cpu_intr_swtch_enter.
	/
	/ The 64-bit t_intr_start is tested one 32-bit half at a time.
	/
	cmpl	$0, T_INTR_START+4(%edx)
	jne	1f
	cmpl	$0, T_INTR_START(%edx)
	je	0f
1:
	pushl	%ecx
	pushl	%eax
	movl	%edx, %esi
_tsc_patch7:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_CLR(%esi, T_INTR_START)	/* zero t_intr_start: slice consumed */
	movzbl	T_PIL(%esi), %ecx
	PILBASE_INTRSTAT(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	movl	%esi, %edx
	popl	%eax
	popl	%ecx
0:
	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
	pushl	%edi			/* get a temporary register */
	UNLINK_INTR_THREAD(%ebx, %esi, %edi)

	movl	T_LWP(%edx), %edi
	movl	%edx, T_INTR(%esi)	/* push old thread */
	movl	%edi, T_LWP(%esi)
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ chain the interrupted thread onto list from the interrupt thread.
	/ Set the new interrupt thread as the current one.
	/
	popl	%edi			/* Don't need a temp reg anymore */
	movl	T_STACK(%esi), %esp	/* interrupt stack pointer */
	movl	%esp, %ebp
	movl	%esi, CPU_THREAD(%ebx)	/* set new thread */
	pushl	%eax			/* save the ipl */
	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
					/* is loaded on some other cpu. */
	addl	%ebx, %eax		/* convert level to dispatch priority */
	movw	%ax, T_PRI(%esi)

	/
	/ Take timestamp and store it in the thread structure.
	/
	movl	%eax, %ebx		/* save priority over rdtsc */
_tsc_patch8:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)
	movl	%ebx, %eax		/* restore priority */

	/ The following 3 instructions need not be in cli.
	/ Putting them here only to avoid the AGI penalty on Pentiums.

	pushl	%ecx			/* save interrupt vector. */
	pushl	%esi			/* save interrupt thread */
	movl	$autovect, %esi		/* get autovect structure */
	sti				/* enable interrupts */

	/ Fast event tracing.
	LOADCPU(%ebx)
	movl	CPU_FTRACE_STATE(%ebx), %ebx
	testl	$FTRACE_ENABLED, %ebx
	jz	1f

	movl	8(%esp), %ebx		/* saved ipl (pushed above the vec/thread) */
	pushl	%ebx			/* ipl */
	pushl	%ecx			/* int vector */
	movl	T_SP(%edx), %ebx
	pushl	%ebx			/* &regs */
	pushl	$_ftrace_intr_thread_fmt
	call	ftrace_3_notick
	addl	$8, %esp
	popl	%ecx			/* restore int vector */
	addl	$4, %esp
1:
pre_loop2:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi	/* if pointer is null */
	jz	loop_done2	/* we're done */
loop2:
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if func is null */
	jz	loop_done2		/* we're done */
	incb	%bh
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp
	movl	%eax, 16(%esp)		/* stash return value in probe's 5th slot */
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_TICKSP(%esi), %ecx
	testl	%ecx, %ecx
	jz	no_time
	call	intr_get_time
	movl	AV_TICKSP(%esi), %ecx
	TSC_ADD_TO(%ecx, 0)
no_time:
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop2			/* continue */
loop_done2:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.loop_done2_1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.loop_done2_1
	movl	$autovect, %esi		/* else get autovect structure */
	movl	4(%esp), %ecx		/* restore intr vector */
	jmp	pre_loop2		/* and try again. */
.loop_done2_1:
	popl	%esi			/* restore intr thread pointer */

	LOADCPU(%ebx)

	cli		/* protect interrupt thread pool and intr_actv */
	movzbl	T_PIL(%esi), %eax

	/ Save value in regs
	pushl	%eax			/* current pil */
	pushl	%edx			/* (huh?) */
	pushl	%edi			/* old pil */

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)

	/
	/ Take timestamp, compute interval, and update cumulative counter.
	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %edi

	ASSERT_T_INTR_START_NZ(%esi)

_tsc_patch9:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	PILBASE_INTRSTAT(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%edi
	popl	%edx
	popl	%eax

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)

	btrl	%eax, CPU_INTR_ACTV(%ebx)

	/ if there is still an interrupted thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to intr_thread_exit
	cmpl	$0, T_INTR(%esi)
	je	intr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)

	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	intr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
intr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)
					/* intr vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx		/* eax contains the current ipl */
	/
	/ Switch back to the interrupted thread
	/
	movl	T_INTR(%esi), %ecx

	/ Place starting timestamp in interrupted thread's thread structure.
_tsc_patch10:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, T_INTR_START)

	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp
	movl	%ecx, CPU_THREAD(%ebx)

	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */

	/
	/ An interrupt returned on what was once (and still might be)
	/ an interrupt thread stack, but the interrupted process is no longer
	/ there.  This means the interrupt must have blocked.
	/
	/ There is no longer a thread under this one, so put this thread back
	/ on the CPU's free list and resume the idle thread which will dispatch
	/ the next thread to run.
	/
	/ All interrupts are disabled here
	/

intr_thread_exit:
#ifdef DEBUG
	incl	intr_thread_cnt
#endif
	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	esi - interrupt thread
	/	edi - old ipl
	/	ebx - ptr to CPU struct

	/ Set CPU's base SPL level based on active interrupts bitmask
	call	set_base_spl

	movl	CPU_BASE_SPL(%ebx), %edi
	movl	%edi, CPU_PRI(%ebx)
					/* interrupt vector already on stack */
	pushl	%edi
	call	*setlvlx
	addl	$8, %esp		/* XXX - don't need to pop since */
					/* we are ready to switch */
	call	splhigh			/* block all intrs below lock level */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	swtch
	/ swtch() shouldn't return

	SET_SIZE(intr_thread)

#endif	/* __lint */
#endif	/* __i386 */

/*
 * Set Cpu's base SPL level, based on which interrupt levels are active.
 * Called at spl7 or above.
 */

#if defined(__lint)

/* Lint stub; the real implementation is the assembly below. */
void
set_base_spl(void)
{}

#else	/* __lint */

	/
	/ On entry %eax is loaded with cpu_intr_actv; on the setbase path
	/ %eax holds the highest active PIL (0 if no interrupts are active),
	/ which is stored as this CPU's base SPL.
	/
	ENTRY_NP(set_base_spl)
	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
	testl	%eax, %eax		/* is it zero? */
	jz	setbase
	testl	$0xff00, %eax		/* any PIL in 8..15 active? */
	jnz	ah_set
	shl	$24, %eax		/* shift 'em over so we can find */
					/* the 1st bit faster */
	bsrl	%eax, %eax		/* index of highest set bit (24..31) */
	subl	$24, %eax		/* undo the shift: PIL in 0..7 */
setbase:
	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
	ret
ah_set:
	shl	$16, %eax		/* move bits 8..15 up to 24..31 */
	bsrl	%eax, %eax		/* index of highest set bit (24..31) */
	subl	$16, %eax		/* undo the shift: PIL in 8..15 */
	jmp	setbase
	SET_SIZE(set_base_spl)

#endif	/* __lint */

#if defined(__i386)

/*
 * int
 * intr_passivate(from, to)
 *	thread_id_t	from;		interrupt thread
 *	thread_id_t	to;		interrupted thread
 *
 * intr_passivate(t, itp) makes the interrupted thread "t" runnable.
 *
 * Since t->t_sp has already been saved, t->t_pc is all that needs to be
 * set in this function.
 *
 * Returns interrupt level of the thread (in %eax).
 */

#if defined(__lint)

/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }

#else	/* __lint */

	/*
	 * cdecl args: 4(%esp) = interrupt thread, 8(%esp) = interrupted
	 * thread.  The PIL is recovered from the first word that the
	 * interrupt-dispatch code pushed onto the interrupt thread's
	 * stack (see the "push ipl as first element" comment in
	 * dosoftint/intr_thread).
	 */
	ENTRY(intr_passivate)
	movl	8(%esp), %eax		/* interrupted thread  */
	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */
					/* so it resumes via _sys_rtt */

	movl	4(%esp), %eax		/* interrupt thread */
	movl	T_STACK(%eax), %eax	/* get the pointer to the start of */
					/* the interrupt thread stack */
	movl	-4(%eax), %eax		/* interrupt level was the first */
					/* thing pushed onto the stack */
	ret
	SET_SIZE(intr_passivate)

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__lint)

void
fakesoftint(void)
{}

#else	/* __lint */

	/
	/ If we're here, we're being called from splx() to fake a soft
	/ interrupt (note that interrupts are still disabled from splx()).
	/ We execute this code when a soft interrupt is posted at a
	/ level higher than the CPU's current spl; when spl is lowered in
	/ splx(), it will see the softint and jump here.  We'll do exactly
	/ what a trap would do:  push our flags, %cs, %eip, error code
	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
	/ and branch to the dosoftint() code.
	/
#if defined(__amd64)

	/*
	 * In 64-bit mode, iretq -always- pops all five regs
	 * (%ss, %rsp, %rflags, %cs, %rip).  Imitate the 16-byte
	 * auto-align of the stack that hardware interrupt delivery
	 * performs, and the zero-ed out %ss value.
	 */
	ENTRY_NP(fakesoftint)
	movq	%rsp, %r11		/* remember pre-trap %rsp */
	andq	$-16, %rsp		/* hardware-style 16-byte align */
	pushq	$KDS_SEL		/* %ss */
	pushq	%r11			/* %rsp */
	pushf				/* rflags */
	pushq	$KCS_SEL		/* %cs */
	leaq	fakesoftint_return(%rip), %r11
	pushq	%r11			/* %rip */
	pushq	$0			/* err */
	pushq	$T_SOFTINT		/* trap */
	jmp	cmnint			/* enter common interrupt path */
	SET_SIZE(fakesoftint)

#elif defined(__i386)

	/* 32-bit variant: build the same fake trap frame iret expects */
	ENTRY_NP(fakesoftint)
	pushf				/* eflags */
	push	%cs			/* %cs */
	push	$fakesoftint_return	/* %eip */
	push	$0			/* err */
	push	$T_SOFTINT		/* trap */
	jmp	cmnint
	SET_SIZE(fakesoftint)

#endif	/* __i386 */

	/*
	 * Exported size of the fakesoftint text, consumed elsewhere
	 * (e.g. to recognize a PC inside this code).
	 */
	.align	CPTRSIZE
	.globl	_fakesoftint_size
	.type	_fakesoftint_size, @object
_fakesoftint_size:
	.NWORD	. - fakesoftint
	SET_SIZE(_fakesoftint_size)

/*
 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
 *	Process software interrupts
 *	Interrupts are disabled here.
 */
#if defined(__i386)

	ENTRY_NP(dosoftint)

	bsrl	%edx, %edx		/* find highest pending interrupt */
	cmpl	%edx, %edi		/* if curipl >= pri soft pending intr */
	jae	_sys_rtt		/* skip */

	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
	jae	_sys_rtt		/* skip */

	lock				/* MP protect */
	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
	jnc	dosoftint_again		/* lost the race: rescan softinfo */

	movl	%edx, CPU_PRI(%ebx)	/* set IPL to softint level */
	pushl	%edx
	call	*setspl			/* mask levels up to the softint level */
	popl	%eax			/* priority we are at in %eax */

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	UNLINK_INTR_THREAD(%ebx, %esi, %edx)

	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %ecx	/* %ecx = thread being interrupted */

	/ If we are interrupting an interrupt thread, account for it.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
	/
	/ We have interrupted an interrupt thread. Account for its time slice
	/ only if its time stamp is non-zero. t_intr_start may be zero due to
	/ cpu_intr_swtch_enter.
	/
	cmpl	$0, T_INTR_START+4(%ecx)	/* test 64-bit t_intr_start */
	jne	1f				/* high word, then low word */
	cmpl	$0, T_INTR_START(%ecx)
	je	0f
1:
	pushl	%eax				/* preserve PIL across rdtsc */
	movl	%eax, %ebp
_tsc_patch11:
	nop; nop			/* patched to rdtsc if available */
	PILBASE_INTRSTAT(%ebx, %ebp)
	TSC_SUB_FROM(%ecx, T_INTR_START)	/* elapsed = now - t_intr_start */
	TSC_ADD_TO(%ebp, CPU_INTRSTAT)		/* charge to pinned thread's PIL */
	INTRACCTBASE(%ebx, %ebp)
	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%eax
0:
	movl	T_LWP(%ecx), %ebp
	movl	%ebp, T_LWP(%esi)	/* intr thread inherits pinned lwp */
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/ Could eliminate the next two instructions with a little work.
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ Push interrupted thread onto list from new thread.
	/ Set the new thread as the current one.
	/ Set interrupted thread's T_SP because if it is the idle thread,
	/ Resume() may use that stack between threads.
	/
	movl	%esp, T_SP(%ecx)	/* mark stack for resume */
	movl	%ecx, T_INTR(%esi)	/* push old thread */
	movl	%esi, CPU_THREAD(%ebx)	/* set new thread */
	movl	T_STACK(%esi), %esp	/* interrupt stack pointer */
	movl	%esp, %ebp

	pushl	%eax			/* push ipl as first element in stack */
					/* see intr_passivate() */
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ecx
	addl	%eax, %ecx		/* convert level to dispatch priority */
	movw	%cx, T_PRI(%esi)

	/
	/ Store starting timestamp in thread structure.
	/ esi = thread, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
_tsc_patch12:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)

	sti				/* enable interrupts */

	/
	/ Enabling interrupts (above) could raise the current
	/ IPL and base SPL. But, we continue processing the current soft
	/ interrupt and we will check the base SPL next time in the loop
	/ so that blocked interrupt thread would get a chance to run.
	/

	/
	/ dispatch soft interrupts
	/
	pushl	%ecx			/* arg: PIL being serviced */
	call	av_dispatch_softvect
	addl	$4, %esp

	cli				/* protect interrupt thread pool */
					/* and softinfo & sysinfo */
	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
	movzbl	T_PIL(%esi), %ecx

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)

	btrl	%ecx, CPU_INTR_ACTV(%ebx)

	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = thread, ebx = cpu, ecx = PIL
	/
	PILBASE_INTRSTAT(%ebx, %ecx)
_tsc_patch13:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)	/* elapsed in this softint */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */

	/ if there is still an interrupt thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to softintr_thread_exit.
	/ softintr_thread_exit expects esi to be curthread & ebx to be ipl.
	cmpl	$0, T_INTR(%esi)
	je	softintr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Switch back to the interrupted thread
	movl	T_INTR(%esi), %ecx
	movl	%ecx, CPU_THREAD(%ebx)
	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp

	/ If we are returning to an interrupt thread, store a starting
	/ timestamp in the thread structure.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
_tsc_patch14:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, T_INTR_START)
0:
	movl	CPU_BASE_SPL(%ebx), %eax
	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	softintr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
softintr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)	/* set IPL to old level */
	pushl	%edi
	call	*setspl
	popl	%eax
dosoftint_again:
	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint		/* process more software interrupts */

softintr_thread_exit:
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	%esi	interrupt thread

	/
	/ This was an interrupt thread, so set CPU's base SPL level
	/ set_base_spl only uses %eax.
	/
	call	set_base_spl		/* interrupt vector already on stack */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LOADCPU(%ebx)
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	splhigh			/* block all intrs below lock lvl */
	call	swtch
	/ swtch() shouldn't return
	SET_SIZE(dosoftint)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(lint)

/*
 * intr_get_time() is a resource for interrupt handlers to determine how
 * much time has been spent handling the current interrupt. Such a function
 * is needed because higher level interrupts can arrive during the
 * processing of an interrupt, thus making direct comparisons of %tick by
 * the handler inaccurate. intr_get_time() only returns time spent in the
 * current interrupt handler.
 *
 * The caller must be calling from an interrupt handler running at a pil
 * below or at lock level. Timings are not provided for high-level
 * interrupts.
 *
 * The first time intr_get_time() is called while handling an interrupt,
 * it returns the time since the interrupt handler was invoked. Subsequent
 * calls will return the time since the prior call to intr_get_time(). Time
 * is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
 *
 * Theory Of Intrstat[][]:
 *
 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
 * uint64_ts per pil.
 *
 * intrstat[pil][0] is a cumulative count of the number of ticks spent
 * handling all interrupts at the specified pil on this CPU. It is
 * exported via kstats to the user.
 *
 * intrstat[pil][1] is always a count of ticks less than or equal to the
 * value in [0]. The difference between [1] and [0] is the value returned
 * by a call to intr_get_time(). At the start of interrupt processing,
 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
 * time, [0] will increase, but [1] will remain the same. A call to
 * intr_get_time() will return the difference, then update [1] to be the
 * same as [0]. Future calls will return the time since the last call.
 * Finally, when the interrupt completes, [1] is updated to the same as [0].
 *
 * Implementation:
 *
 * intr_get_time() works much like a higher level interrupt arriving. It
 * "checkpoints" the timing information by incrementing intrstat[pil][0]
 * to include elapsed running time, and by setting t_intr_start to rdtsc.
 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
 * and updates intrstat[pil][1] to be the same as the new value of
 * intrstat[pil][0].
 *
 * In the normal handling of interrupts, after an interrupt handler returns
 * and the code in intr_thread() updates intrstat[pil][0], it then sets
 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
 * is 0.
 *
 * Whenever interrupts arrive on a CPU which is handling a lower pil
 * interrupt, they update the lower pil's [0] to show time spent in the
 * handler that they've interrupted. This results in a growing discrepancy
 * between [0] and [1], which is returned the next time intr_get_time() is
 * called. Time spent in the higher-pil interrupt will not be returned in
 * the next intr_get_time() call from the original interrupt, because
 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
 */

/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
#else	/* lint */


#if defined(__amd64)
	/* 64-bit: all the work is done in the C helper */
	ENTRY_NP(intr_get_time)
	cli				/* make this easy -- block intrs */
	LOADCPU(%rdi)			/* arg0 = this CPU */
	call	intr_thread_get_time
	sti
	ret				/* result already in %rax */
	SET_SIZE(intr_get_time)

#elif defined(__i386)

#ifdef DEBUG

/* Panic strings for the DEBUG-only sanity checks below */

_intr_get_time_high_pil:
	.string	"intr_get_time(): %pil > LOCK_LEVEL"
_intr_get_time_not_intr:
	.string	"intr_get_time(): not called from an interrupt thread"
_intr_get_time_no_start_time:
	.string	"intr_get_time(): t_intr_start == 0"

/*
 * ASSERT(%pil <= LOCK_LEVEL)
 */
#define	ASSERT_PIL_BELOW_LOCK_LEVEL(cpureg)				\
	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, CPU_INTR_ACTV(cpureg);	\
	jz	0f;							\
	__PANIC(_intr_get_time_high_pil, 0f);				\
0:

/*
 * ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
 */
#define	ASSERT_NO_PIL_0_INTRS(thrreg)			\
	testw	$T_INTR_THREAD, T_FLAGS(thrreg);	\
	jz	1f;					\
	cmpb	$0, T_PIL(thrreg);			\
	jne	0f;					\
1:							\
	__PANIC(_intr_get_time_not_intr, 0f);		\
0:

/*
 * ASSERT(t_intr_start != 0)
 * (64-bit value: both halves must be zero for the assert to fire)
 */
#define	ASSERT_INTR_START_NOT_0(thrreg)		\
	cmpl	$0, T_INTR_START(thrreg);	\
	jnz	0f;				\
	cmpl	$0, T_INTR_START+4(thrreg);	\
	jnz	0f;				\
	__PANIC(_intr_get_time_no_start_time, 0f);	\
0:

#endif	/* DEBUG */

	ENTRY_NP(intr_get_time)

	cli				/* make this easy -- block intrs */
	pushl	%esi			/* and free up some registers */

	LOADCPU(%esi)			/* %esi = this CPU */
	movl	CPU_THREAD(%esi), %ecx	/* %ecx = curthread */

#ifdef DEBUG
	ASSERT_PIL_BELOW_LOCK_LEVEL(%esi)
	ASSERT_NO_PIL_0_INTRS(%ecx)
	ASSERT_INTR_START_NOT_0(%ecx)
#endif /* DEBUG */

	/* 64-bit tick arithmetic below is carried in %edx:%eax */
_tsc_patch17:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%ecx, T_INTR_START)	/* get elapsed time */
	TSC_ADD_TO(%ecx, T_INTR_START)		/* T_INTR_START = rdtsc */

	movzbl	T_PIL(%ecx), %ecx		/* %ecx = pil */
	PILBASE_INTRSTAT(%esi, %ecx)		/* %ecx = CPU + pil*16 */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)		/* intrstat[0] += elapsed */
	TSC_LOAD(%ecx, CPU_INTRSTAT)		/* get new intrstat[0] */
	TSC_SUB_FROM(%ecx, CPU_INTRSTAT+8)	/* diff with intrstat[1] */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT+8)	/* intrstat[1] = intrstat[0] */

	/* %edx/%eax contain difference between old and new intrstat[1] */

	popl	%esi
	sti
	ret				/* return ticks in %edx:%eax */
	SET_SIZE(intr_get_time)
#endif	/* __i386 */

#endif	/* lint */