/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
/*	  All Rights Reserved						*/

/*	Copyright (c) 1987, 1988 Microsoft Corporation			*/
/*	  All Rights Reserved						*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#if defined(__lint)

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>

#else	/* __lint */

#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include "assym.h"

_ftrace_intr_thread_fmt:
	.string	"intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x"

#endif	/* lint */

#if defined(__i386)

#if defined(__lint)

void
patch_tsc(void)
{}

#else	/* __lint */

/*
 * To cope with processors that do not implement the rdtsc instruction,
 * we patch the kernel to use rdtsc if that feature is detected on the CPU.
 * On an unpatched kernel, all locations requiring rdtsc are nop's.
 *
 * This function patches the nop's to rdtsc.
 */
	ENTRY_NP(patch_tsc)
	movw	_rdtsc_insn, %cx
	movw	%cx, _tsc_patch1
	movw	%cx, _tsc_patch2
	movw	%cx, _tsc_patch3
	movw	%cx, _tsc_patch4
	movw	%cx, _tsc_patch5
	movw	%cx, _tsc_patch6
	movw	%cx, _tsc_patch7
	movw	%cx, _tsc_patch8
	movw	%cx, _tsc_patch9
	movw	%cx, _tsc_patch10
	movw	%cx, _tsc_patch11
	movw	%cx, _tsc_patch12
	movw	%cx, _tsc_patch13
	movw	%cx, _tsc_patch14
	movw	%cx, _tsc_patch15
	movw	%cx, _tsc_patch16
	movw	%cx, _tsc_patch17
	ret
_rdtsc_insn:
	rdtsc
	SET_SIZE(patch_tsc)

#endif	/* __lint */

#endif	/* __i386 */


#if defined(__lint)

void
_interrupt(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Common register usage:
	 *
	 * %rbx		cpu pointer
	 * %r12		trap trace pointer -and- stash of
	 *		vec across intr_thread dispatch.
	 * %r13d	ipl of isr
	 * %r14d	old ipl (ipl level we entered on)
	 * %r15		interrupted thread stack pointer
	 */
	ENTRY_NP2(cmnint, _interrupt)

	INTR_PUSH

	/*
	 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry
	 */
	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT)
						/* Uses labels 8 and 9 */
	TRACE_REGS(%r12, %rsp, %rax, %rbx)	/* Uses label 9 */
	TRACE_STAMP(%r12)		/* Clobbers %eax, %edx, uses 9 */

	DISABLE_INTR_FLAGS		/* (and set kernel flag values) */

	movq	%rsp, %rbp

	TRACE_STACK(%r12)

	LOADCPU(%rbx)				/* &cpu */
	leaq	REGOFF_TRAPNO(%rbp), %rsi	/* &vector */
	movl	CPU_PRI(%rbx), %r14d		/* old ipl */
	movl	CPU_SOFTINFO(%rbx), %edx

#ifdef TRAPTRACE
	movl	$255, TTR_IPL(%r12)
	movl	%r14d, %edi
	movb	%dil, TTR_PRI(%r12)
	movl	CPU_BASE_SPL(%rbx), %edi
	movb	%dil, TTR_SPL(%r12)
	movb	$255, TTR_VECTOR(%r12)
#endif

	/*
	 * Check to see if the trap number is T_SOFTINT; if it is,
	 * jump straight to dosoftint now.
	 */
	cmpq	$T_SOFTINT, (%rsi)
	je	dosoftint

	/*
	 * Raise the interrupt priority level, returns newpil.
	 * (The vector address is in %rsi so setlvl can update it.)
	 */
	movl	%r14d, %edi			/* old ipl */
						/* &vector */
	call	*setlvl(%rip)

#ifdef TRAPTRACE
	movb	%al, TTR_IPL(%r12)
#endif
	/*
	 * check for spurious interrupt
	 */
	cmpl	$-1, %eax
	je	_sys_rtt

#ifdef TRAPTRACE
	movl	%r14d, %edx
	movb	%dl, TTR_PRI(%r12)
	movl	CPU_BASE_SPL(%rbx), %edx
	movb	%dl, TTR_SPL(%r12)
#endif
	movl	%eax, CPU_PRI(%rbx)		/* update ipl */

#ifdef TRAPTRACE
	movl	REGOFF_TRAPNO(%rbp), %edx
	movb	%dl, TTR_VECTOR(%r12)
#endif
	movl	%eax, %r13d			/* ipl of isr */

	/*
	 * At this point we can take one of two paths.
	 * If the new level is at or below lock level, we will
	 * run this interrupt in a separate thread.
	 */
	cmpl	$LOCK_LEVEL, %eax
	jbe	intr_thread

	movq	%rbx, %rdi		/* &cpu */
	movl	%r13d, %esi		/* ipl */
	movl	%r14d, %edx		/* old ipl */
	movq	%rbp, %rcx		/* &regs */
	call	hilevel_intr_prolog
	orl	%eax, %eax		/* zero if need to switch stack */
	jnz	1f

	/*
	 * Save the thread stack and get on the cpu's interrupt stack
	 */
	movq	%rsp, %r15
	movq	CPU_INTR_STACK(%rbx), %rsp
1:

	sti

	/*
	 * Walk the list of handlers for this vector, calling
	 * them as we go until no more interrupts are claimed.
	 */
	movl	REGOFF_TRAPNO(%rbp), %edi
	call	av_dispatch_autovect

	cli

	movq	%rbx, %rdi			/* &cpu */
	movl	%r13d, %esi			/* ipl */
	movl	%r14d, %edx			/* oldipl */
	movl	REGOFF_TRAPNO(%rbp), %ecx	/* vec */
	call	hilevel_intr_epilog
	orl	%eax, %eax		/* zero if need to switch stack */
	jnz	2f
	movq	%r15, %rsp
2:	/*
	 * Check for, and execute, softints before we iret.
	 *
	 * (dosoftint expects oldipl in %r14d (which is where it is)
	 * the cpu pointer in %rbx (which is where it is) and the
	 * softinfo in %edx (which is where we'll put it right now))
	 */
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint
	/*NOTREACHED*/

	SET_SIZE(cmnint)
	SET_SIZE(_interrupt)

/*
 * Handle an interrupt in a new thread
 *
 * As we branch here, interrupts are still masked,
 * %rbx still contains the cpu pointer,
 * %r14d contains the old ipl that we came in on, and
 * %eax contains the new ipl that we got from the setlvl routine
 */

	ENTRY_NP(intr_thread)

	movq	%rbx, %rdi	/* &cpu */
	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
	movl	REGOFF_TRAPNO(%rbp), %r12d	/* stash the vec */
	movl	%eax, %edx	/* new pil from setlvlx() */
	call	intr_thread_prolog
	movq	%rsp, %r15
	movq	%rax, %rsp	/* t_stk from interrupt thread */
	movq	%rsp, %rbp

	sti

	testl	$FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx)
	jz	1f
	/*
	 * ftracing support. do we need this on x86?
	 */
	leaq	_ftrace_intr_thread_fmt(%rip), %rdi
	movq	%rbp, %rsi			/* &regs */
	movl	%r12d, %edx			/* vec */
	movq	CPU_THREAD(%rbx), %r11		/* (the interrupt thread) */
	movzbl	T_PIL(%r11), %ecx		/* newipl */
	call	ftrace_3_notick
1:
	movl	%r12d, %edi		/* vec */
	call	av_dispatch_autovect

	cli

	movq	%rbx, %rdi		/* &cpu */
	movl	%r12d, %esi		/* vec */
	movl	%r14d, %edx		/* oldpil */
	call	intr_thread_epilog
	/*
	 * If we return from here (we might not if the interrupted thread
	 * has exited or blocked, in which case we'll have quietly swtch()ed
	 * away) then we need to switch back to our old %rsp
	 */
	movq	%r15, %rsp
	movq	%rsp, %rbp
	/*
	 * Check for, and execute, softints before we iret.
	 *
	 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and
	 * the mcpu_softinfo.st_pending field in %edx.)
	 */
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	/*FALLTHROUGH*/

/*
 * Process soft interrupts.
 * Interrupts are masked, and we have a minimal frame on the stack.
 * %edx should contain the mcpu_softinfo.st_pending field
 */

	ALTENTRY(dosoftint)

	movq	%rbx, %rdi	/* &cpu */
	movq	%rbp, %rsi	/* &regs = stack pointer for _sys_rtt */
				/* cpu->cpu_m.mcpu_softinfo.st_pending */
	movl	%r14d, %ecx	/* oldipl */
	call	dosoftint_prolog
	/*
	 * dosoftint_prolog() usually returns a stack pointer for the
	 * interrupt thread that we must switch to.  However, if the
	 * returned stack pointer is NULL, then the software interrupt was
	 * too low in priority to run now; we'll catch it another time.
	 */
	orq	%rax, %rax
	jz	_sys_rtt
	movq	%rsp, %r15
	movq	%rax, %rsp	/* t_stk from interrupt thread */
	movq	%rsp, %rbp

	sti

	/*
	 * Enabling interrupts (above) could raise the current ipl
	 * and base spl.  But, we continue processing the current soft
	 * interrupt and we will check the base spl next time around
	 * so that blocked interrupt threads get a chance to run.
	 */
	movq	CPU_THREAD(%rbx), %r11	/* now an interrupt thread */
	movzbl	T_PIL(%r11), %edi
	call	av_dispatch_softvect

	cli

	movq	%rbx, %rdi		/* &cpu */
	movl	%r14d, %esi		/* oldpil */
	call	dosoftint_epilog
	movq	%r15, %rsp		/* back on old stack pointer */
	movq	%rsp, %rbp
	movl	CPU_SOFTINFO(%rbx), %edx
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint

	SET_SIZE(dosoftint)
	SET_SIZE(intr_thread)

#elif defined(__i386)

/*
 * One day, this should just invoke the C routines that know how to
 * do all the interrupt bookkeeping.  In the meantime, try
 * and make the assembler a little more comprehensible.
 */

#define	INC64(basereg, offset)			\
	addl	$1, offset(basereg);		\
	adcl	$0, offset + 4(basereg)

#define	TSC_CLR(basereg, offset)		\
	movl	$0, offset(basereg);		\
	movl	$0, offset + 4(basereg)

/*
 * The following macros assume the time value is in %edx:%eax
 * e.g. from a rdtsc instruction.
 */
#define	TSC_STORE(reg, offset)		\
	movl	%eax, offset(reg);	\
	movl	%edx, offset + 4(reg)

#define	TSC_LOAD(reg, offset)		\
	movl	offset(reg), %eax;	\
	movl	offset + 4(reg), %edx

#define	TSC_ADD_TO(reg, offset)		\
	addl	%eax, offset(reg);	\
	adcl	%edx, offset + 4(reg)

#define	TSC_SUB_FROM(reg, offset)	\
	subl	offset(reg), %eax;	\
	sbbl	offset + 4(reg), %edx	/* interval in edx:eax */

/*
 * basereg - pointer to cpu struct
 * pilreg  - pil or converted pil (pil - (LOCK_LEVEL + 1))
 *
 * Returns (base + pil * 8) in pilreg
 */
#define	PILBASE(basereg, pilreg)	\
	lea	(basereg, pilreg, 8), pilreg

/*
 * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg
 */
#define	HIGHPILBASE(basereg, pilreg)		\
	subl	$LOCK_LEVEL + 1, pilreg;	\
	PILBASE(basereg, pilreg)

/*
 * Returns (base + pil * 16) in pilreg
 */
#define	PILBASE_INTRSTAT(basereg, pilreg)	\
	shl	$4, pilreg;			\
	addl	basereg, pilreg;

/*
 * Returns (cpu + cpu_mstate * 8) in tgt
 */
#define	INTRACCTBASE(cpureg, tgtreg)		\
	movzwl	CPU_MSTATE(cpureg), tgtreg;	\
	lea	(cpureg, tgtreg, 8), tgtreg

/*
 * cpu_stats.sys.intr[PIL]++
 */
#define	INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg)	\
	movl	pilreg, tmpreg_32;				\
	PILBASE(basereg, tmpreg);				\
	INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8))

/*
 * Unlink thread from CPU's list
 */
#define	UNLINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
	mov	CPU_INTR_THREAD(cpureg), ithread;	\
	mov	T_LINK(ithread), tmpreg;		\
	mov	tmpreg, CPU_INTR_THREAD(cpureg)

/*
 * Link a thread into CPU's list
 */
#define	LINK_INTR_THREAD(cpureg, ithread, tmpreg)	\
	mov	CPU_INTR_THREAD(cpureg), tmpreg;	\
	mov	tmpreg, T_LINK(ithread);		\
	mov	ithread, CPU_INTR_THREAD(cpureg)
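
/*
 * For reference, a rough C-level sketch of the 64-bit accounting the
 * macros above perform (illustrative only; "base" and "off" stand for
 * the basereg/offset arguments, and "tsc" for the value in %edx:%eax):
 *
 *	INC64(base, off)	~	*(uint64_t *)(base + off) += 1;
 *	TSC_STORE(base, off)	~	*(uint64_t *)(base + off) = tsc;
 *	TSC_LOAD(base, off)	~	tsc = *(uint64_t *)(base + off);
 *	TSC_ADD_TO(base, off)	~	*(uint64_t *)(base + off) += tsc;
 *	TSC_SUB_FROM(base, off)	~	tsc -= *(uint64_t *)(base + off);
 *
 * Since this is 32-bit code, each uint64_t is manipulated as a pair of
 * 32-bit words, with add/adc and sub/sbb propagating the carry or borrow
 * between the low and high halves.
 */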

#if defined(DEBUG)

/*
 * Do not call panic, if panic is already in progress.
 */
#define	__PANIC(msg, label)		\
	cmpl	$0, panic_quiesce;	\
	jne	label;			\
	pushl	$msg;			\
	call	panic

#define	__CMP64_JNE(basereg, offset, label)	\
	cmpl	$0, offset(basereg);		\
	jne	label;				\
	cmpl	$0, offset + 4(basereg);	\
	jne	label

/*
 * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
 */
#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
	btl	pilreg, CPU_INTR_ACTV(basereg);		\
	jnc	4f;					\
	__PANIC(msg, 4f);				\
4:

/*
 * ASSERT(CPU->cpu_intr_actv & (1 << PIL))
 */
#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)	\
	btl	pilreg, CPU_INTR_ACTV(basereg);		\
	jc	5f;					\
	__PANIC(msg, 5f);				\
5:

/*
 * ASSERT(CPU->cpu_pil_high_start != 0)
 */
#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)		\
	__CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f);	\
	__PANIC(_interrupt_timestamp_zero, 6f);		\
6:

/*
 * ASSERT(t->t_intr_start != 0)
 */
#define	ASSERT_T_INTR_START_NZ(basereg)			\
	__CMP64_JNE(basereg, T_INTR_START, 7f);		\
	__PANIC(_intr_thread_t_intr_start_zero, 7f);	\
7:

_interrupt_actv_bit_set:
	.string	"_interrupt(): cpu_intr_actv bit already set for PIL"
_interrupt_actv_bit_not_set:
	.string	"_interrupt(): cpu_intr_actv bit not set for PIL"
_interrupt_timestamp_zero:
	.string	"_interrupt(): timestamp zero upon handler return"
_intr_thread_actv_bit_not_set:
	.string	"intr_thread(): cpu_intr_actv bit not set for PIL"
_intr_thread_t_intr_start_zero:
	.string	"intr_thread(): t_intr_start zero upon handler return"
_dosoftint_actv_bit_set:
	.string	"dosoftint(): cpu_intr_actv bit already set for PIL"
_dosoftint_actv_bit_not_set:
	.string	"dosoftint(): cpu_intr_actv bit not set for PIL"

	DGDEF(intr_thread_cnt)

#else
#define	ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg)
#define	ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg)
#define	ASSERT_CPU_PIL_HIGH_START_NZ(basereg)
#define	ASSERT_T_INTR_START_NZ(basereg)
#endif

	ENTRY_NP2(cmnint, _interrupt)

	INTR_PUSH

	/*
	 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
	 */
	TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
						/* Uses labels 8 and 9 */
	TRACE_REGS(%esi, %esp, %eax, %ebx)	/* Uses label 9 */
	TRACE_STAMP(%esi)		/* Clobbers %eax, %edx, uses 9 */

	movl	%esp, %ebp
	DISABLE_INTR_FLAGS
	LOADCPU(%ebx)		/* get pointer to CPU struct. Avoid gs refs */
	leal	REGOFF_TRAPNO(%ebp), %ecx	/* get address of vector */
	movl	CPU_PRI(%ebx), %edi		/* get ipl */
	movl	CPU_SOFTINFO(%ebx), %edx

	/
	/ Check to see if the trap number is T_SOFTINT; if it is, we'll
	/ jump straight to dosoftint now.
	/
	cmpl	$T_SOFTINT, (%ecx)
	je	dosoftint

	/ raise interrupt priority level
	/ oldipl is in %edi, vectorp is in %ecx
	/ newipl is returned in %eax
	pushl	%ecx
	pushl	%edi
	call	*setlvl
	popl	%edi			/* save oldpil in %edi */
	popl	%ecx

#ifdef TRAPTRACE
	movb	%al, TTR_IPL(%esi)
#endif

	/ check for spurious interrupt
	cmp	$-1, %eax
	je	_sys_rtt

#ifdef TRAPTRACE
	movl	CPU_PRI(%ebx), %edx
	movb	%dl, TTR_PRI(%esi)
	movl	CPU_BASE_SPL(%ebx), %edx
	movb	%dl, TTR_SPL(%esi)
#endif

	movl	%eax, CPU_PRI(%ebx)	/* update ipl */
	movl	REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */

#ifdef TRAPTRACE
	movb	%cl, TTR_VECTOR(%esi)
#endif

	/ At this point we can take one of two paths.  If the new priority
	/ level is less than or equal to LOCK LEVEL then we jump to code that
	/ will run this interrupt as a separate thread.  Otherwise the
	/ interrupt is NOT run as a separate thread.

	/ %edi - old priority level
	/ %ebp - pointer to REGS
	/ %ecx - translated vector
	/ %eax - ipl of isr
	/ %ebx - cpu pointer

	cmpl	$LOCK_LEVEL, %eax	/* compare to highest thread level */
	jbe	intr_thread		/* process as a separate thread */

	cmpl	$CBE_HIGH_PIL, %eax	/* Is this a CY_HIGH_LEVEL interrupt? */
	jne	2f

	movl	REGOFF_PC(%ebp), %esi
	movl	%edi, CPU_PROFILE_PIL(%ebx)	/* record interrupted PIL */
	testw	$CPL_MASK, REGOFF_CS(%ebp)	/* trap from supervisor mode? */
	jz	1f
	movl	%esi, CPU_PROFILE_UPC(%ebx)	/* record user PC */
	movl	$0, CPU_PROFILE_PC(%ebx)	/* zero kernel PC */
	jmp	2f

1:
	movl	%esi, CPU_PROFILE_PC(%ebx)	/* record kernel PC */
	movl	$0, CPU_PROFILE_UPC(%ebx)	/* zero user PC */

2:
	pushl	%ecx				/* vec */
	pushl	%eax				/* newpil */

	/
	/ See if we are interrupting another high-level interrupt.
	/
	movl	CPU_INTR_ACTV(%ebx), %eax
	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
	jz	0f
	/
	/ We have interrupted another high-level interrupt.
	/ Load starting timestamp, compute interval, update cumulative counter.
	/
	bsrl	%eax, %ecx		/* find PIL of interrupted handler */
	movl	%ecx, %esi		/* save PIL for later */
	HIGHPILBASE(%ebx, %ecx)
_tsc_patch1:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START)

	PILBASE_INTRSTAT(%ebx, %esi)
	TSC_ADD_TO(%esi, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	/
	/ Another high-level interrupt is active below this one, so
	/ there is no need to check for an interrupt thread.  That will be
	/ done by the lowest priority high-level interrupt active.
	/
	jmp	1f
0:
	/
	/ See if we are interrupting a low-level interrupt thread.
	/
	movl	CPU_THREAD(%ebx), %esi
	testw	$T_INTR_THREAD, T_FLAGS(%esi)
	jz	1f
	/
	/ We have interrupted an interrupt thread.  Account for its time slice
	/ only if its time stamp is non-zero.
	/
	cmpl	$0, T_INTR_START+4(%esi)
	jne	0f
	cmpl	$0, T_INTR_START(%esi)
	je	1f
0:
	movzbl	T_PIL(%esi), %ecx	/* %ecx has PIL of interrupted handler */
	PILBASE_INTRSTAT(%ebx, %ecx)
_tsc_patch2:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_CLR(%esi, T_INTR_START)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
1:
	/ Store starting timestamp in CPU structure for this PIL.
	popl	%ecx			/* restore new PIL */
	pushl	%ecx
	HIGHPILBASE(%ebx, %ecx)
_tsc_patch3:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, CPU_PIL_HIGH_START)

	popl	%eax			/* restore new pil */
	popl	%ecx			/* vec */
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	/ Save old CPU_INTR_ACTV
	movl	CPU_INTR_ACTV(%ebx), %esi

	cmpl	$15, %eax
	jne	0f
	/ PIL-15 interrupt.  Increment nest-count in upper 16 bits of intr_actv
	incw	CPU_INTR_ACTV_REF(%ebx)	/* increment ref count */
0:
	btsl	%eax, CPU_INTR_ACTV(%ebx)
	/
	/ Handle high-level nested interrupt on separate interrupt stack
	/
	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi
	jnz	onstack			/* already on interrupt stack */
	movl	%esp, %eax
	movl	CPU_INTR_STACK(%ebx), %esp	/* get on interrupt stack */
	pushl	%eax			/* save the thread stack pointer */
onstack:
	movl	$autovect, %esi		/* get autovect structure before */
					/* sti to save on AGI later */
	sti				/* enable interrupts */
	pushl	%ecx			/* save interrupt vector */
	/
	/ Get handler address
	/
pre_loop1:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi		/* if pointer is null */
	jz	.intr_ret		/* then skip */
loop1:
	incb	%bh
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if func is null */
	jz	.intr_ret		/* then skip */
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp
	movl	%eax, 16(%esp)
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop1			/* then continue */

.intr_ret:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.intr_ret1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.intr_ret1
	movl	(%esp), %ecx		/* else restore intr vector */
	movl	$autovect, %esi		/* get autovect structure */
	jmp	pre_loop1		/* and try again. */

.intr_ret1:
	LOADCPU(%ebx)			/* get pointer to cpu struct */

	cli
	movl	CPU_PRI(%ebx), %esi

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set)

	cmpl	$15, %esi
	jne	0f
	/ Only clear bit if reference count is now zero.
	decw	CPU_INTR_ACTV_REF(%ebx)
	jnz	1f
0:
	btrl	%esi, CPU_INTR_ACTV(%ebx)
1:
	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = PIL
_tsc_patch4:
	nop; nop			/* patched to rdtsc if available */
	movl	%esi, %ecx		/* save for later */
	HIGHPILBASE(%ebx, %esi)

	ASSERT_CPU_PIL_HIGH_START_NZ(%esi)

	TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START)

	PILBASE_INTRSTAT(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %esi)
	TSC_ADD_TO(%esi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	/
	/ Check for lower-PIL nested high-level interrupt beneath current one
	/ If so, place a starting timestamp in its pil_high_start entry.
	/
	movl	CPU_INTR_ACTV(%ebx), %eax
	movl	%eax, %esi
	andl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax
	jz	0f
	bsrl	%eax, %ecx		/* find PIL of nested interrupt */
	HIGHPILBASE(%ebx, %ecx)
_tsc_patch5:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, CPU_PIL_HIGH_START)
	/
	/ Another high-level interrupt is active below this one, so
	/ there is no need to check for an interrupt thread.  That will be
	/ done by the lowest priority high-level interrupt active.
	/
	jmp	1f
0:
	/ Check to see if there is a low-level interrupt active.  If so,
	/ place a starting timestamp in the thread structure.
	movl	CPU_THREAD(%ebx), %esi
	testw	$T_INTR_THREAD, T_FLAGS(%esi)
	jz	1f
_tsc_patch6:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)
1:
	movl	%edi, CPU_PRI(%ebx)
				/* interrupt vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx
	addl	$8, %esp		/* eax contains the current ipl */

	movl	CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */
	shrl	$LOCK_LEVEL + 1, %esi	/* HI PRI intrs. */
	jnz	.intr_ret2
	popl	%esp			/* restore the thread stack pointer */
.intr_ret2:
	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */
	SET_SIZE(cmnint)
	SET_SIZE(_interrupt)

#endif	/* __i386 */

/*
 * Declare a uintptr_t which has the size of _interrupt to enable stack
 * traceback code to know when a regs structure is on the stack.
 */
	.globl	_interrupt_size
	.align	CLONGSIZE
_interrupt_size:
	.NWORD	. - _interrupt
	.type	_interrupt_size, @object

#endif	/* __lint */

#if defined(__i386)

/*
 * Handle an interrupt in a new thread.
 * Entry:  traps disabled.
 *	%edi - old priority level
 *	%ebp - pointer to REGS
 *	%ecx - translated vector
 *	%eax - ipl of isr.
 *	%ebx - pointer to CPU struct
 * Uses:
 */

#if !defined(__lint)

	ENTRY_NP(intr_thread)
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %edx		/* cur thread in edx */

	/
	/ Are we interrupting an interrupt thread?  If so, account for it.
	/
	testw	$T_INTR_THREAD, T_FLAGS(%edx)
	jz	0f
	pushl	%ecx
	pushl	%eax
	movl	%edx, %esi
_tsc_patch7:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_CLR(%esi, T_INTR_START)
	movzbl	T_PIL(%esi), %ecx
	PILBASE_INTRSTAT(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	movl	%esi, %edx
	popl	%eax
	popl	%ecx
0:
	movl	%esp, T_SP(%edx)	/* mark stack in curthread for resume */
	pushl	%edi			/* get a temporary register */
	UNLINK_INTR_THREAD(%ebx, %esi, %edi)

	movl	T_LWP(%edx), %edi
	movl	%edx, T_INTR(%esi)	/* push old thread */
	movl	%edi, T_LWP(%esi)
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ chain the interrupted thread onto list from the interrupt thread.
	/ Set the new interrupt thread as the current one.
	/
	popl	%edi			/* Don't need a temp reg anymore */
	movl	T_STACK(%esi), %esp	/* interrupt stack pointer */
	movl	%esp, %ebp
	movl	%esi, CPU_THREAD(%ebx)	/* set new thread */
	pushl	%eax			/* save the ipl */
	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ebx		/* XXX Can cause probs if new class */
					/* is loaded on some other cpu. */
	addl	%ebx, %eax		/* convert level to dispatch priority */
	movw	%ax, T_PRI(%esi)

	/
	/ Take timestamp and store it in the thread structure.
	/
	movl	%eax, %ebx		/* save priority over rdtsc */
_tsc_patch8:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)
	movl	%ebx, %eax		/* restore priority */

	/ The following 3 instructions need not be in cli.
	/ Putting them here only to avoid the AGI penalty on Pentiums.

	pushl	%ecx			/* save interrupt vector. */
	pushl	%esi			/* save interrupt thread */
	movl	$autovect, %esi		/* get autovect structure */
	sti				/* enable interrupts */

	/ Fast event tracing.
	LOADCPU(%ebx)
	movl	CPU_FTRACE_STATE(%ebx), %ebx
	testl	$FTRACE_ENABLED, %ebx
	jz	1f

	movl	8(%esp), %ebx
	pushl	%ebx			/* ipl */
	pushl	%ecx			/* int vector */
	movl	T_SP(%edx), %ebx
	pushl	%ebx			/* &regs */
	pushl	$_ftrace_intr_thread_fmt
	call	ftrace_3_notick
	addl	$8, %esp
	popl	%ecx			/* restore int vector */
	addl	$4, %esp
1:
pre_loop2:
	movl	AVH_LINK(%esi, %ecx, 8), %esi
	xorl	%ebx, %ebx	/* bh is no. of intpts in chain */
				/* bl is DDI_INTR_CLAIMED status of chain */
	testl	%esi, %esi	/* if pointer is null */
	jz	loop_done2	/* we're done */
loop2:
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	testl	%edx, %edx		/* if pointer is null */
	jz	loop_done2		/* we're done */
	incb	%bh
	pushl	$0
	pushl	AV_INTARG2(%esi)
	pushl	AV_INTARG1(%esi)
	pushl	AV_VECTOR(%esi)
	pushl	AV_DIP(%esi)
	call	__dtrace_probe_interrupt__start
	pushl	AV_INTARG2(%esi)	/* get 2nd arg to interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get first arg to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$8, %esp
	movl	%eax, 16(%esp)
	call	__dtrace_probe_interrupt__complete
	addl	$20, %esp
	orb	%al, %bl		/* see if anyone claims intpt. */
	movl	AV_TICKSP(%esi), %ecx
	testl	%ecx, %ecx
	jz	no_time
	call	intr_get_time
	movl	AV_TICKSP(%esi), %ecx
	TSC_ADD_TO(%ecx, 0)
no_time:
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	testl	%esi, %esi		/* if pointer is non-null */
	jnz	loop2			/* continue */
loop_done2:
	cmpb	$1, %bh		/* if only 1 intpt in chain, it is OK */
	je	.loop_done2_1
	orb	%bl, %bl	/* If no one claims intpt, then it is OK */
	jz	.loop_done2_1
	movl	$autovect, %esi		/* else get autovect structure */
	movl	4(%esp), %ecx		/* restore intr vector */
	jmp	pre_loop2		/* and try again. */
.loop_done2_1:
	popl	%esi			/* restore intr thread pointer */

	LOADCPU(%ebx)

	cli		/* protect interrupt thread pool and intr_actv */
	movzbl	T_PIL(%esi), %eax

	/ Save value in regs
	pushl	%eax			/* current pil */
	pushl	%edx			/* (huh?) */
	pushl	%edi			/* old pil */

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx)

	/
	/ Take timestamp, compute interval, and update cumulative counter.
	/ esi = thread pointer, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %edi

	ASSERT_T_INTR_START_NZ(%esi)

_tsc_patch9:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	PILBASE_INTRSTAT(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %edi)
	TSC_ADD_TO(%edi, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%edi
	popl	%edx
	popl	%eax

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set)

	btrl	%eax, CPU_INTR_ACTV(%ebx)

	/ if there is still an interrupted thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to intr_thread_exit
	cmpl	$0, T_INTR(%esi)
	je	intr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	movl	CPU_BASE_SPL(%ebx), %eax	/* used below. */
	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)

	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	intr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
intr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)
				/* intr vector already on stack */
	pushl	%edi			/* old ipl */
	call	*setlvlx		/* eax contains the current ipl */
	/
	/ Switch back to the interrupted thread
	movl	T_INTR(%esi), %ecx

	/ Place starting timestamp in interrupted thread's thread structure.
_tsc_patch10:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, T_INTR_START)

	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp
	movl	%ecx, CPU_THREAD(%ebx)

	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* check for softints before we return. */

	/
	/ An interrupt returned on what was once (and still might be)
	/ an interrupt thread stack, but the interrupted process is no longer
	/ there.  This means the interrupt must have blocked.
	/
	/ There is no longer a thread under this one, so put this thread back
	/ on the CPU's free list and resume the idle thread which will dispatch
	/ the next thread to run.
	/
	/ All interrupts are disabled here
	/

intr_thread_exit:
#ifdef DEBUG
	incl	intr_thread_cnt
#endif
	INC64(%ebx, CPU_STATS_SYS_INTRBLK)	/* cpu_stats.sys.intrblk++ */
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	esi	interrupt thread
	/	edi	old ipl
	/	ebx	ptr to CPU struct

	/ Set CPU's base SPL level based on active interrupts bitmask
	call	set_base_spl

	movl	CPU_BASE_SPL(%ebx), %edi
	movl	%edi, CPU_PRI(%ebx)
				/* interrupt vector already on stack */
	pushl	%edi
	call	*setlvlx
	addl	$8, %esp	/* XXX - don't need to pop since */
				/* we are ready to switch */
	call	splhigh		/* block all intrs below lock level */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	swtch
	/ swtch() shouldn't return

	SET_SIZE(intr_thread)

#endif	/* __lint */
#endif	/* __i386 */

/*
 * Set Cpu's base SPL level, based on which interrupt levels are active.
 * Called at spl7 or above.
 */

#if defined(__lint)

void
set_base_spl(void)
{}

#else	/* __lint */

	ENTRY_NP(set_base_spl)
	movl	%gs:CPU_INTR_ACTV, %eax	/* load active interrupts mask */
	testl	%eax, %eax		/* is it zero? */
	jz	setbase
	testl	$0xff00, %eax
	jnz	ah_set
	shl	$24, %eax		/* shift 'em over so we can find */
					/* the 1st bit faster */
	bsrl	%eax, %eax
	subl	$24, %eax
setbase:
	movl	%eax, %gs:CPU_BASE_SPL	/* store base priority */
	ret
ah_set:
	shl	$16, %eax
	bsrl	%eax, %eax
	subl	$16, %eax
	jmp	setbase
	SET_SIZE(set_base_spl)

#endif	/* __lint */

#if defined(__i386)

/*
 * int
 * intr_passivate(from, to)
 *	thread_id_t	from;		interrupt thread
 *	thread_id_t	to;		interrupted thread
 *
 * intr_passivate(t, itp) makes the interrupted thread "t" runnable.
 *
 * Since t->t_sp has already been saved, t->t_pc is all that needs
 * to be set in this function.
 *
 * Returns interrupt level of the thread.
 */

#if defined(__lint)

/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }

#else	/* __lint */

	ENTRY(intr_passivate)
	movl	8(%esp), %eax		/* interrupted thread */
	movl	$_sys_rtt, T_PC(%eax)	/* set T_PC for interrupted thread */

	movl	4(%esp), %eax		/* interrupt thread */
	movl	T_STACK(%eax), %eax	/* get the pointer to the start */
					/* of the interrupt thread stack */
	movl	-4(%eax), %eax		/* interrupt level was the first */
					/* thing pushed onto the stack */
	ret
	SET_SIZE(intr_passivate)
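
/*
 * A rough C rendering of intr_passivate() above (a sketch only; it
 * assumes t_stack points just past the pil word that was pushed when
 * the interrupt thread was dispatched -- see the "push ipl as first
 * element in stack" comment in dosoftint below):
 *
 *	int
 *	intr_passivate(kthread_id_t from, kthread_id_t to)
 *	{
 *		to->t_pc = (uintptr_t)_sys_rtt;
 *		return (((int *)from->t_stack)[-1]);
 *	}
 */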

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__lint)

void
fakesoftint(void)
{}

#else	/* __lint */

	/
	/ If we're here, we're being called from splx() to fake a soft
	/ interrupt (note that interrupts are still disabled from splx()).
	/ We execute this code when a soft interrupt is posted at
	/ level higher than the CPU's current spl; when spl is lowered in
	/ splx(), it will see the softint and jump here.  We'll do exactly
	/ what a trap would do:  push our flags, %cs, %eip, error code
	/ and trap number (T_SOFTINT).  The cmnint() code will see T_SOFTINT
	/ and branch to the dosoftint() code.
	/
#if defined(__amd64)

	/*
	 * In 64-bit mode, iretq -always- pops all five regs
	 * Imitate the 16-byte auto-align of the stack, and the
	 * zero-ed out %ss value.
	 */
	ENTRY_NP(fakesoftint)
	movq	%rsp, %r11
	andq	$-16, %rsp
	pushq	$KDS_SEL	/* %ss */
	pushq	%r11		/* %rsp */
	pushf			/* rflags */
	pushq	$KCS_SEL	/* %cs */
	leaq	fakesoftint_return(%rip), %r11
	pushq	%r11		/* %rip */
	pushq	$0		/* err */
	pushq	$T_SOFTINT	/* trap */
	jmp	cmnint
	SET_SIZE(fakesoftint)

#elif defined(__i386)

	ENTRY_NP(fakesoftint)
	pushf
	push	%cs
	push	$fakesoftint_return
	push	$0
	push	$T_SOFTINT
	jmp	cmnint
	SET_SIZE(fakesoftint)

#endif	/* __i386 */

	.align	CPTRSIZE
	.globl	_fakesoftint_size
	.type	_fakesoftint_size, @object
_fakesoftint_size:
	.NWORD	. - fakesoftint
	SET_SIZE(_fakesoftint_size)

/*
 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx)
 * Process software interrupts
 * Interrupts are disabled here.
 */
#if defined(__i386)

	ENTRY_NP(dosoftint)

	bsrl	%edx, %edx		/* find highest pending interrupt */
	cmpl	%edx, %edi		/* if curipl >= pri soft pending intr */
	jae	_sys_rtt		/* skip */

	movl	%gs:CPU_BASE_SPL, %eax	/* check for blocked intr threads */
	cmpl	%edx, %eax		/* if basespl >= pri soft pending */
	jae	_sys_rtt		/* skip */

	lock				/* MP protect */
	btrl	%edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */
	jnc	dosoftint_again

	movl	%edx, CPU_PRI(%ebx)	/* set IPL to softint level */
	pushl	%edx
	call	*setspl		/* mask levels up to the softint level */
	popl	%eax		/* priority we are at in %eax */

	/ Get set to run interrupt thread.
	/ There should always be an interrupt thread since we allocate one
	/ for each level on the CPU.
	UNLINK_INTR_THREAD(%ebx, %esi, %edx)

	/
	/ Note that the code in kcpc_overflow_intr -relies- on the ordering
	/ of events here - in particular that t->t_lwp of the interrupt
	/ thread is set to the pinned thread *before* curthread is changed
	/
	movl	CPU_THREAD(%ebx), %ecx

	/ If we are interrupting an interrupt thread, account for it.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
	pushl	%eax
	movl	%eax, %ebp
_tsc_patch11:
	nop; nop			/* patched to rdtsc if available */
	PILBASE_INTRSTAT(%ebx, %ebp)
	TSC_SUB_FROM(%ecx, T_INTR_START)
	TSC_ADD_TO(%ebp, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ebp)
	TSC_ADD_TO(%ebp, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */
	popl	%eax
0:
	movl	T_LWP(%ecx), %ebp
	movl	%ebp, T_LWP(%esi)
	/
	/ Threads on the interrupt thread free list could have state already
	/ set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	/ Could eliminate the next two instructions with a little work.
	/
	movl	$ONPROC_THREAD, T_STATE(%esi)
	/
	/ Push interrupted thread onto list from new thread.
	/ Set the new thread as the current one.
	/ Set interrupted thread's T_SP because if it is the idle thread,
	/ Resume() may use that stack between threads.
	/
	movl	%esp, T_SP(%ecx)	/* mark stack for resume */
	movl	%ecx, T_INTR(%esi)	/* push old thread */
	movl	%esi, CPU_THREAD(%ebx)	/* set new thread */
	movl	T_STACK(%esi), %esp	/* interrupt stack pointer */
	movl	%esp, %ebp

	pushl	%eax			/* push ipl as first element in stack */
					/* see intr_passivate() */
	/
	/ Set bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set)

	btsl	%eax, CPU_INTR_ACTV(%ebx)

	/
	/ Initialize thread priority level from intr_pri
	/
	movb	%al, T_PIL(%esi)	/* store pil */
	movzwl	intr_pri, %ecx
	addl	%eax, %ecx		/* convert level to dispatch priority */
	movw	%cx, T_PRI(%esi)

	/
	/ Store starting timestamp in thread structure.
	/ esi = thread, ebx = cpu pointer, eax = PIL
	/
	movl	%eax, %ecx		/* save PIL from rdtsc clobber */
_tsc_patch12:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%esi, T_INTR_START)

	sti				/* enable interrupts */

	/
	/ Enabling interrupts (above) could raise the current
	/ IPL and base SPL.  But, we continue processing the current soft
	/ interrupt and we will check the base SPL next time in the loop
	/ so that blocked interrupt threads get a chance to run.
	/

	/
	/ dispatch soft interrupts
	/
	pushl	%ecx
	call	av_dispatch_softvect
	addl	$4, %esp

	cli				/* protect interrupt thread pool */
					/* and softinfo & sysinfo */
	movl	CPU_THREAD(%ebx), %esi	/* restore thread pointer */
	movzbl	T_PIL(%esi), %ecx

	/ cpu_stats.sys.intr[PIL]++
	INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx)

	/
	/ Clear bit for this PIL in CPU's interrupt active bitmask.
	/

	ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set)

	btrl	%ecx, CPU_INTR_ACTV(%ebx)

	/
	/ Take timestamp, compute interval, update cumulative counter.
	/ esi = thread, ebx = cpu, ecx = PIL
	/
	PILBASE_INTRSTAT(%ebx, %ecx)
_tsc_patch13:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%esi, T_INTR_START)
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)
	INTRACCTBASE(%ebx, %ecx)
	TSC_ADD_TO(%ecx, CPU_INTRACCT)	/* cpu_intracct[cpu_mstate] += tsc */

	/ if there is still an interrupt thread underneath this one
	/ then the interrupt was never blocked and the return is fairly
	/ simple.  Otherwise jump to softintr_thread_exit.
	/ softintr_thread_exit expects esi to be curthread & ebx to be ipl.
	cmpl	$0, T_INTR(%esi)
	je	softintr_thread_exit

	/
	/ link the thread back onto the interrupt thread pool
	LINK_INTR_THREAD(%ebx, %esi, %edx)

	/ set the thread state to free so kmdb doesn't see it
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Switch back to the interrupted thread
	movl	T_INTR(%esi), %ecx
	movl	%ecx, CPU_THREAD(%ebx)
	movl	T_SP(%ecx), %esp	/* restore stack pointer */
	movl	%esp, %ebp

	/ If we are returning to an interrupt thread, store a starting
	/ timestamp in the thread structure.
	testw	$T_INTR_THREAD, T_FLAGS(%ecx)
	jz	0f
_tsc_patch14:
	nop; nop			/* patched to rdtsc if available */
	TSC_STORE(%ecx, T_INTR_START)
0:
	movl	CPU_BASE_SPL(%ebx), %eax
	cmpl	%eax, %edi		/* if (oldipl >= basespl) */
	jae	softintr_restore_ipl	/* then use oldipl */
	movl	%eax, %edi		/* else use basespl */
softintr_restore_ipl:
	movl	%edi, CPU_PRI(%ebx)	/* set IPL to old level */
	pushl	%edi
	call	*setspl
	popl	%eax
dosoftint_again:
	movl	CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */
	orl	%edx, %edx
	jz	_sys_rtt
	jmp	dosoftint	/* process more software interrupts */

softintr_thread_exit:
	/
	/ Put thread back on the interrupt thread list.
	/ As a reminder, the regs at this point are
	/	%esi	interrupt thread

	/
	/ This was an interrupt thread, so set CPU's base SPL level
	/ set_base_spl only uses %eax.
	/
	call	set_base_spl		/* interrupt vector already on stack */
	/
	/ Set the thread state to free so kmdb doesn't see it
	/
	movl	$FREE_THREAD, T_STATE(%esi)
	/
	/ Put thread on either the interrupt pool or the free pool and
	/ call swtch() to resume another thread.
	/
	LOADCPU(%ebx)
	LINK_INTR_THREAD(%ebx, %esi, %edx)
	call	splhigh			/* block all intrs below lock lvl */
	call	swtch
	/ swtch() shouldn't return
	SET_SIZE(dosoftint)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(lint)

/*
 * intr_get_time() is a resource for interrupt handlers to determine how
 * much time has been spent handling the current interrupt. Such a function
 * is needed because higher level interrupts can arrive during the
 * processing of an interrupt, thus making direct comparisons of %tick by
 * the handler inaccurate. intr_get_time() only returns time spent in the
 * current interrupt handler.
 *
 * The caller must be calling from an interrupt handler running at a pil
 * below or at lock level. Timings are not provided for high-level
 * interrupts.
 *
 * The first time intr_get_time() is called while handling an interrupt,
 * it returns the time since the interrupt handler was invoked. Subsequent
 * calls will return the time since the prior call to intr_get_time(). Time
 * is returned as ticks. Use tsc_scalehrtime() to convert ticks to nsec.
 *
 * Theory Of Intrstat[][]:
 *
 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
 * uint64_ts per pil.
 *
 * intrstat[pil][0] is a cumulative count of the number of ticks spent
 * handling all interrupts at the specified pil on this CPU. It is
 * exported via kstats to the user.
 *
 * intrstat[pil][1] is always a count of ticks less than or equal to the
 * value in [0]. The difference between [1] and [0] is the value returned
 * by a call to intr_get_time(). At the start of interrupt processing,
 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
 * time, [0] will increase, but [1] will remain the same. A call to
 * intr_get_time() will return the difference, then update [1] to be the
 * same as [0]. Future calls will return the time since the last call.
 * Finally, when the interrupt completes, [1] is updated to the same as [0].
 *
 * Implementation:
 *
 * intr_get_time() works much like a higher level interrupt arriving. It
 * "checkpoints" the timing information by incrementing intrstat[pil][0]
 * to include elapsed running time, and by setting t_intr_start to rdtsc.
 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
 * and updates intrstat[pil][1] to be the same as the new value of
 * intrstat[pil][0].
 *
 * In the normal handling of interrupts, after an interrupt handler returns
 * and the code in intr_thread() updates intrstat[pil][0], it then sets
 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
 * is 0.
 *
 * Whenever interrupts arrive on a CPU which is handling a lower pil
 * interrupt, they update the lower pil's [0] to show time spent in the
 * handler that they've interrupted. This results in a growing discrepancy
 * between [0] and [1], which is returned the next time intr_get_time() is
 * called. Time spent in the higher-pil interrupt will not be returned in
 * the next intr_get_time() call from the original interrupt, because
 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
 */
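
/*
 * A worked example with made-up numbers: suppose intrstat[5][0] and
 * intrstat[5][1] both read 1000 ticks when a pil-5 handler starts, and the
 * handler has run for 100 ticks when it first calls intr_get_time(). The
 * call "checkpoints" the elapsed time, so intrstat[5][0] becomes 1100, the
 * call returns 1100 - 1000 = 100, and intrstat[5][1] is set to 1100. If
 * the handler runs for another 40 ticks and calls intr_get_time() again,
 * intrstat[5][0] becomes 1140, the call returns 40, and intrstat[5][1]
 * becomes 1140.
 */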

/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return 0; }
#else	/* lint */


#if defined(__amd64)
	ENTRY_NP(intr_get_time)
	cli				/* make this easy -- block intrs */
	LOADCPU(%rdi)
	call	intr_thread_get_time
	sti
	ret
	SET_SIZE(intr_get_time)

#elif defined(__i386)

#ifdef DEBUG


_intr_get_time_high_pil:
	.string	"intr_get_time(): %pil > LOCK_LEVEL"
_intr_get_time_not_intr:
	.string	"intr_get_time(): not called from an interrupt thread"
_intr_get_time_no_start_time:
	.string	"intr_get_time(): t_intr_start == 0"

/*
 * ASSERT(%pil <= LOCK_LEVEL)
 */
#define	ASSERT_PIL_BELOW_LOCK_LEVEL(cpureg)				\
	testl	$CPU_INTR_ACTV_HIGH_LEVEL_MASK, CPU_INTR_ACTV(cpureg);	\
	jz	0f;							\
	__PANIC(_intr_get_time_high_pil, 0f);				\
0:

/*
 * ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
 */
#define	ASSERT_NO_PIL_0_INTRS(thrreg)			\
	testw	$T_INTR_THREAD, T_FLAGS(thrreg);	\
	jz	1f;					\
	cmpb	$0, T_PIL(thrreg);			\
	jne	0f;					\
1:							\
	__PANIC(_intr_get_time_not_intr, 0f);		\
0:

/*
 * ASSERT(t_intr_start != 0)
 */
#define	ASSERT_INTR_START_NOT_0(thrreg)			\
	cmpl	$0, T_INTR_START(thrreg);		\
	jnz	0f;					\
	cmpl	$0, T_INTR_START+4(thrreg);		\
	jnz	0f;					\
	__PANIC(_intr_get_time_no_start_time, 0f);	\
0:

#endif /* DEBUG */

	ENTRY_NP(intr_get_time)

	cli				/* make this easy -- block intrs */
	pushl	%esi			/* and free up some registers */

	LOADCPU(%esi)
	movl	CPU_THREAD(%esi), %ecx

#ifdef DEBUG
	ASSERT_PIL_BELOW_LOCK_LEVEL(%esi)
	ASSERT_NO_PIL_0_INTRS(%ecx)
	ASSERT_INTR_START_NOT_0(%ecx)
#endif /* DEBUG */

_tsc_patch17:
	nop; nop			/* patched to rdtsc if available */
	TSC_SUB_FROM(%ecx, T_INTR_START)	/* get elapsed time */
	TSC_ADD_TO(%ecx, T_INTR_START)		/* T_INTR_START = rdtsc */

	movzbl	T_PIL(%ecx), %ecx		/* %ecx = pil */
	PILBASE_INTRSTAT(%esi, %ecx)		/* %ecx = CPU + pil*16 */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT)		/* intrstat[0] += elapsed */
	TSC_LOAD(%ecx, CPU_INTRSTAT)		/* get new intrstat[0] */
	TSC_SUB_FROM(%ecx, CPU_INTRSTAT+8)	/* diff with intrstat[1] */
	TSC_ADD_TO(%ecx, CPU_INTRSTAT+8)	/* intrstat[1] = intrstat[0] */

	/* %edx/%eax contain difference between old and new intrstat[1] */

	popl	%esi
	sti
	ret
	SET_SIZE(intr_get_time)
#endif	/* __i386 */

#endif /* lint */