1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22/* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */ 28/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */ 29/* All Rights Reserved */ 30 31/* Copyright (c) 1987, 1988 Microsoft Corporation */ 32/* All Rights Reserved */ 33 34#pragma ident "%Z%%M% %I% %E% SMI" 35 36#include <sys/asm_linkage.h> 37#include <sys/asm_misc.h> 38#include <sys/regset.h> 39#include <sys/psw.h> 40#include <sys/x86_archext.h> 41 42#if defined(__lint) 43 44#include <sys/types.h> 45#include <sys/thread.h> 46#include <sys/systm.h> 47 48#else /* __lint */ 49 50#include <sys/segments.h> 51#include <sys/pcb.h> 52#include <sys/trap.h> 53#include <sys/ftrace.h> 54#include <sys/traptrace.h> 55#include <sys/clock.h> 56#include <sys/panic.h> 57#include "assym.h" 58 59_ftrace_intr_thread_fmt: 60 .string "intr_thread(): regs=0x%lx, int=0x%x, pil=0x%x" 61 62#endif /* lint */ 63 64#if defined(__i386) 65 66#if defined(__lint) 67 68void 69patch_tsc(void) 70{} 71 72#else /* __lint */ 73 74/* 75 * To cope with processors that do not implement the rdtsc instruction, 76 * we patch the kernel to use rdtsc if that feature is detected on the CPU. 77 * On an unpatched kernel, all locations requiring rdtsc are nop's. 78 * 79 * This function patches the nop's to rdtsc. 80 */ 81 ENTRY_NP(patch_tsc) 82 movw _rdtsc_insn, %cx 83 movw %cx, _tsc_patch1 84 movw %cx, _tsc_patch2 85 movw %cx, _tsc_patch3 86 movw %cx, _tsc_patch4 87 movw %cx, _tsc_patch5 88 movw %cx, _tsc_patch6 89 movw %cx, _tsc_patch7 90 movw %cx, _tsc_patch8 91 movw %cx, _tsc_patch9 92 movw %cx, _tsc_patch10 93 movw %cx, _tsc_patch11 94 movw %cx, _tsc_patch12 95 movw %cx, _tsc_patch13 96 movw %cx, _tsc_patch14 97 movw %cx, _tsc_patch15 98 movw %cx, _tsc_patch16 99 ret 100_rdtsc_insn: 101 rdtsc 102 SET_SIZE(patch_tsc) 103 104#endif /* __lint */ 105 106#endif /* __i386 */ 107 108 109#if defined(__lint) 110 111void 112_interrupt(void) 113{} 114 115#else /* __lint */ 116 117#if defined(__amd64) 118 119 /* 120 * Common register usage: 121 * 122 * %rbx cpu pointer 123 * %r12 trap trace pointer -and- stash of 124 * vec across intr_thread dispatch. 125 * %r13d ipl of isr 126 * %r14d old ipl (ipl level we entered on) 127 * %r15 interrupted thread stack pointer 128 */ 129 ENTRY_NP2(cmnint, _interrupt) 130 131 INTR_PUSH 132 133 /* 134 * At the end of TRACE_PTR %r12 points to the current TRAPTRACE entry 135 */ 136 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_INTERRUPT) 137 /* Uses labels 8 and 9 */ 138 TRACE_REGS(%r12, %rsp, %rax, %rbx) /* Uses label 9 */ 139 TRACE_STAMP(%r12) /* Clobbers %eax, %edx, uses 9 */ 140 141 DISABLE_INTR_FLAGS /* (and set kernel flag values) */ 142 143 movq %rsp, %rbp 144 145 TRACE_STACK(%r12) 146 147 LOADCPU(%rbx) /* &cpu */ 148 leaq REGOFF_TRAPNO(%rbp), %rsi /* &vector */ 149 movl CPU_PRI(%rbx), %r14d /* old ipl */ 150 movl CPU_SOFTINFO(%rbx), %edx 151 152#ifdef TRAPTRACE 153 movl $255, TTR_IPL(%r12) 154 movl %r14d, %edi 155 movb %dil, TTR_PRI(%r12) 156 movl CPU_BASE_SPL(%rbx), %edi 157 movb %dil, TTR_SPL(%r12) 158 movb $255, TTR_VECTOR(%r12) 159#endif 160 161 /* 162 * Check to see if the trap number is T_SOFTINT; if it is, 163 * jump straight to dosoftint now. 164 */ 165 cmpq $T_SOFTINT, (%rsi) 166 je dosoftint 167 168 /* 169 * Raise the interrupt priority level, returns newpil. 170 * (The vector address is in %rsi so setlvl can update it.) 171 */ 172 movl %r14d, %edi /* old ipl */ 173 /* &vector */ 174 call *setlvl(%rip) 175 176#ifdef TRAPTRACE 177 movb %al, TTR_IPL(%r12) 178#endif 179 /* 180 * check for spurious interrupt 181 */ 182 cmpl $-1, %eax 183 je _sys_rtt 184 185#ifdef TRAPTRACE 186 movl %r14d, %edx 187 movb %dl, TTR_PRI(%r12) 188 movl CPU_BASE_SPL(%rbx), %edx 189 movb %dl, TTR_SPL(%r12) 190#endif 191 movl %eax, CPU_PRI(%rbx) /* update ipl */ 192 193#ifdef TRAPTRACE 194 movl REGOFF_TRAPNO(%rbp), %edx 195 movb %dl, TTR_VECTOR(%r12) 196#endif 197 movl %eax, %r13d /* ipl of isr */ 198 199 /* 200 * At this point we can take one of two paths. 201 * If the new level is at or below lock level, we will 202 * run this interrupt in a separate thread. 203 */ 204 cmpl $LOCK_LEVEL, %eax 205 jbe intr_thread 206 207 movq %rbx, %rdi /* &cpu */ 208 movl %r13d, %esi /* ipl */ 209 movl %r14d, %edx /* old ipl */ 210 movq %rbp, %rcx /* ®s */ 211 call hilevel_intr_prolog 212 orl %eax, %eax /* zero if need to switch stack */ 213 jnz 1f 214 215 /* 216 * Save the thread stack and get on the cpu's interrupt stack 217 */ 218 movq %rsp, %r15 219 movq CPU_INTR_STACK(%rbx), %rsp 2201: 221 222 sti 223 224 /* 225 * Walk the list of handlers for this vector, calling 226 * them as we go until no more interrupts are claimed. 227 */ 228 movl REGOFF_TRAPNO(%rbp), %edi 229 call av_dispatch_autovect 230 231 cli 232 233 movq %rbx, %rdi /* &cpu */ 234 movl %r13d, %esi /* ipl */ 235 movl %r14d, %edx /* oldipl */ 236 movl REGOFF_TRAPNO(%rbp), %ecx /* vec */ 237 call hilevel_intr_epilog 238 orl %eax, %eax /* zero if need to switch stack */ 239 jnz 2f 240 movq %r15, %rsp 2412: /* 242 * Check for, and execute, softints before we iret. 243 * 244 * (dosoftint expects oldipl in %r14d (which is where it is) 245 * the cpu pointer in %rbx (which is where it is) and the 246 * softinfo in %edx (which is where we'll put it right now)) 247 */ 248 movl CPU_SOFTINFO(%rbx), %edx 249 orl %edx, %edx 250 jz _sys_rtt 251 jmp dosoftint 252 /*NOTREACHED*/ 253 254 SET_SIZE(cmnint) 255 SET_SIZE(_interrupt) 256 257/* 258 * Handle an interrupt in a new thread 259 * 260 * As we branch here, interrupts are still masked, 261 * %rbx still contains the cpu pointer, 262 * %r14d contains the old ipl that we came in on, and 263 * %eax contains the new ipl that we got from the setlvl routine 264 */ 265 266 ENTRY_NP(intr_thread) 267 268 movq %rbx, %rdi /* &cpu */ 269 movq %rbp, %rsi /* ®s = stack pointer for _sys_rtt */ 270 movl REGOFF_TRAPNO(%rbp), %r12d /* stash the vec */ 271 movl %eax, %edx /* new pil from setlvlx() */ 272 call intr_thread_prolog 273 movq %rsp, %r15 274 movq %rax, %rsp /* t_stk from interrupt thread */ 275 movq %rsp, %rbp 276 277 sti 278 279 testl $FTRACE_ENABLED, CPU_FTRACE_STATE(%rbx) 280 jz 1f 281 /* 282 * ftracing support. do we need this on x86? 283 */ 284 leaq _ftrace_intr_thread_fmt(%rip), %rdi 285 movq %rbp, %rsi /* ®s */ 286 movl %r12d, %edx /* vec */ 287 movq CPU_THREAD(%rbx), %r11 /* (the interrupt thread) */ 288 movzbl T_PIL(%r11), %ecx /* newipl */ 289 call ftrace_3_notick 2901: 291 movl %r12d, %edi /* vec */ 292 call av_dispatch_autovect 293 294 cli 295 296 movq %rbx, %rdi /* &cpu */ 297 movl %r12d, %esi /* vec */ 298 movl %r14d, %edx /* oldpil */ 299 call intr_thread_epilog 300 /* 301 * If we return from here (we might not if the interrupted thread 302 * has exited or blocked, in which case we'll have quietly swtch()ed 303 * away) then we need to switch back to our old %rsp 304 */ 305 movq %r15, %rsp 306 movq %rsp, %rbp 307 /* 308 * Check for, and execute, softints before we iret. 309 * 310 * (dosoftint expects oldpil in %r14d, the cpu pointer in %rbx and 311 * the mcpu_softinfo.st_pending field in %edx. 312 */ 313 movl CPU_SOFTINFO(%rbx), %edx 314 orl %edx, %edx 315 jz _sys_rtt 316 /*FALLTHROUGH*/ 317 318/* 319 * Process soft interrupts. 320 * Interrupts are masked, and we have a minimal frame on the stack. 321 * %edx should contain the mcpu_softinfo.st_pending field 322 */ 323 324 ALTENTRY(dosoftint) 325 326 movq %rbx, %rdi /* &cpu */ 327 movq %rbp, %rsi /* ®s = stack pointer for _sys_rtt */ 328 /* cpu->cpu_m.mcpu_softinfo.st_pending */ 329 movl %r14d, %ecx /* oldipl */ 330 call dosoftint_prolog 331 /* 332 * dosoftint_prolog() usually returns a stack pointer for the 333 * interrupt thread that we must switch to. However, if the 334 * returned stack pointer is NULL, then the software interrupt was 335 * too low in priority to run now; we'll catch it another time. 336 */ 337 orq %rax, %rax 338 jz _sys_rtt 339 movq %rsp, %r15 340 movq %rax, %rsp /* t_stk from interrupt thread */ 341 movq %rsp, %rbp 342 343 sti 344 345 /* 346 * Enabling interrupts (above) could raise the current ipl 347 * and base spl. But, we continue processing the current soft 348 * interrupt and we will check the base spl next time around 349 * so that blocked interrupt threads get a chance to run. 350 */ 351 movq CPU_THREAD(%rbx), %r11 /* now an interrupt thread */ 352 movzbl T_PIL(%r11), %edi 353 call av_dispatch_softvect 354 355 cli 356 357 movq %rbx, %rdi /* &cpu */ 358 movl %r14d, %esi /* oldpil */ 359 call dosoftint_epilog 360 movq %r15, %rsp /* back on old stack pointer */ 361 movq %rsp, %rbp 362 movl CPU_SOFTINFO(%rbx), %edx 363 orl %edx, %edx 364 jz _sys_rtt 365 jmp dosoftint 366 367 SET_SIZE(dosoftint) 368 SET_SIZE(intr_thread) 369 370#elif defined(__i386) 371 372/* 373 * One day, this should just invoke the C routines that know how to 374 * do all the interrupt bookkeeping. In the meantime, try 375 * and make the assembler a little more comprehensible. 376 */ 377 378#define INC64(basereg, offset) \ 379 addl $1, offset(basereg); \ 380 adcl $0, offset + 4(basereg) 381 382#define TSC_CLR(basereg, offset) \ 383 movl $0, offset(basereg); \ 384 movl $0, offset + 4(basereg) 385 386/* 387 * The following macros assume the time value is in %edx:%eax 388 * e.g. from a rdtsc instruction. 389 */ 390#define TSC_MOV(reg, offset) \ 391 movl %eax, offset(reg); \ 392 movl %edx, offset + 4(reg) 393 394#define TSC_ADD_TO(reg, offset) \ 395 addl %eax, offset(reg); \ 396 adcl %edx, offset + 4(reg) 397 398#define TSC_SUB_FROM(reg, offset) \ 399 subl offset(reg), %eax; \ 400 sbbl offset + 4(reg), %edx /* interval in edx:eax */ 401 402/* 403 * basereg - pointer to cpu struct 404 * pilreg - pil or converted pil (pil - (LOCK_LEVEL + 1)) 405 * pilreg_32 - 32-bit version of pilreg 406 * 407 * Returns (base + pil * 8) in pilreg 408 */ 409#define PILBASE(basereg, pilreg) \ 410 lea (basereg, pilreg, 8), pilreg 411 412/* 413 * Returns (base + (pil - (LOCK_LEVEL + 1)) * 8) in pilreg 414 */ 415#define HIGHPILBASE(basereg, pilreg, pilreg_32) \ 416 subl $LOCK_LEVEL + 1, pilreg_32; \ 417 PILBASE(basereg, pilreg) 418 419/* 420 * cpu_stats.sys.intr[PIL]++ 421 */ 422#define INC_CPU_STATS_INTR(pilreg, tmpreg, tmpreg_32, basereg) \ 423 movl pilreg, tmpreg_32; \ 424 PILBASE(basereg, tmpreg); \ 425 INC64(tmpreg, _CONST(CPU_STATS_SYS_INTR - 8)) 426 427/* 428 * Unlink thread from CPU's list 429 */ 430#define UNLINK_INTR_THREAD(cpureg, ithread, tmpreg) \ 431 mov CPU_INTR_THREAD(cpureg), ithread; \ 432 mov T_LINK(ithread), tmpreg; \ 433 mov tmpreg, CPU_INTR_THREAD(cpureg) 434 435/* 436 * Link a thread into CPU's list 437 */ 438#define LINK_INTR_THREAD(cpureg, ithread, tmpreg) \ 439 mov CPU_INTR_THREAD(cpureg), tmpreg; \ 440 mov tmpreg, T_LINK(ithread); \ 441 mov ithread, CPU_INTR_THREAD(cpureg) 442 443#if defined(DEBUG) 444 445/* 446 * Do not call panic, if panic is already in progress. 447 */ 448#define __PANIC(msg, label) \ 449 cmpl $0, panic_quiesce; \ 450 jne label; \ 451 pushl $msg; \ 452 call panic 453 454#define __CMP64_JNE(basereg, offset, label) \ 455 cmpl $0, offset(basereg); \ 456 jne label; \ 457 cmpl $0, offset + 4(basereg); \ 458 jne label 459 460/* 461 * ASSERT(!(CPU->cpu_intr_actv & (1 << PIL))) 462 */ 463#define ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg) \ 464 btl pilreg, CPU_INTR_ACTV(basereg); \ 465 jnc 4f; \ 466 __PANIC(msg, 4f); \ 4674: 468 469/* 470 * ASSERT(CPU->cpu_intr_actv & (1 << PIL)) 471 */ 472#define ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg) \ 473 btl pilreg, CPU_INTR_ACTV(basereg); \ 474 jc 5f; \ 475 __PANIC(msg, 5f); \ 4765: 477 478/* 479 * ASSERT(CPU->cpu_pil_high_start != 0) 480 */ 481#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg) \ 482 __CMP64_JNE(basereg, CPU_PIL_HIGH_START, 6f); \ 483 __PANIC(_interrupt_timestamp_zero, 6f); \ 4846: 485 486/* 487 * ASSERT(t->t_intr_start != 0) 488 */ 489#define ASSERT_T_INTR_START_NZ(basereg) \ 490 __CMP64_JNE(basereg, T_INTR_START, 7f); \ 491 __PANIC(_intr_thread_t_intr_start_zero, 7f); \ 4927: 493 494_interrupt_actv_bit_set: 495 .string "_interrupt(): cpu_intr_actv bit already set for PIL" 496_interrupt_actv_bit_not_set: 497 .string "_interrupt(): cpu_intr_actv bit not set for PIL" 498_interrupt_timestamp_zero: 499 .string "_interrupt(): timestamp zero upon handler return" 500_intr_thread_actv_bit_not_set: 501 .string "intr_thread(): cpu_intr_actv bit not set for PIL" 502_intr_thread_t_intr_start_zero: 503 .string "intr_thread(): t_intr_start zero upon handler return" 504_dosoftint_actv_bit_set: 505 .string "dosoftint(): cpu_intr_actv bit already set for PIL" 506_dosoftint_actv_bit_not_set: 507 .string "dosoftint(): cpu_intr_actv bit not set for PIL" 508 509 DGDEF(intr_thread_cnt) 510 511#else 512#define ASSERT_NOT_CPU_INTR_ACTV(pilreg, basereg, msg) 513#define ASSERT_CPU_INTR_ACTV(pilreg, basereg, msg) 514#define ASSERT_CPU_PIL_HIGH_START_NZ(basereg) 515#define ASSERT_T_INTR_START_NZ(basereg) 516#endif 517 518 ENTRY_NP2(cmnint, _interrupt) 519 520 INTR_PUSH 521 522 /* 523 * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry 524 */ 525 TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT) 526 /* Uses labels 8 and 9 */ 527 TRACE_REGS(%esi, %esp, %eax, %ebx) /* Uses label 9 */ 528 TRACE_STAMP(%esi) /* Clobbers %eax, %edx, uses 9 */ 529 530 movl %esp, %ebp 531 DISABLE_INTR_FLAGS 532 LOADCPU(%ebx) /* get pointer to CPU struct. Avoid gs refs */ 533 leal REGOFF_TRAPNO(%ebp), %ecx /* get address of vector */ 534 movl CPU_PRI(%ebx), %edi /* get ipl */ 535 movl CPU_SOFTINFO(%ebx), %edx 536 537 / 538 / Check to see if the trap number is T_SOFTINT; if it is, we'll 539 / jump straight to dosoftint now. 540 / 541 cmpl $T_SOFTINT, (%ecx) 542 je dosoftint 543 544 / raise interrupt priority level 545 / oldipl is in %edi, vectorp is in %ecx 546 / newipl is returned in %eax 547 pushl %ecx 548 pushl %edi 549 call *setlvl 550 popl %edi /* save oldpil in %edi */ 551 popl %ecx 552 553#ifdef TRAPTRACE 554 movb %al, TTR_IPL(%esi) 555#endif 556 557 / check for spurious interrupt 558 cmp $-1, %eax 559 je _sys_rtt 560 561#ifdef TRAPTRACE 562 movl CPU_PRI(%ebx), %edx 563 movb %dl, TTR_PRI(%esi) 564 movl CPU_BASE_SPL(%ebx), %edx 565 movb %dl, TTR_SPL(%esi) 566#endif 567 568 movl %eax, CPU_PRI(%ebx) /* update ipl */ 569 movl REGOFF_TRAPNO(%ebp), %ecx /* reload the interrupt vector */ 570 571#ifdef TRAPTRACE 572 movb %cl, TTR_VECTOR(%esi) 573#endif 574 575 / At this point we can take one of two paths. If the new priority 576 / level is less than or equal to LOCK LEVEL then we jump to code that 577 / will run this interrupt as a separate thread. Otherwise the 578 / interrupt is NOT run as a separate thread. 579 580 / %edi - old priority level 581 / %ebp - pointer to REGS 582 / %ecx - translated vector 583 / %eax - ipl of isr 584 / %ebx - cpu pointer 585 586 cmpl $LOCK_LEVEL, %eax /* compare to highest thread level */ 587 jbe intr_thread /* process as a separate thread */ 588 589 cmpl $CBE_HIGH_PIL, %eax /* Is this a CY_HIGH_LEVEL interrupt? */ 590 jne 2f 591 592 movl REGOFF_PC(%ebp), %esi 593 movl %edi, CPU_PROFILE_PIL(%ebx) /* record interrupted PIL */ 594 testw $CPL_MASK, REGOFF_CS(%ebp) /* trap from supervisor mode? */ 595 jz 1f 596 movl %esi, CPU_PROFILE_UPC(%ebx) /* record user PC */ 597 movl $0, CPU_PROFILE_PC(%ebx) /* zero kernel PC */ 598 jmp 2f 599 6001: 601 movl %esi, CPU_PROFILE_PC(%ebx) /* record kernel PC */ 602 movl $0, CPU_PROFILE_UPC(%ebx) /* zero user PC */ 603 6042: 605 pushl %ecx /* vec */ 606 pushl %eax /* newpil */ 607 608 / 609 / See if we are interrupting another high-level interrupt. 610 / 611 movl CPU_INTR_ACTV(%ebx), %eax 612 andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax 613 jz 0f 614 / 615 / We have interrupted another high-level interrupt. 616 / Load starting timestamp, compute interval, update cumulative counter. 617 / 618 bsrl %eax, %ecx /* find PIL of interrupted handler */ 619 HIGHPILBASE(%ebx, %ecx, %ecx) 620_tsc_patch1: 621 nop; nop /* patched to rdtsc if available */ 622 TSC_SUB_FROM(%ecx, CPU_PIL_HIGH_START) 623 addl $CPU_INTRSTAT_LOW_PIL_OFFSET, %ecx /* offset PILs 0-10 */ 624 TSC_ADD_TO(%ecx, CPU_INTRSTAT) 625 / 626 / Another high-level interrupt is active below this one, so 627 / there is no need to check for an interrupt thread. That will be 628 / done by the lowest priority high-level interrupt active. 629 / 630 jmp 1f 6310: 632 / 633 / See if we are interrupting a low-level interrupt thread. 634 / 635 movl CPU_THREAD(%ebx), %esi 636 testw $T_INTR_THREAD, T_FLAGS(%esi) 637 jz 1f 638 / 639 / We have interrupted an interrupt thread. Account for its time slice 640 / only if its time stamp is non-zero. 641 / 642 cmpl $0, T_INTR_START+4(%esi) 643 jne 0f 644 cmpl $0, T_INTR_START(%esi) 645 je 1f 6460: 647 movzbl T_PIL(%esi), %ecx /* %ecx has PIL of interrupted handler */ 648 PILBASE(%ebx, %ecx) 649_tsc_patch2: 650 nop; nop /* patched to rdtsc if available */ 651 TSC_SUB_FROM(%esi, T_INTR_START) 652 TSC_CLR(%esi, T_INTR_START) 653 TSC_ADD_TO(%ecx, CPU_INTRSTAT) 6541: 655 / Store starting timestamp in CPU structure for this PIL. 656 popl %ecx /* restore new PIL */ 657 pushl %ecx 658 HIGHPILBASE(%ebx, %ecx, %ecx) 659_tsc_patch3: 660 nop; nop /* patched to rdtsc if available */ 661 TSC_MOV(%ecx, CPU_PIL_HIGH_START) 662 663 popl %eax /* restore new pil */ 664 popl %ecx /* vec */ 665 / 666 / Set bit for this PIL in CPU's interrupt active bitmask. 667 / 668 669 ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set) 670 671 / Save old CPU_INTR_ACTV 672 movl CPU_INTR_ACTV(%ebx), %esi 673 674 cmpl $15, %eax 675 jne 0f 676 / PIL-15 interrupt. Increment nest-count in upper 16 bits of intr_actv 677 incw CPU_INTR_ACTV_REF(%ebx) /* increment ref count */ 6780: 679 btsl %eax, CPU_INTR_ACTV(%ebx) 680 / 681 / Handle high-level nested interrupt on separate interrupt stack 682 / 683 testl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %esi 684 jnz onstack /* already on interrupt stack */ 685 movl %esp, %eax 686 movl CPU_INTR_STACK(%ebx), %esp /* get on interrupt stack */ 687 pushl %eax /* save the thread stack pointer */ 688onstack: 689 movl $autovect, %esi /* get autovect structure before */ 690 /* sti to save on AGI later */ 691 sti /* enable interrupts */ 692 pushl %ecx /* save interrupt vector */ 693 / 694 / Get handler address 695 / 696pre_loop1: 697 movl AVH_LINK(%esi, %ecx, 8), %esi 698 xorl %ebx, %ebx /* bh is no. of intpts in chain */ 699 /* bl is DDI_INTR_CLAIMED status of chain */ 700 testl %esi, %esi /* if pointer is null */ 701 jz .intr_ret /* then skip */ 702loop1: 703 incb %bh 704 movl AV_VECTOR(%esi), %edx /* get the interrupt routine */ 705 testl %edx, %edx /* if func is null */ 706 jz .intr_ret /* then skip */ 707 pushl $0 708 pushl AV_INTARG2(%esi) 709 pushl AV_INTARG1(%esi) 710 pushl AV_VECTOR(%esi) 711 pushl AV_DIP(%esi) 712 call __dtrace_probe_interrupt__start 713 pushl AV_INTARG2(%esi) /* get 2nd arg to interrupt routine */ 714 pushl AV_INTARG1(%esi) /* get first arg to interrupt routine */ 715 call *%edx /* call interrupt routine with arg */ 716 addl $8, %esp 717 movl %eax, 16(%esp) 718 call __dtrace_probe_interrupt__complete 719 addl $20, %esp 720 orb %al, %bl /* see if anyone claims intpt. */ 721 movl AV_LINK(%esi), %esi /* get next routine on list */ 722 testl %esi, %esi /* if pointer is non-null */ 723 jnz loop1 /* then continue */ 724 725.intr_ret: 726 cmpb $1, %bh /* if only 1 intpt in chain, it is OK */ 727 je .intr_ret1 728 orb %bl, %bl /* If no one claims intpt, then it is OK */ 729 jz .intr_ret1 730 movl (%esp), %ecx /* else restore intr vector */ 731 movl $autovect, %esi /* get autovect structure */ 732 jmp pre_loop1 /* and try again. */ 733 734.intr_ret1: 735 LOADCPU(%ebx) /* get pointer to cpu struct */ 736 737 cli 738 movl CPU_PRI(%ebx), %esi 739 740 / cpu_stats.sys.intr[PIL]++ 741 INC_CPU_STATS_INTR(%esi, %eax, %eax, %ebx) 742 743 / 744 / Clear bit for this PIL in CPU's interrupt active bitmask. 745 / 746 747 ASSERT_CPU_INTR_ACTV(%esi, %ebx, _interrupt_actv_bit_not_set) 748 749 cmpl $15, %esi 750 jne 0f 751 / Only clear bit if reference count is now zero. 752 decw CPU_INTR_ACTV_REF(%ebx) 753 jnz 1f 7540: 755 btrl %esi, CPU_INTR_ACTV(%ebx) 7561: 757 / 758 / Take timestamp, compute interval, update cumulative counter. 759 / esi = PIL 760_tsc_patch4: 761 nop; nop /* patched to rdtsc if available */ 762 HIGHPILBASE(%ebx, %esi, %esi) 763 764 ASSERT_CPU_PIL_HIGH_START_NZ(%esi) 765 766 TSC_SUB_FROM(%esi, CPU_PIL_HIGH_START) 767 addl $CPU_INTRSTAT_LOW_PIL_OFFSET, %esi /* offset PILs 0-10 */ 768 TSC_ADD_TO(%esi, CPU_INTRSTAT) 769 / 770 / Check for lower-PIL nested high-level interrupt beneath current one 771 / If so, place a starting timestamp in its pil_high_start entry. 772 / 773 movl CPU_INTR_ACTV(%ebx), %eax 774 movl %eax, %esi 775 andl $CPU_INTR_ACTV_HIGH_LEVEL_MASK, %eax 776 jz 0f 777 bsrl %eax, %ecx /* find PIL of nested interrupt */ 778 HIGHPILBASE(%ebx, %ecx, %ecx) 779_tsc_patch5: 780 nop; nop /* patched to rdtsc if available */ 781 TSC_MOV(%ecx, CPU_PIL_HIGH_START) 782 / 783 / Another high-level interrupt is active below this one, so 784 / there is no need to check for an interrupt thread. That will be 785 / done by the lowest priority high-level interrupt active. 786 / 787 jmp 1f 7880: 789 / Check to see if there is a low-level interrupt active. If so, 790 / place a starting timestamp in the thread structure. 791 movl CPU_THREAD(%ebx), %esi 792 testw $T_INTR_THREAD, T_FLAGS(%esi) 793 jz 1f 794_tsc_patch6: 795 nop; nop /* patched to rdtsc if available */ 796 TSC_MOV(%esi, T_INTR_START) 7971: 798 movl %edi, CPU_PRI(%ebx) 799 /* interrupt vector already on stack */ 800 pushl %edi /* old ipl */ 801 call *setlvlx 802 addl $8, %esp /* eax contains the current ipl */ 803 804 movl CPU_INTR_ACTV(%ebx), %esi /* reset stack pointer if no more */ 805 shrl $LOCK_LEVEL + 1, %esi /* HI PRI intrs. */ 806 jnz .intr_ret2 807 popl %esp /* restore the thread stack pointer */ 808.intr_ret2: 809 movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */ 810 orl %edx, %edx 811 jz _sys_rtt 812 jmp dosoftint /* check for softints before we return. */ 813 SET_SIZE(cmnint) 814 SET_SIZE(_interrupt) 815 816#endif /* __i386 */ 817 818/* 819 * Declare a uintptr_t which has the size of _interrupt to enable stack 820 * traceback code to know when a regs structure is on the stack. 821 */ 822 .globl _interrupt_size 823 .align CLONGSIZE 824_interrupt_size: 825 .NWORD . - _interrupt 826 .type _interrupt_size, @object 827 828#endif /* __lint */ 829 830#if defined(__i386) 831 832/* 833 * Handle an interrupt in a new thread. 834 * Entry: traps disabled. 835 * %edi - old priority level 836 * %ebp - pointer to REGS 837 * %ecx - translated vector 838 * %eax - ipl of isr. 839 * %ebx - pointer to CPU struct 840 * Uses: 841 */ 842 843#if !defined(__lint) 844 845 ENTRY_NP(intr_thread) 846 / 847 / Set bit for this PIL in CPU's interrupt active bitmask. 848 / 849 850 ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _interrupt_actv_bit_set) 851 852 btsl %eax, CPU_INTR_ACTV(%ebx) 853 854 / Get set to run interrupt thread. 855 / There should always be an interrupt thread since we allocate one 856 / for each level on the CPU. 857 / 858 / Note that the code in kcpc_overflow_intr -relies- on the ordering 859 / of events here - in particular that t->t_lwp of the interrupt 860 / thread is set to the pinned thread *before* curthread is changed 861 / 862 movl CPU_THREAD(%ebx), %edx /* cur thread in edx */ 863 864 / 865 / Are we interrupting an interrupt thread? If so, account for it. 866 / 867 testw $T_INTR_THREAD, T_FLAGS(%edx) 868 jz 0f 869 pushl %ecx 870 pushl %eax 871 movl %edx, %esi 872_tsc_patch7: 873 nop; nop /* patched to rdtsc if available */ 874 TSC_SUB_FROM(%esi, T_INTR_START) 875 TSC_CLR(%esi, T_INTR_START) 876 movzbl T_PIL(%esi), %ecx 877 PILBASE(%ebx, %ecx) 878 TSC_ADD_TO(%ecx, CPU_INTRSTAT) 879 movl %esi, %edx 880 popl %eax 881 popl %ecx 8820: 883 movl %esp, T_SP(%edx) /* mark stack in curthread for resume */ 884 pushl %edi /* get a temporary register */ 885 UNLINK_INTR_THREAD(%ebx, %esi, %edi) 886 887 movl T_LWP(%edx), %edi 888 movl %edx, T_INTR(%esi) /* push old thread */ 889 movl %edi, T_LWP(%esi) 890 / 891 / Threads on the interrupt thread free list could have state already 892 / set to TS_ONPROC, but it helps in debugging if they're TS_FREE 893 / 894 movl $ONPROC_THREAD, T_STATE(%esi) 895 / 896 / chain the interrupted thread onto list from the interrupt thread. 897 / Set the new interrupt thread as the current one. 898 / 899 popl %edi /* Don't need a temp reg anymore */ 900 movl T_STACK(%esi), %esp /* interrupt stack pointer */ 901 movl %esp, %ebp 902 movl %esi, CPU_THREAD(%ebx) /* set new thread */ 903 pushl %eax /* save the ipl */ 904 / 905 / Initialize thread priority level from intr_pri 906 / 907 movb %al, T_PIL(%esi) /* store pil */ 908 movzwl intr_pri, %ebx /* XXX Can cause probs if new class */ 909 /* is loaded on some other cpu. */ 910 addl %ebx, %eax /* convert level to dispatch priority */ 911 movw %ax, T_PRI(%esi) 912 913 / 914 / Take timestamp and store it in the thread structure. 915 / 916 movl %eax, %ebx /* save priority over rdtsc */ 917_tsc_patch8: 918 nop; nop /* patched to rdtsc if available */ 919 TSC_MOV(%esi, T_INTR_START) 920 movl %ebx, %eax /* restore priority */ 921 922 / The following 3 instructions need not be in cli. 923 / Putting them here only to avoid the AGI penalty on Pentiums. 924 925 pushl %ecx /* save interrupt vector. */ 926 pushl %esi /* save interrupt thread */ 927 movl $autovect, %esi /* get autovect structure */ 928 sti /* enable interrupts */ 929 930 / Fast event tracing. 931 LOADCPU(%ebx) 932 movl CPU_FTRACE_STATE(%ebx), %ebx 933 testl $FTRACE_ENABLED, %ebx 934 jz 1f 935 936 movl 8(%esp), %ebx 937 pushl %ebx /* ipl */ 938 pushl %ecx /* int vector */ 939 movl T_SP(%edx), %ebx 940 pushl %ebx /* ®s */ 941 pushl $_ftrace_intr_thread_fmt 942 call ftrace_3_notick 943 addl $8, %esp 944 popl %ecx /* restore int vector */ 945 addl $4, %esp 9461: 947pre_loop2: 948 movl AVH_LINK(%esi, %ecx, 8), %esi 949 xorl %ebx, %ebx /* bh is cno. of intpts in chain */ 950 /* bl is DDI_INTR_CLAIMED status of * chain */ 951 testl %esi, %esi /* if pointer is null */ 952 jz loop_done2 /* we're done */ 953loop2: 954 movl AV_VECTOR(%esi), %edx /* get the interrupt routine */ 955 testl %edx, %edx /* if pointer is null */ 956 jz loop_done2 /* we're done */ 957 incb %bh 958 pushl $0 959 pushl AV_INTARG2(%esi) 960 pushl AV_INTARG1(%esi) 961 pushl AV_VECTOR(%esi) 962 pushl AV_DIP(%esi) 963 call __dtrace_probe_interrupt__start 964 pushl AV_INTARG2(%esi) /* get 2nd arg to interrupt routine */ 965 pushl AV_INTARG1(%esi) /* get first arg to interrupt routine */ 966 call *%edx /* call interrupt routine with arg */ 967 addl $8, %esp 968 movl %eax, 16(%esp) 969 call __dtrace_probe_interrupt__complete 970 addl $20, %esp 971 orb %al, %bl /* see if anyone claims intpt. */ 972 movl AV_LINK(%esi), %esi /* get next routine on list */ 973 testl %esi, %esi /* if pointer is non-null */ 974 jnz loop2 /* continue */ 975loop_done2: 976 cmpb $1, %bh /* if only 1 intpt in chain, it is OK */ 977 je .loop_done2_1 978 orb %bl, %bl /* If no one claims intpt, then it is OK */ 979 jz .loop_done2_1 980 movl $autovect, %esi /* else get autovect structure */ 981 movl 4(%esp), %ecx /* restore intr vector */ 982 jmp pre_loop2 /* and try again. */ 983.loop_done2_1: 984 popl %esi /* restore intr thread pointer */ 985 986 LOADCPU(%ebx) 987 988 cli /* protect interrupt thread pool and intr_actv */ 989 movzbl T_PIL(%esi), %eax 990 991 / Save value in regs 992 pushl %eax /* current pil */ 993 pushl %edx /* (huh?) */ 994 pushl %edi /* old pil */ 995 996 / cpu_stats.sys.intr[PIL]++ 997 INC_CPU_STATS_INTR(%eax, %edx, %edx, %ebx) 998 999 / 1000 / Take timestamp, compute interval, and update cumulative counter. 1001 / esi = thread pointer, ebx = cpu pointer, eax = PIL 1002 / 1003 movl %eax, %edi 1004 1005 ASSERT_T_INTR_START_NZ(%esi) 1006 1007_tsc_patch9: 1008 nop; nop /* patched to rdtsc if available */ 1009 TSC_SUB_FROM(%esi, T_INTR_START) 1010 PILBASE(%ebx, %edi) 1011 TSC_ADD_TO(%edi, CPU_INTRSTAT) 1012 popl %edi 1013 popl %edx 1014 popl %eax 1015 1016 / 1017 / Clear bit for this PIL in CPU's interrupt active bitmask. 1018 / 1019 1020 ASSERT_CPU_INTR_ACTV(%eax, %ebx, _intr_thread_actv_bit_not_set) 1021 1022 btrl %eax, CPU_INTR_ACTV(%ebx) 1023 1024 / if there is still an interrupted thread underneath this one 1025 / then the interrupt was never blocked and the return is fairly 1026 / simple. Otherwise jump to intr_thread_exit 1027 cmpl $0, T_INTR(%esi) 1028 je intr_thread_exit 1029 1030 / 1031 / link the thread back onto the interrupt thread pool 1032 LINK_INTR_THREAD(%ebx, %esi, %edx) 1033 1034 movl CPU_BASE_SPL(%ebx), %eax /* used below. */ 1035 / set the thread state to free so kmdb doesn't see it 1036 movl $FREE_THREAD, T_STATE(%esi) 1037 1038 cmpl %eax, %edi /* if (oldipl >= basespl) */ 1039 jae intr_restore_ipl /* then use oldipl */ 1040 movl %eax, %edi /* else use basespl */ 1041intr_restore_ipl: 1042 movl %edi, CPU_PRI(%ebx) 1043 /* intr vector already on stack */ 1044 pushl %edi /* old ipl */ 1045 call *setlvlx /* eax contains the current ipl */ 1046 / 1047 / Switch back to the interrupted thread 1048 movl T_INTR(%esi), %ecx 1049 1050 / Place starting timestamp in interrupted thread's thread structure. 1051_tsc_patch10: 1052 nop; nop /* patched to rdtsc if available */ 1053 TSC_MOV(%ecx, T_INTR_START) 1054 1055 movl T_SP(%ecx), %esp /* restore stack pointer */ 1056 movl %esp, %ebp 1057 movl %ecx, CPU_THREAD(%ebx) 1058 1059 movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */ 1060 orl %edx, %edx 1061 jz _sys_rtt 1062 jmp dosoftint /* check for softints before we return. */ 1063 1064 / 1065 / An interrupt returned on what was once (and still might be) 1066 / an interrupt thread stack, but the interrupted process is no longer 1067 / there. This means the interrupt must have blocked. 1068 / 1069 / There is no longer a thread under this one, so put this thread back 1070 / on the CPU's free list and resume the idle thread which will dispatch 1071 / the next thread to run. 1072 / 1073 / All interrupts are disabled here 1074 / 1075 1076intr_thread_exit: 1077#ifdef DEBUG 1078 incl intr_thread_cnt 1079#endif 1080 INC64(%ebx, CPU_STATS_SYS_INTRBLK) /* cpu_stats.sys.intrblk++ */ 1081 / 1082 / Put thread back on the interrupt thread list. 1083 / As a reminder, the regs at this point are 1084 / esi interrupt thread 1085 / edi old ipl 1086 / ebx ptr to CPU struct 1087 1088 / Set CPU's base SPL level based on active interrupts bitmask 1089 call set_base_spl 1090 1091 movl CPU_BASE_SPL(%ebx), %edi 1092 movl %edi, CPU_PRI(%ebx) 1093 /* interrupt vector already on stack */ 1094 pushl %edi 1095 call *setlvlx 1096 addl $8, %esp /* XXX - don't need to pop since */ 1097 /* we are ready to switch */ 1098 call splhigh /* block all intrs below lock level */ 1099 / 1100 / Set the thread state to free so kmdb doesn't see it 1101 / 1102 movl $FREE_THREAD, T_STATE(%esi) 1103 / 1104 / Put thread on either the interrupt pool or the free pool and 1105 / call swtch() to resume another thread. 1106 / 1107 LINK_INTR_THREAD(%ebx, %esi, %edx) 1108 call swtch 1109 / swtch() shouldn't return 1110 1111 SET_SIZE(intr_thread) 1112 1113#endif /* __lint */ 1114#endif /* __i386 */ 1115 1116/* 1117 * Set Cpu's base SPL level, base on which interrupt levels are active 1118 * Called at spl7 or above. 1119 */ 1120 1121#if defined(__lint) 1122 1123void 1124set_base_spl(void) 1125{} 1126 1127#else /* __lint */ 1128 1129 ENTRY_NP(set_base_spl) 1130 movl %gs:CPU_INTR_ACTV, %eax /* load active interrupts mask */ 1131 testl %eax, %eax /* is it zero? */ 1132 jz setbase 1133 testl $0xff00, %eax 1134 jnz ah_set 1135 shl $24, %eax /* shift 'em over so we can find */ 1136 /* the 1st bit faster */ 1137 bsrl %eax, %eax 1138 subl $24, %eax 1139setbase: 1140 movl %eax, %gs:CPU_BASE_SPL /* store base priority */ 1141 ret 1142ah_set: 1143 shl $16, %eax 1144 bsrl %eax, %eax 1145 subl $16, %eax 1146 jmp setbase 1147 SET_SIZE(set_base_spl) 1148 1149#endif /* __lint */ 1150 1151#if defined(__i386) 1152 1153/* 1154 * int 1155 * intr_passivate(from, to) 1156 * thread_id_t from; interrupt thread 1157 * thread_id_t to; interrupted thread 1158 * 1159 * intr_passivate(t, itp) makes the interrupted thread "t" runnable. 1160 * 1161 * Since t->t_sp has already been saved, t->t_pc is all that needs 1162 * set in this function. 1163 * 1164 * Returns interrupt level of the thread. 1165 */ 1166 1167#if defined(__lint) 1168 1169/* ARGSUSED */ 1170int 1171intr_passivate(kthread_id_t from, kthread_id_t to) 1172{ return (0); } 1173 1174#else /* __lint */ 1175 1176 ENTRY(intr_passivate) 1177 movl 8(%esp), %eax /* interrupted thread */ 1178 movl $_sys_rtt, T_PC(%eax) /* set T_PC for interrupted thread */ 1179 1180 movl 4(%esp), %eax /* interrupt thread */ 1181 movl T_STACK(%eax), %eax /* get the pointer to the start of */ 1182 /* of the interrupt thread stack */ 1183 movl -4(%eax), %eax /* interrupt level was the first */ 1184 /* thing pushed onto the stack */ 1185 ret 1186 SET_SIZE(intr_passivate) 1187 1188#endif /* __lint */ 1189#endif /* __i386 */ 1190 1191#if defined(__lint) 1192 1193void 1194fakesoftint(void) 1195{} 1196 1197#else /* __lint */ 1198 1199 / 1200 / If we're here, we're being called from splx() to fake a soft 1201 / interrupt (note that interrupts are still disabled from splx()). 1202 / We execute this code when a soft interrupt is posted at 1203 / level higher than the CPU's current spl; when spl is lowered in 1204 / splx(), it will see the softint and jump here. We'll do exactly 1205 / what a trap would do: push our flags, %cs, %eip, error code 1206 / and trap number (T_SOFTINT). The cmnint() code will see T_SOFTINT 1207 / and branch to the dosoftint() code. 1208 / 1209#if defined(__amd64) 1210 1211 /* 1212 * In 64-bit mode, iretq -always- pops all five regs 1213 * Imitate the 16-byte auto-align of the stack, and the 1214 * zero-ed out %ss value. 1215 */ 1216 ENTRY_NP(fakesoftint) 1217 movq %rsp, %r11 1218 andq $-16, %rsp 1219 pushq $KDS_SEL /* %ss */ 1220 pushq %r11 /* %rsp */ 1221 pushf /* rflags */ 1222 pushq $KCS_SEL /* %cs */ 1223 leaq fakesoftint_return(%rip), %r11 1224 pushq %r11 /* %rip */ 1225 pushq $0 /* err */ 1226 pushq $T_SOFTINT /* trap */ 1227 jmp cmnint 1228 SET_SIZE(fakesoftint) 1229 1230#elif defined(__i386) 1231 1232 ENTRY_NP(fakesoftint) 1233 pushf 1234 push %cs 1235 push $fakesoftint_return 1236 push $0 1237 push $T_SOFTINT 1238 jmp cmnint 1239 SET_SIZE(fakesoftint) 1240 1241#endif /* __i386 */ 1242 1243 .align CPTRSIZE 1244 .globl _fakesoftint_size 1245 .type _fakesoftint_size, @object 1246_fakesoftint_size: 1247 .NWORD . - fakesoftint 1248 SET_SIZE(_fakesoftint_size) 1249 1250/* 1251 * dosoftint(old_pil in %edi, softinfo in %edx, CPU pointer in %ebx) 1252 * Process software interrupts 1253 * Interrupts are disabled here. 1254 */ 1255#if defined(__i386) 1256 1257 ENTRY_NP(dosoftint) 1258 1259 bsrl %edx, %edx /* find highest pending interrupt */ 1260 cmpl %edx, %edi /* if curipl >= pri soft pending intr */ 1261 jae _sys_rtt /* skip */ 1262 1263 movl %gs:CPU_BASE_SPL, %eax /* check for blocked intr threads */ 1264 cmpl %edx, %eax /* if basespl >= pri soft pending */ 1265 jae _sys_rtt /* skip */ 1266 1267 lock /* MP protect */ 1268 btrl %edx, CPU_SOFTINFO(%ebx) /* clear the selected interrupt bit */ 1269 jnc dosoftint_again 1270 1271 movl %edx, CPU_PRI(%ebx) /* set IPL to sofint level */ 1272 pushl %edx 1273 call *setspl /* mask levels upto the softint level */ 1274 popl %eax /* priority we are at in %eax */ 1275 1276 / Get set to run interrupt thread. 1277 / There should always be an interrupt thread since we allocate one 1278 / for each level on the CPU. 1279 UNLINK_INTR_THREAD(%ebx, %esi, %edx) 1280 1281 / 1282 / Note that the code in kcpc_overflow_intr -relies- on the ordering 1283 / of events here - in particular that t->t_lwp of the interrupt 1284 / thread is set to the pinned thread *before* curthread is changed 1285 / 1286 movl CPU_THREAD(%ebx), %ecx 1287 1288 / If we are interrupting an interrupt thread, account for it. 1289 testw $T_INTR_THREAD, T_FLAGS(%ecx) 1290 jz 0f 1291 pushl %eax 1292 movl %eax, %ebp 1293_tsc_patch11: 1294 nop; nop /* patched to rdtsc if available */ 1295 PILBASE(%ebx, %ebp) 1296 TSC_SUB_FROM(%ecx, T_INTR_START) 1297 TSC_ADD_TO(%ebp, CPU_INTRSTAT) 1298 popl %eax 12990: 1300 movl T_LWP(%ecx), %ebp 1301 movl %ebp, T_LWP(%esi) 1302 / 1303 / Threads on the interrupt thread free list could have state already 1304 / set to TS_ONPROC, but it helps in debugging if they're TS_FREE 1305 / Could eliminate the next two instructions with a little work. 1306 / 1307 movl $ONPROC_THREAD, T_STATE(%esi) 1308 / 1309 / Push interrupted thread onto list from new thread. 1310 / Set the new thread as the current one. 1311 / Set interrupted thread's T_SP because if it is the idle thread, 1312 / Resume() may use that stack between threads. 1313 / 1314 movl %esp, T_SP(%ecx) /* mark stack for resume */ 1315 movl %ecx, T_INTR(%esi) /* push old thread */ 1316 movl %esi, CPU_THREAD(%ebx) /* set new thread */ 1317 movl T_STACK(%esi), %esp /* interrupt stack pointer */ 1318 movl %esp, %ebp 1319 1320 pushl %eax /* push ipl as first element in stack */ 1321 /* see intr_passivate() */ 1322 / 1323 / Set bit for this PIL in CPU's interrupt active bitmask. 1324 / 1325 1326 ASSERT_NOT_CPU_INTR_ACTV(%eax, %ebx, _dosoftint_actv_bit_set) 1327 1328 btsl %eax, CPU_INTR_ACTV(%ebx) 1329 1330 / 1331 / Initialize thread priority level from intr_pri 1332 / 1333 movb %al, T_PIL(%esi) /* store pil */ 1334 movzwl intr_pri, %ecx 1335 addl %eax, %ecx /* convert level to dispatch priority */ 1336 movw %cx, T_PRI(%esi) 1337 1338 / 1339 / Store starting timestamp in thread structure. 1340 / esi = thread, ebx = cpu pointer, eax = PIL 1341 / 1342 movl %eax, %ecx /* save PIL from rdtsc clobber */ 1343_tsc_patch12: 1344 nop; nop /* patched to rdtsc if available */ 1345 TSC_MOV(%esi, T_INTR_START) 1346 1347 sti /* enable interrupts */ 1348 1349 / 1350 / Enabling interrupts (above) could raise the current 1351 / IPL and base SPL. But, we continue processing the current soft 1352 / interrupt and we will check the base SPL next time in the loop 1353 / so that blocked interrupt thread would get a chance to run. 1354 / 1355 1356 / 1357 / dispatch soft interrupts 1358 / 1359 pushl %ecx 1360 call av_dispatch_softvect 1361 addl $4, %esp 1362 1363 cli /* protect interrupt thread pool */ 1364 /* and softinfo & sysinfo */ 1365 movl CPU_THREAD(%ebx), %esi /* restore thread pointer */ 1366 movzbl T_PIL(%esi), %ecx 1367 1368 / cpu_stats.sys.intr[PIL]++ 1369 INC_CPU_STATS_INTR(%ecx, %edx, %edx, %ebx) 1370 1371 / 1372 / Clear bit for this PIL in CPU's interrupt active bitmask. 1373 / 1374 1375 ASSERT_CPU_INTR_ACTV(%ecx, %ebx, _dosoftint_actv_bit_not_set) 1376 1377 btrl %ecx, CPU_INTR_ACTV(%ebx) 1378 1379 / 1380 / Take timestamp, compute interval, update cumulative counter. 1381 / esi = thread, ebx = cpu, ecx = PIL 1382 / 1383 PILBASE(%ebx, %ecx) 1384_tsc_patch13: 1385 nop; nop /* patched to rdtsc if available */ 1386 TSC_SUB_FROM(%esi, T_INTR_START) 1387 TSC_ADD_TO(%ecx, CPU_INTRSTAT) 1388 1389 / if there is still an interrupt thread underneath this one 1390 / then the interrupt was never blocked and the return is fairly 1391 / simple. Otherwise jump to softintr_thread_exit. 1392 / softintr_thread_exit expect esi to be curthread & ebx to be ipl. 1393 cmpl $0, T_INTR(%esi) 1394 je softintr_thread_exit 1395 1396 / 1397 / link the thread back onto the interrupt thread pool 1398 LINK_INTR_THREAD(%ebx, %esi, %edx) 1399 1400 / set the thread state to free so kmdb doesn't see it 1401 movl $FREE_THREAD, T_STATE(%esi) 1402 / 1403 / Switch back to the interrupted thread 1404 movl T_INTR(%esi), %ecx 1405 movl %ecx, CPU_THREAD(%ebx) 1406 movl T_SP(%ecx), %esp /* restore stack pointer */ 1407 movl %esp, %ebp 1408 1409 / If we are returning to an interrupt thread, store a starting 1410 / timestamp in the thread structure. 1411 testw $T_INTR_THREAD, T_FLAGS(%ecx) 1412 jz 0f 1413_tsc_patch14: 1414 nop; nop /* patched to rdtsc if available */ 1415 TSC_MOV(%ecx, T_INTR_START) 14160: 1417 movl CPU_BASE_SPL(%ebx), %eax 1418 cmpl %eax, %edi /* if (oldipl >= basespl) */ 1419 jae softintr_restore_ipl /* then use oldipl */ 1420 movl %eax, %edi /* else use basespl */ 1421softintr_restore_ipl: 1422 movl %edi, CPU_PRI(%ebx) /* set IPL to old level */ 1423 pushl %edi 1424 call *setspl 1425 popl %eax 1426dosoftint_again: 1427 movl CPU_SOFTINFO(%ebx), %edx /* any pending software interrupts */ 1428 orl %edx, %edx 1429 jz _sys_rtt 1430 jmp dosoftint /* process more software interrupts */ 1431 1432softintr_thread_exit: 1433 / 1434 / Put thread back on the interrupt thread list. 1435 / As a reminder, the regs at this point are 1436 / %esi interrupt thread 1437 1438 / 1439 / This was an interrupt thread, so set CPU's base SPL level 1440 / set_base_spl only uses %eax. 1441 / 1442 call set_base_spl /* interrupt vector already on stack */ 1443 / 1444 / Set the thread state to free so kmdb doesn't see it 1445 / 1446 movl $FREE_THREAD, T_STATE(%esi) 1447 / 1448 / Put thread on either the interrupt pool or the free pool and 1449 / call swtch() to resume another thread. 1450 / 1451 LOADCPU(%ebx) 1452 LINK_INTR_THREAD(%ebx, %esi, %edx) 1453 call splhigh /* block all intrs below lock lvl */ 1454 call swtch 1455 / swtch() shouldn't return 1456 SET_SIZE(dosoftint) 1457 1458#endif /* __i386 */ 1459#endif /* __lint */ 1460