/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/cpuvar.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <vm/seg_kp.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/smp_impldefs.h>
#include <sys/pool_pset.h>
#include <sys/zone.h>
#include <sys/bitmap.h>

#if defined(__amd64)

#if defined(__lint)
/*
 * atomic_btr32() is a gcc __inline__ function, defined in <asm/bitmap.h>.
 * For lint purposes, define it here.
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
        return (*pending &= ~(1 << pil));
}
#else

extern uint_t atomic_btr32(uint32_t *pending, uint_t pil);

#endif

/*
 * This code is amd64-only for now, but as time permits, we should
 * use this on i386 too.
 */

/*
 * Some questions to ponder:
 * - in several of these routines, we make multiple calls to tsc_read()
 *   without invoking functions .. couldn't we just reuse the same
 *   timestamp sometimes?
 * - if we have the inline, we can probably make set_base_spl be a
 *   C routine too.
 */

static uint_t
bsrw_insn(uint16_t mask)
{
        uint_t index = sizeof (mask) * NBBY - 1;

        ASSERT(mask != 0);

        while ((mask & (1 << index)) == 0)
                index--;
        return (index);
}

/*
 * Do all the work necessary to set up the cpu and thread structures
 * to dispatch a high-level interrupt.
 *
 * Returns 0 if we're -not- already on the high-level interrupt stack,
 * (and *must* switch to it), non-zero if we are already on that stack.
 *
 * Called with interrupts masked.
 * The 'pil' is already set to the appropriate level for rp->r_trapno.
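 *
 * (Note: on x86, LOCK_LEVEL is 10, so the high-level PILs handled here
 * are 11 through 15; their bits in the low half of cpu_intr_actv make up
 * CPU_INTR_ACTV_HIGH_LEVEL_MASK.)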
 */
int
hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil, struct regs *rp)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        uint_t mask;
        hrtime_t intrtime;

        ASSERT(pil > LOCK_LEVEL);

        if (pil == CBE_HIGH_PIL) {
                cpu->cpu_profile_pil = oldpil;
                if (USERMODE(rp->r_cs)) {
                        cpu->cpu_profile_pc = 0;
                        cpu->cpu_profile_upc = rp->r_pc;
                } else {
                        cpu->cpu_profile_pc = rp->r_pc;
                        cpu->cpu_profile_upc = 0;
                }
        }

        mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
        if (mask != 0) {
                int nestpil;

                /*
                 * We have interrupted another high-level interrupt.
                 * Load starting timestamp, compute interval, update
                 * cumulative counter.
                 */
                nestpil = bsrw_insn((uint16_t)mask);
                ASSERT(nestpil < pil);
                intrtime = tsc_read() -
                    mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
                mcpu->intrstat[nestpil][0] += intrtime;
                cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
                /*
                 * Another high-level interrupt is active below this one, so
                 * there is no need to check for an interrupt thread.  That
                 * will be done by the lowest priority high-level interrupt
                 * active.
                 */
        } else {
                kthread_t *t = cpu->cpu_thread;

                /*
                 * See if we are interrupting a low-level interrupt thread.
                 * If so, account for its time slice only if its time stamp
                 * is non-zero.
                 */
                if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
                        intrtime = tsc_read() - t->t_intr_start;
                        mcpu->intrstat[t->t_pil][0] += intrtime;
                        cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
                        t->t_intr_start = 0;
                }
        }

        /*
         * Store starting timestamp in CPU structure for this PIL.
         */
        mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = tsc_read();

        ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);

        if (pil == 15) {
                /*
                 * To support reentrant level 15 interrupts, we maintain a
                 * recursion count in the top half of cpu_intr_actv.  Only
                 * when this count hits zero do we clear the PIL 15 bit from
                 * the lower half of cpu_intr_actv.
                 */
                uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
                (*refcntp)++;
        }

        mask = cpu->cpu_intr_actv;

        cpu->cpu_intr_actv |= (1 << pil);

        return (mask & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

/*
 * Does most of the work of returning from a high level interrupt.
 *
 * Returns 0 if there are no more high level interrupts (in which
 * case we must switch back to the interrupted thread stack) or
 * non-zero if there are more (in which case we should stay on it).
 *
 * Called with interrupts masked.
 */
int
hilevel_intr_epilog(struct cpu *cpu, uint_t pil, uint_t oldpil, uint_t vecnum)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        uint_t mask;
        hrtime_t intrtime;

        ASSERT(mcpu->mcpu_pri == pil);

        cpu->cpu_stats.sys.intr[pil - 1]++;

        ASSERT(cpu->cpu_intr_actv & (1 << pil));

        if (pil == 15) {
                /*
                 * To support reentrant level 15 interrupts, we maintain a
                 * recursion count in the top half of cpu_intr_actv.  Only
                 * when this count hits zero do we clear the PIL 15 bit from
                 * the lower half of cpu_intr_actv.
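                 *
                 * (The pointer arithmetic below addresses the upper 16 bits
                 * of the 32-bit cpu_intr_actv word as a separate uint16_t
                 * counter; this relies on x86 being little-endian.)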
                 */
                uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;

                ASSERT(*refcntp > 0);

                if (--(*refcntp) == 0)
                        cpu->cpu_intr_actv &= ~(1 << pil);
        } else {
                cpu->cpu_intr_actv &= ~(1 << pil);
        }

        ASSERT(mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] != 0);

        intrtime = tsc_read() - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];
        mcpu->intrstat[pil][0] += intrtime;
        cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

        /*
         * Check for lower-pil nested high-level interrupt beneath
         * current one.  If so, place a starting timestamp in its
         * pil_high_start entry.
         */
        mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
        if (mask != 0) {
                int nestpil;

                /*
                 * Find PIL of nested interrupt.
                 */
                nestpil = bsrw_insn((uint16_t)mask);
                ASSERT(nestpil < pil);
                mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = tsc_read();
                /*
                 * (Another high-level interrupt is active below this one,
                 * so there is no need to check for an interrupt
                 * thread.  That will be done by the lowest priority
                 * high-level interrupt active.)
                 */
        } else {
                /*
                 * Check to see if there is a low-level interrupt active.
                 * If so, place a starting timestamp in the thread
                 * structure.
                 */
                kthread_t *t = cpu->cpu_thread;

                if (t->t_flag & T_INTR_THREAD)
                        t->t_intr_start = tsc_read();
        }

        mcpu->mcpu_pri = oldpil;
        (void) (*setlvlx)(oldpil, vecnum);

        return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

/*
 * Set up the cpu, thread and interrupt thread structures for
 * executing an interrupt thread.  The new stack pointer of the
 * interrupt thread (which *must* be switched to) is returned.
 */
caddr_t
intr_thread_prolog(struct cpu *cpu, caddr_t stackptr, uint_t pil)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        kthread_t *t, *volatile it;

        ASSERT(pil > 0);
        ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
        cpu->cpu_intr_actv |= (1 << pil);

        /*
         * Get set to run an interrupt thread.
         * There should always be an interrupt thread, since we
         * allocate one for each level on each CPU.
         *
         * t_intr_start could be zero due to cpu_intr_swtch_enter.
         */
        t = cpu->cpu_thread;
        if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
                hrtime_t intrtime = tsc_read() - t->t_intr_start;
                mcpu->intrstat[t->t_pil][0] += intrtime;
                cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
                t->t_intr_start = 0;
        }

        ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);

        t->t_sp = (uintptr_t)stackptr;  /* mark stack in curthread for resume */

        /*
         * Unlink an interrupt thread from the CPU's free list.
         *
         * Note that the code in kcpc_overflow_intr -relies- on the
         * ordering of events here - in particular that t->t_lwp of
         * the interrupt thread is set to the pinned thread *before*
         * curthread is changed.
         */
        it = cpu->cpu_intr_thread;
        cpu->cpu_intr_thread = it->t_link;
        it->t_intr = t;
        it->t_lwp = t->t_lwp;

        /*
         * (threads on the interrupt thread free list could have state
         * preset to TS_ONPROC, but it helps in debugging if
         * they're TS_FREE.)
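         *
         * (The interrupted thread remains pinned beneath this interrupt
         * thread through it->t_intr until intr_thread_epilog() resumes it,
         * or until the interrupt thread blocks and is unpinned.)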
         */
        it->t_state = TS_ONPROC;

        cpu->cpu_thread = it;           /* new curthread on this cpu */
        it->t_pil = (uchar_t)pil;
        it->t_pri = intr_pri + (pri_t)pil;
        it->t_intr_start = tsc_read();

        return (it->t_stk);
}

#ifdef DEBUG
int intr_thread_cnt;
#endif

/*
 * Called with interrupts disabled.
 */
void
intr_thread_epilog(struct cpu *cpu, uint_t vec, uint_t oldpil)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        kthread_t *t;
        kthread_t *it = cpu->cpu_thread;        /* curthread */
        uint_t pil, basespl;
        hrtime_t intrtime;

        pil = it->t_pil;
        cpu->cpu_stats.sys.intr[pil - 1]++;

        ASSERT(it->t_intr_start != 0);
        intrtime = tsc_read() - it->t_intr_start;
        mcpu->intrstat[pil][0] += intrtime;
        cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

        ASSERT(cpu->cpu_intr_actv & (1 << pil));
        cpu->cpu_intr_actv &= ~(1 << pil);

        /*
         * If there is still an interrupted thread underneath this one
         * then the interrupt was never blocked and the return is
         * fairly simple.  Otherwise it isn't.
         */
        if ((t = it->t_intr) == NULL) {
                /*
                 * The interrupted thread is no longer pinned underneath
                 * the interrupt thread.  This means the interrupt must
                 * have blocked, and the interrupted thread has been
                 * unpinned, and has probably been running around the
                 * system for a while.
                 *
                 * Since there is no longer a thread under this one, put
                 * this interrupt thread back on the CPU's free list and
                 * resume the idle thread which will dispatch the next
                 * thread to run.
                 */
#ifdef DEBUG
                intr_thread_cnt++;
#endif
                cpu->cpu_stats.sys.intrblk++;
                /*
                 * Set CPU's base SPL based on active interrupts bitmask.
                 */
                set_base_spl();
                basespl = cpu->cpu_base_spl;
                mcpu->mcpu_pri = basespl;
                (*setlvlx)(basespl, vec);
                (void) splhigh();
                it->t_state = TS_FREE;
                /*
                 * Return interrupt thread to pool.
                 */
                it->t_link = cpu->cpu_intr_thread;
                cpu->cpu_intr_thread = it;
                swtch();
                /*NOTREACHED*/
        }

        /*
         * Return interrupt thread to the pool.
         */
        it->t_link = cpu->cpu_intr_thread;
        cpu->cpu_intr_thread = it;
        it->t_state = TS_FREE;

        basespl = cpu->cpu_base_spl;
        pil = MAX(oldpil, basespl);
        mcpu->mcpu_pri = pil;
        (*setlvlx)(pil, vec);
        t->t_intr_start = tsc_read();
        cpu->cpu_thread = t;
}

/*
 * Called with interrupts disabled by an interrupt thread to determine
 * how much time has elapsed.  See interrupt.s:intr_get_time() for detailed
 * theory of operation.
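 *
 * (In brief: mcpu->intrstat[pil][0] accumulates the total time spent in
 * interrupt at this PIL, and mcpu->intrstat[pil][1] holds a snapshot of
 * that total taken at the previous call, so the value returned below is
 * the time accrued since intr_get_time() was last invoked.)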
 */
uint64_t
intr_thread_get_time(struct cpu *cpu)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        kthread_t *t = cpu->cpu_thread;
        uint64_t time, delta, ret;
        uint_t pil = t->t_pil;

        ASSERT((cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK) == 0);
        ASSERT(t->t_flag & T_INTR_THREAD);
        ASSERT(pil != 0);
        ASSERT(t->t_intr_start != 0);

        time = tsc_read();
        delta = time - t->t_intr_start;
        t->t_intr_start = time;

        time = mcpu->intrstat[pil][0] + delta;
        ret = time - mcpu->intrstat[pil][1];
        mcpu->intrstat[pil][0] = time;
        mcpu->intrstat[pil][1] = time;
        cpu->cpu_intracct[cpu->cpu_mstate] += delta;

        return (ret);
}

caddr_t
dosoftint_prolog(
        struct cpu *cpu,
        caddr_t stackptr,
        uint32_t st_pending,
        uint_t oldpil)
{
        kthread_t *t, *volatile it;
        struct machcpu *mcpu = &cpu->cpu_m;
        uint_t pil;

top:
        ASSERT(st_pending == mcpu->mcpu_softinfo.st_pending);

        pil = bsrw_insn((uint16_t)st_pending);
        if (pil <= oldpil || pil <= cpu->cpu_base_spl)
                return (0);

        /*
         * XX64 Sigh.
         *
         * This is a transliteration of the i386 assembler code for
         * soft interrupts.  One question is "why does this need
         * to be atomic?"  One possible race is -other- processors
         * posting soft interrupts to us in set_pending() i.e. the
         * CPU might get preempted just after the address computation,
         * but just before the atomic transaction, so another CPU would
         * actually set the original CPU's st_pending bit.  However,
         * it looks like it would be simpler to disable preemption there.
         * Are there other races for which preemption control doesn't work?
         *
         * The i386 assembler version -also- checks to see if the bit
         * being cleared was actually set; if it wasn't, it rechecks
         * for more.  This seems a bit strange, as the only code that
         * ever clears the bit is -this- code running with interrupts
         * disabled on -this- CPU.  This code would probably be cheaper:
         *
         *      atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending,
         *          ~(1 << pil));
         *
         * and t->t_preempt--/++ around set_pending() even cheaper,
         * but at this point, correctness is critical, so we slavishly
         * emulate the i386 port.
         */
        if (atomic_btr32((uint32_t *)&mcpu->mcpu_softinfo.st_pending, pil)
            == 0) {
                st_pending = mcpu->mcpu_softinfo.st_pending;
                goto top;
        }

        mcpu->mcpu_pri = pil;
        (*setspl)(pil);

        /*
         * Get set to run interrupt thread.
         * There should always be an interrupt thread since we
         * allocate one for each level on the CPU.
         */
        it = cpu->cpu_intr_thread;
        cpu->cpu_intr_thread = it->t_link;

        /* t_intr_start could be zero due to cpu_intr_swtch_enter. */
        t = cpu->cpu_thread;
        if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
                hrtime_t intrtime = tsc_read() - t->t_intr_start;
                mcpu->intrstat[pil][0] += intrtime;
                cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
                t->t_intr_start = 0;
        }

        /*
         * Note that the code in kcpc_overflow_intr -relies- on the
         * ordering of events here - in particular that t->t_lwp of
         * the interrupt thread is set to the pinned thread *before*
         * curthread is changed.
         */
        it->t_lwp = t->t_lwp;
        it->t_state = TS_ONPROC;

        /*
         * Push interrupted thread onto list from new thread.
         * Set the new thread as the current one.
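         * (As in intr_thread_prolog(), the interrupted thread is recorded
         * in it->t_intr so that dosoftint_epilog() can resume it.)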
         * Set interrupted thread's T_SP because if it is the idle thread,
         * resume() may use that stack between threads.
         */

        ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
        t->t_sp = (uintptr_t)stackptr;

        it->t_intr = t;
        cpu->cpu_thread = it;

        /*
         * Set bit for this pil in CPU's interrupt active bitmask.
         */
        ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
        cpu->cpu_intr_actv |= (1 << pil);

        /*
         * Initialize thread priority level from intr_pri.
         */
        it->t_pil = (uchar_t)pil;
        it->t_pri = (pri_t)pil + intr_pri;
        it->t_intr_start = tsc_read();

        return (it->t_stk);
}

void
dosoftint_epilog(struct cpu *cpu, uint_t oldpil)
{
        struct machcpu *mcpu = &cpu->cpu_m;
        kthread_t *t, *it;
        uint_t pil, basespl;
        hrtime_t intrtime;

        it = cpu->cpu_thread;
        pil = it->t_pil;

        cpu->cpu_stats.sys.intr[pil - 1]++;

        ASSERT(cpu->cpu_intr_actv & (1 << pil));
        cpu->cpu_intr_actv &= ~(1 << pil);
        intrtime = tsc_read() - it->t_intr_start;
        mcpu->intrstat[pil][0] += intrtime;
        cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;

        /*
         * If there is still an interrupted thread underneath this one
         * then the interrupt was never blocked and the return is
         * fairly simple.  Otherwise it isn't.
         */
        if ((t = it->t_intr) == NULL) {
                /*
                 * Put thread back on the interrupt thread list.
                 * This was an interrupt thread, so set CPU's base SPL.
                 */
                set_base_spl();
                it->t_state = TS_FREE;
                it->t_link = cpu->cpu_intr_thread;
                cpu->cpu_intr_thread = it;
                (void) splhigh();
                swtch();
                /*NOTREACHED*/
        }
        it->t_link = cpu->cpu_intr_thread;
        cpu->cpu_intr_thread = it;
        it->t_state = TS_FREE;
        cpu->cpu_thread = t;
        if (t->t_flag & T_INTR_THREAD)
                t->t_intr_start = tsc_read();
        basespl = cpu->cpu_base_spl;
        pil = MAX(oldpil, basespl);
        mcpu->mcpu_pri = pil;
        (*setspl)(pil);
}

/*
 * Make the interrupted thread 't' runnable.
 *
 * Since t->t_sp has already been saved, t->t_pc is all
 * that needs to be set in this function.
 *
 * Returns the interrupt level of the interrupt thread.
 */
int
intr_passivate(
        kthread_t *it,          /* interrupt thread */
        kthread_t *t)           /* interrupted thread */
{
        extern void _sys_rtt();

        ASSERT(it->t_flag & T_INTR_THREAD);
        ASSERT(SA(t->t_sp) == t->t_sp);

        t->t_pc = (uintptr_t)_sys_rtt;
        return (it->t_pil);
}

#endif  /* __amd64 */

/*
 * Create interrupt kstats for this CPU.
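 *
 * (Two named kstats are created for each PIL, "level-N-time" and
 * "level-N-count"; the time values are kept in TSC ticks internally and
 * scaled to nanoseconds by cpu_kstat_intrstat_update() when read.)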
 */
void
cpu_create_intrstat(cpu_t *cp)
{
        int             i;
        kstat_t         *intr_ksp;
        kstat_named_t   *knp;
        char            name[KSTAT_STRLEN];
        zoneid_t        zoneid;

        ASSERT(MUTEX_HELD(&cpu_lock));

        if (pool_pset_enabled())
                zoneid = GLOBAL_ZONEID;
        else
                zoneid = ALL_ZONES;

        intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
            KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

        /*
         * Initialize each PIL's named kstat.
         */
        if (intr_ksp != NULL) {
                intr_ksp->ks_update = cpu_kstat_intrstat_update;
                knp = (kstat_named_t *)intr_ksp->ks_data;
                intr_ksp->ks_private = cp;
                for (i = 0; i < PIL_MAX; i++) {
                        (void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
                            i + 1);
                        kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
                        (void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
                            i + 1);
                        kstat_named_init(&knp[(i * 2) + 1], name,
                            KSTAT_DATA_UINT64);
                }
                kstat_install(intr_ksp);
        }
}

/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
        kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}

/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
        kstat_named_t   *knp = ksp->ks_data;
        cpu_t           *cpup = (cpu_t *)ksp->ks_private;
        int             i;
        hrtime_t        hrt;

        if (rw == KSTAT_WRITE)
                return (EACCES);

        for (i = 0; i < PIL_MAX; i++) {
                hrt = (hrtime_t)cpup->cpu_m.intrstat[i + 1][0];
                tsc_scalehrtime(&hrt);
                knp[i * 2].value.ui64 = (uint64_t)hrt;
                knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
        }

        return (0);
}

/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
        uint64_t        interval;
        uint64_t        start;
        cpu_t           *cpu;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        /*
         * We could be here with a zero timestamp. This could happen if:
         * an interrupt thread which no longer has a pinned thread underneath
         * it (i.e. it blocked at some point in its past) has finished running
         * its handler. intr_thread() updated the interrupt statistic for its
         * PIL and zeroed its timestamp. Since there was no pinned thread to
         * return to, swtch() gets called and we end up here.
         *
         * Note that we use atomic ops below (cas64 and atomic_add_64), which
         * we don't use in the functions above, because we're not called
         * with interrupts blocked, but the epilog/prolog functions are.
         */
        if (t->t_intr_start) {
                do {
                        start = t->t_intr_start;
                        interval = tsc_read() - start;
                } while (cas64(&t->t_intr_start, start, 0) != start);
                cpu = CPU;
                cpu->cpu_m.intrstat[t->t_pil][0] += interval;

                atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
                    interval);
        } else
                ASSERT(t->t_intr == NULL);
}

/*
 * An interrupt thread is returning from swtch().  Place a starting timestamp
 * in its thread structure.
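 *
 * (A cas64 loop is used here for the same reason given in
 * cpu_intr_swtch_enter() above: unlike the prolog/epilog routines, this
 * code runs with interrupts enabled.)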
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
        uint64_t ts;

        ASSERT((t->t_flag & T_INTR_THREAD) != 0);
        ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

        do {
                ts = t->t_intr_start;
        } while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
}