/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/cmn_err.h>
#include <sys/ftrace.h>
#include <sys/asm_linkage.h>
#include <sys/machthread.h>
#include <sys/machcpuvar.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */


/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 *	Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g2 - pointer to intr_vec_t (iv)
	!	%g4 - pil
	!	%g3, %g5, %g6, %g7 - temps
	!
	! Grab the first, or list head, intr_vec_t off intr_head[pil]
	! and panic immediately if the list head is NULL. Otherwise, update
	! intr_head[pil] to the next intr_vec_t on the list, and clear the
	! softint via %clear_softint if the next intr_vec_t is NULL.
	!
	CPU_ADDR(%g1, %g5)		! %g1 = cpu
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = offset to the pil entry
	add	%g1, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head
	add	%g6, %g5, %g6		! %g6 = &cpu->m_cpu.intr_head[pil]
	ldn	[%g6], %g2		! %g2 = cpu->m_cpu.intr_head[pil]
	brnz,pt	%g2, 0f			! branch if list head (iv) is not NULL
	nop
	ba	ptl1_panic		! panic, list head (iv) is NULL
	mov	PTL1_BAD_INTR_VEC, %g1
0:
	lduh	[%g2 + IV_FLAGS], %g7	! %g7 = iv->iv_flags
	and	%g7, IV_SOFTINT_MT, %g3	! %g3 = iv->iv_flags & IV_SOFTINT_MT
	brz,pt	%g3, 1f			! check for multi target softint
	add	%g2, IV_PIL_NEXT, %g7	! %g7 = &iv->iv_pil_next
	ld	[%g1 + CPU_ID], %g3	! for multi target softint, use cpuid
	sll	%g3, CPTRSHIFT, %g3	! convert cpuid to offset address
	add	%g7, %g3, %g7		! %g7 = &iv->iv_xpil_next[cpuid]
1:
	ldn	[%g7], %g3		! %g3 = next intr_vec_t
	brnz,pn	%g3, 2f			! branch if next intr_vec_t non NULL
	stn	%g3, [%g6]		! update cpu->m_cpu.intr_head[pil]
	add	%g1, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	stn	%g0, [%g5 + %g6]	! clear cpu->m_cpu.intr_tail[pil]
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g4, %g5		! %g5 = 1 << pil
	wr	%g5, CLEAR_SOFTINT	! clear interrupt on this pil
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g2, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = first intr_vec
	stna	%g3, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = next intr_vec
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6	! %g6 = cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6	! %g6 = cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this interrupt request
	!
	lduh	[%g2 + IV_FLAGS], %g3		! %g3 = iv->iv_flags
	andn	%g3, IV_SOFTINT_PEND, %g3	! %g3 = !(iv->iv_flags & PEND)
	sth	%g3, [%g2 + IV_FLAGS]		! clear IV_SOFTINT_PEND flag
	stn	%g0, [%g7]			! clear iv->iv_pil_next or
						! iv->iv_pil_xnext

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - pointer to current intr_vec_t (iv),
	!	      job queue for intr_thread or current_thread
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
	mov	%g4, %g3		! %g3 = %g4, pil
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 3f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! %g4 = PIL_MAX (15)
	sethi	%hi(intr_thread), %g1	! %g1 = intr_thread
	mov	DISP_LEVEL, %g4		! %g4 = DISP_LEVEL (11)
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
3:
	sethi	%hi(current_thread), %g1 ! %g1 = current_thread
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */


#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 * iv_p, cpu, regs: may be out-registers
 * ls1, ls2: local scratch registers
 * os1, os2, os3: scratch registers, may be out
 */

#define	SERVE_INTR_PRE(iv_p, cpu, ls1, ls2, os1, os2, os3, regs)	\
	mov	iv_p, ls1;						\
	mov	iv_p, ls2;						\
	SERVE_INTR_TRACE(iv_p, os1, os2, os3, regs);

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code. No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set. If
 * so, there is another interrupt to process. The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler. However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 * ls1 - the pil just processed
 * ls2 - the pointer to intr_vec_t (iv) just processed
 * os3 - if set, another interrupt needs to be processed
 * cpu, ls1, os3 - must be preserved if os3 is set
 */
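/*
 * To make the protocol above concrete: the intended use of these macros
 * is, in rough C-like pseudocode (an illustrative sketch only; the
 * lowercase names stand in for the assembly macros and are not real
 * functions):
 *
 *	serve_intr_pre(iv_p, cpu, regs);
 *	do {
 *		os3 = serve_intr(cpu);
 *		(per-handler accounting may go here; ls1, os3 preserved)
 *		if (os3 != NULL)
 *			serve_intr_next(cpu);
 *	} while (os3 != NULL);
 *
 * This is the shape of the loops in intr_thread() and current_thread()
 * below.
 */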
#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)	\
	ldn	[ls1 + IV_HANDLER], os2;			\
	ldn	[ls1 + IV_ARG1], %o0;				\
	ldn	[ls1 + IV_ARG2], %o1;				\
	call	os2;						\
	lduh	[ls1 + IV_PIL], ls1;				\
	brnz,pt	%o0, 2f;					\
	mov	CE_WARN, %o0;					\
	set	_spurious, %o1;					\
	mov	ls2, %o2;					\
	call	cmn_err;					\
	rdpr	%pil, %o3;					\
2:	ldn	[THREAD_REG + T_CPU], cpu;			\
	sll	ls1, 3, os1;					\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;		\
	ldx	[cpu + os2], os3;				\
	inc	os3;						\
	stx	os3, [cpu + os2];				\
	sll	ls1, CPTRSHIFT, os2;				\
	add	cpu, INTR_HEAD, os1;				\
	add	os1, os2, os1;					\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 * cpu - cpu pointer (clobbered, set to cpu upon completion)
 * ls1, os3 - preserved from prior call to SERVE_INTR
 * ls2 - local scratch reg (not preserved)
 * os1, os2, os4, os5 - scratch reg, can be out (not preserved)
 */
#define	SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)	\
	sll	ls1, CPTRSHIFT, os4;				\
	add	cpu, INTR_HEAD, os1;				\
	rdpr	%pstate, ls2;					\
	wrpr	ls2, PSTATE_IE, %pstate;			\
	lduh	[os3 + IV_FLAGS], os2;				\
	and	os2, IV_SOFTINT_MT, os2;			\
	brz,pt	os2, 4f;					\
	add	os3, IV_PIL_NEXT, os2;				\
	ld	[cpu + CPU_ID], os5;				\
	sll	os5, CPTRSHIFT, os5;				\
	add	os2, os5, os2;					\
4:	ldn	[os2], os5;					\
	brnz,pn	os5, 5f;					\
	stn	os5, [os1 + os4];				\
	add	cpu, INTR_TAIL, os1;				\
	stn	%g0, [os1 + os4];				\
	mov	1, os1;						\
	sll	os1, ls1, os1;					\
	wr	os1, CLEAR_SOFTINT;				\
5:	lduh	[os3 + IV_FLAGS], ls1;				\
	andn	ls1, IV_SOFTINT_PEND, ls1;			\
	sth	ls1, [os3 + IV_FLAGS];				\
	stn	%g0, [os2];					\
	wrpr	%g0, ls2, %pstate;				\
	mov	os3, ls1;					\
	mov	os3, ls2;					\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)		\
	rdpr	%pstate, os3;					\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;		\
	wrpr	%g0, os2, %pstate;				\
	TRACE_PTR(os1, os2);					\
	ldn	[os4 + PC_OFF], os2;				\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;			\
	ldx	[os4 + TSTATE_OFF], os2;			\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;		\
	mov	os3, os4;					\
	GET_TRACE_TICK(os2);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;			\
	TRACE_SAVE_TL_GL_REGS(os1, os2);			\
	set	TT_SERVE_INTR, os2;				\
	rdpr	%pil, os3;					\
	or	os2, os3, os2;					\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;			\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;			\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;			\
	TRACE_NEXT(os1, os2, os3);				\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)		\
	rdpr	%pstate, os3;					\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;		\
	wrpr	%g0, os2, %pstate;				\
	TRACE_PTR(os1, os2);					\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;			\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;		\
	mov	os3, os4;					\
	GET_TRACE_TICK(os2);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;			\
	TRACE_SAVE_TL_GL_REGS(os1, os2);			\
	set	TT_SERVE_INTR, os2;				\
	rdpr	%pil, os3;					\
	or	os2, os3, os2;					\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;			\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;			\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;			\
	TRACE_NEXT(os1, os2, os3);				\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is
	! non-zero.
	rdpr	%tick, %o4		! delay
	sllx	%o4, 1, %o4		! shift off NPT bit
	srlx	%o4, 1, %o4
	sub	%o4, %o3, %o4		! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to
	! account for the time slice, we want to "atomically" load the
	! thread's starting timestamp, calculate the interval with %tick,
	! and zero its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0
	! to it. If it has changed since we loaded it above, we need to
	! re-compute the interval, since a changed t_intr_start implies
	! current_thread placed a new, later timestamp there after running
	! a high-level interrupt, and the %tick val in %o4 had become stale.
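	!
	! In rough C, the atomic handoff just described is (an illustrative
	! sketch only; atomic_cas_64() models the casx instruction, and
	! read_tick() is a hypothetical stand-in for reading %tick with the
	! NPT bit stripped):
	!
	!	do {
	!		start = t->t_intr_start;
	!		interval = read_tick() - start;
	!	} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
	!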
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting
	! timestamp changed between loading it (after label 0b) and computing
	! the interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt
	! thread is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4		! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG			! set global curthread register
	ldn	[%o3 + T_STACK], %o4		! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4		! shift off NPT bit
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil		! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f		! skip if ftrace disabled
	mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	mov	%i5, %o3
	call	ftrace_3
	ldn	[%i0 + PC_OFF], %o4
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_next, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %o1
	sllx	%o1, 1, %o1
	srlx	%o1, 1, %o1		! shift off NPT bit
	sub	%o1, %o0, %l2		! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any time.
	! It will account for (%tick - t_intr_start) for us when it starts,
	! unless we have set t_intr_start to zero, and then set t_intr_start
	! to a new %tick when it finishes. To account for this, our first step
	! is to load t_intr_start and the last is to use casx to store the new
	! t_intr_start. This guarantees atomicity in reading t_intr_start,
	! reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1		! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1	! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1	! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5		! old counter in o5
	add	%o5, %l2, %o0		! new counter in o0
	stx	%o0, [%o1 + 8]		! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0		! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	! Also update intracct[]
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume. Finally,
	! the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be forever
	! blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This ensures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
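	! A C-like sketch of that policy follows (illustrative only; field
	! names follow the cpu_t and kthread_t layout, and the exact
	! interaction with the INTRCNT_LIMIT stats update is simplified):
	!
	!	if (!(cpu->cpu_flags & CPU_QUIESCED)) {
	!		if (curthread->t_intr != NULL) {
	!			if (++cpu->cpu_intrcnt >= INTRCNT_LIMIT) {
	!				cpu->cpu_kprunrun = 1;
	!				preempt();
	!			}
	!		} else if (cpu->cpu_kprunrun) {
	!			preempt();
	!		}
	!	}
	!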
	lduh	[%o2 + CPU_FLAGS], %o5	! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f

	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f			! if not, don't inc intrcnt

	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT	! have we hit the limit?
	bl,a,pt	%xcc, 1f		! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f		! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4			! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f			! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3		! delay - save %o2
	call	preempt
	mov	%o3, %l2		! delay - save %o3
	mov	%l3, %o2		! restore %o2
	mov	%l2, %o3		! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil	! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_next and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b			! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple. Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	membar	#StoreLoad		! sync with mutex_exit()
	mov	%o4, THREAD_REG

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_start
0:
	ldx	[%o3], %o4		! o4 = t_intr_start before
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5		! shift off NPT bit
	casx	[%o3], %o4, %o5		! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to store
	! the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no
	! interrupts are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4		! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there. This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will
	! dispatch the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo
	! interrupt is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate	! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl		! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch			! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD] ! delay - put thread on list
	ba,a,pt	%xcc, .			! swtch() shouldn't return
	SET_SIZE(intr_thread_exit)

	.global ftrace_intr_thread_format_str
ftrace_intr_thread_format_str:
	.asciz	"intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx"
#ifdef DEBUG
intr_thread_actv_bit_set:
	.asciz	"intr_thread(): cpu_intr_actv bit already set for PIL"
intr_thread_actv_bit_not_set:
	.asciz	"intr_thread(): cpu_intr_actv bit not set for PIL"
intr_thread_exit_actv_bit_set:
	.asciz	"intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL"
intr_thread_t_intr_start_zero:
	.asciz	"intr_thread(): t_intr_start zero upon handler return"
#endif /* DEBUG */
#endif	/* lint */

#if defined(lint)

/*
 * Handle an interrupt in the current thread
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = pointer to current intr_vec_t (iv) to be processed
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *		            privileged, fp disabled
 *		%pil      = PIL_MAX
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = old stack
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o3       = cpu
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
/* ARGSUSED */
void
current_thread(struct regs *regs, uint64_t iv_p, uint_t pil)
{}

#else	/* lint */

	ENTRY_NP(current_thread)

	mov	%o7, %l0
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %o5	! o5 has cpu_intr_actv b4 chng
	mov	1, %o4
	sll	%o4, %o2, %o4		! construct mask for level
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o4
	!
	! See if we are interrupting another high-level interrupt.
	!
	srl	%o5, LOCK_LEVEL + 1, %o5	! only look at high-level bits
	brz,pt	%o5, 1f
	st	%o4, [%o3 + CPU_INTR_ACTV]	! delay - store active mask
	!
	! We have interrupted another high-level interrupt. Find its PIL,
	! compute the interval it ran for, and update its cumulative counter.
	!
	! Register usage:
	!	o2 = PIL of this interrupt
	!	o5 = high PIL bits of INTR_ACTV (not including this PIL)
	!	l1 = bitmask used to find other active high-level PIL
	!	o4 = index of bit set in l1
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %o5 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	sub	%o2, LOCK_LEVEL + 2, %o4
	mov	1, %l1
	sll	%l1, %o4, %l1
2:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop

	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l3
	ld	[%l3 + %lo(panic_quiesce)], %l3
	brnz,pn	%l3, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %o5, %g0		! test mask against high-level bits of
	bnz	%xcc, 3f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 2b
	sub	%o4, 1, %o4		! delay - decrement PIL
3:
	sll	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %l1	! CPU_PIL_HIGH_START is too large
	add	%l1, MCPU_PIL_HIGH_START, %l1
	ldx	[%o3 + %l1], %l3	! load starting timestamp
#ifdef DEBUG
	brnz,pt	%l3, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l1
	ld	[%l1 + %lo(panic_quiesce)], %l1
	brnz,pn	%l1, 9f
	nop
	srl	%o4, 3, %o1		! Find interrupted PIL for panic
	add	%o1, LOCK_LEVEL + 1, %o1
	sethi	%hi(current_thread_nested_pil_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_pil_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %l1
	sllx	%l1, 1, %l1
	srlx	%l1, 1, %l1		! shake off NPT bit
	sub	%l1, %l3, %l3		! interval in %l3
	!
	! Check for Energy Star mode
	!
	lduh	[%o3 + CPU_DIVISOR], %l1	! %l1 = clock divisor
	cmp	%l1, 1
	bg,a,pn	%xcc, 2f
	mulx	%l3, %l1, %l3	! multiply interval by clock divisor iff > 1
2:
	!
	! We need to find the CPU offset of the cumulative counter. We start
	! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16,
	! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is
	! CPU_INTRSTAT_LOW_PIL_OFFSET.
	!
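	! For example (illustrative numbers only), with LOCK_LEVEL = 10 and
	! PIL = 13: %o4 = (13 - 11) * 8 = 16; 16 << 1 = 32; and
	! 32 + (11 * 16) = 208 = 13 * 16, the desired intrstat byte offset.
	!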
	sll	%o4, 1, %o4
	add	%o4, CPU_MCPU, %o4	! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4	! add parts separately
	add	%o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4
	ldx	[%o3 + %o4], %l1	! old counter in l1
	add	%l1, %l3, %l1		! new counter in l1
	stx	%l1, [%o3 + %o4]	! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l1
	add	%l1, %l3, %l1
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 5f
	stx	%l1, [%o3 + %o4]	! delay - store new counter
1:
	! If we haven't interrupted another high-level interrupt, we may be
	! interrupting a low level interrupt thread. If so, compute its
	! interval and update its cumulative counter.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 4f
	nop

	! We have interrupted an interrupt thread. Take timestamp, compute
	! interval, update cumulative counter.

	! Check t_intr_start. If it is zero, either intr_thread() or
	! current_thread() (at a lower PIL, of course) already did
	! the accounting for the underlying interrupt thread.
	ldx	[THREAD_REG + T_INTR_START], %o5
	brz,pn	%o5, 4f
	nop

	stx	%g0, [THREAD_REG + T_INTR_START]
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4		! shake off NPT bit
	sub	%o4, %o5, %o5		! o5 has the interval

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	ldub	[THREAD_REG + T_PIL], %o4
	sllx	%o4, 4, %o4		! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_INTRSTAT const too large
	add	%o4, MCPU_INTRSTAT, %o4	! add parts separately
	ldx	[%o3 + %o4], %l2	! old counter in l2
	add	%l2, %o5, %l2		! new counter in l2
	stx	%l2, [%o3 + %o4]	! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %l2
	add	%l2, %o5, %l2
	stx	%l2, [%o3 + %o4]
4:
	!
	! Handle high-level interrupts on separate interrupt stack.
	! No other high-level interrupts are active, so switch to int stack.
	!
	mov	%sp, %l1
	ldn	[%o3 + CPU_INTR_STACK], %l3
	sub	%l3, STACK_BIAS, %sp

5:
#ifdef DEBUG
	!
	! ASSERT(%o2 > LOCK_LEVEL)
	!
	cmp	%o2, LOCK_LEVEL
	bg,pt	%xcc, 3f
	nop
	mov	CE_PANIC, %o0
	sethi	%hi(current_thread_wrong_pil), %o1
	call	cmn_err			! %o2 has the %pil already
	or	%o1, %lo(current_thread_wrong_pil), %o1
#endif
3:
	! Store starting timestamp for this PIL in CPU structure at
	! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)]
	sub	%o2, LOCK_LEVEL + 1, %o4	! convert PIL to array index
	sllx	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	stx	%o5, [%o3 + %o4]

	wrpr	%g0, %o2, %pil		! enable interrupts

	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
1:
	SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)

	brz,a,pt %o2, 0f		! if %o2, more intrs await
	rdpr	%pil, %o2		! delay annulled
	SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0)
	ba	1b
	nop
0:
	wrpr	%g0, PIL_MAX, %pil	! disable interrupts (1-15)

	cmp	%o2, PIL_15
	bne,pt	%xcc, 3f
	nop

	sethi	%hi(cpc_level15_inum), %o1
	ldx	[%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req
	brz	%o1, 3f
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate	! Disable vec interrupts

	call	intr_enqueue_req	! preserves %g5
	mov	PIL_15, %o0

	! clear perfcntr overflow
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT

	wrpr	%g0, %g5, %pstate	! Enable vec interrupts

3:
	cmp	%o2, PIL_14
	be	tick_rtt		! cpu-specific tick processing
	nop
	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4		! index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5
	ldx	[%o3 + %o4], %o0
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]
	sub	%o5, %o0, %o5		! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4		! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4	! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4	! add parts separately
	ldx	[%o3 + %o4], %o0	! old counter in o0
	add	%o0, %o5, %o0		! new counter in o0
	stx	%o0, [%o3 + %o4]	! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2			! any more high-level ints?
	movz	%xcc, %l1, %sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f		! if l2 was zero, no more ints
	nop
	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1		! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0		! test mask against high-level bits of
	bnz	%xcc, 2f		! cpu_intr_actv
	nop
	srl	%l1, 1, %l1		! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5		! delay - decrement PIL
2:
	sll	%o5, 3, %o5		! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5	! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]	! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4		! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8
	wrpr	%g0, %o2, %pil		! enable interrupts
	SET_SIZE(current_thread)


#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

#if defined(lint)

/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0	! return saved pil
	SET_SIZE(intr_level)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil	! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}

#else	/* lint */

	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil
	SET_SIZE(enable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }

#else	/* lint */

	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate	! disable interrupt
	SET_SIZE(disable_vec_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}

#else	/* lint */

	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate
	SET_SIZE(enable_vec_intr)

#endif	/* lint */

#if defined(lint)

void
cbe_level14(void)
{}

#else	/* lint */

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)
	call	cyclic_fire
	mov	%o1, %o0
2:
	ret
	restore	%g0, 1, %o0
	SET_SIZE(cbe_level14)

#endif	/* lint */


#if defined(lint)

/* ARGSUSED */
void
setsoftint(uint64_t iv_p)
{}

#else	/* lint */

	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	! Register usage
	!	%i0 - pointer to intr_vec_t (iv)
	!	%l2 - requested pil
	!	%l4 - cpu
	!	%l5 - pstate
	!	%l1, %l3, %l6 - temps
	!
	! check if a softint is pending for this softint,
	! if one is pending, don't bother queuing another.
	!
	lduh	[%i0 + IV_FLAGS], %l1	! %l1 = iv->iv_flags
	and	%l1, IV_SOFTINT_PEND, %l6 ! %l6 = iv->iv_flags & IV_SOFTINT_PEND
	brnz,pn	%l6, 4f			! branch if softint is already pending
	or	%l1, IV_SOFTINT_PEND, %l2
	sth	%l2, [%i0 + IV_FLAGS]	! set IV_SOFTINT_PEND flag

	CPU_ADDR(%l4, %l2)		! %l4 = cpu
	lduh	[%i0 + IV_PIL], %l2	! %l2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%l2, CPTRSHIFT, %l0	! %l0 = offset to pil entry
	add	%l4, INTR_TAIL, %l6	! %l6 = &cpu->m_cpu.intr_tail
	ldn	[%l6 + %l0], %l1	! %l1 = cpu->m_cpu.intr_tail[pil]
					!	current tail (ct)
	brz,pt	%l1, 2f			! branch if current tail is NULL
	stn	%i0, [%l6 + %l0]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%l1 + IV_FLAGS], %l6	! %l6 = ct->iv_flags
	and	%l6, IV_SOFTINT_MT, %l6	! %l6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%l6, 1f			! check for Multi target softint flag
	add	%l1, IV_PIL_NEXT, %l3	! %l3 = &ct->iv_pil_next
	ld	[%l4 + CPU_ID], %l6	! for multi target softint, use cpuid
	sll	%l6, CPTRSHIFT, %l6	! calculate offset address from cpuid
	add	%l3, %l6, %l3		! %l3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%i0, [%l3]		! [%l3] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%l4, INTR_HEAD, %l6	! %l6 = &cpu->m_cpu.intr_head[pil]
	stn	%i0, [%l6 + %l0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %l1			! %l1 = 1
	sll	%l1, %l2, %l1		! %l1 = 1 << pil
	wr	%l1, SET_SOFTINT	! trigger required pil softint
4:
	wrpr	%g0, %l5, %pstate	! %pstate = saved %pstate (in %l5)
	ret
	restore
	SET_SIZE(setsoftint)

#endif	/* lint */
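/*
 * The queue insertion performed above (and repeated in setsoftint_tl1,
 * setvecint_tl1 and intr_enqueue_req below) is, in rough C (an
 * illustrative sketch only; the multi-target iv_xpil_next[cpuid] case
 * is elided):
 *
 *	iv->iv_flags |= IV_SOFTINT_PEND;
 *	ct = cpu->m_cpu.intr_tail[pil];
 *	cpu->m_cpu.intr_tail[pil] = iv;
 *	if (ct == NULL)
 *		cpu->m_cpu.intr_head[pil] = iv;
 *	else
 *		ct->iv_pil_next = iv;
 *	(then write 1 << pil to the SET_SOFTINT register)
 */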

#if defined(lint)

/*ARGSUSED*/
void
setsoftint_tl1(uint64_t iv_p, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	!	%g1 - pointer to intr_vec_t (iv)
	!
	!	Internal:
	!	%g2 - pil
	!	%g4 - cpu
	!	%g3, %g5-%g7 - temps
	!
	ENTRY_NP(setsoftint_tl1)
	!
	! We have a pointer to an interrupt vector data structure.
	! Put the request on the cpu's softint priority list and
	! set %set_softint.
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	lduh	[%g1 + IV_PIL], %g2	! %g2 = iv->iv_pil

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!	current tail (ct)
	brz,pt	%g5, 1f			! branch if current tail is NULL
	stn	%g1, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 0f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
0:
	!
	! update old tail
	!
	ba,pt	%xcc, 2f
	stn	%g1, [%g3]		! [%g3] = iv, set pil_next field
1:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g1, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
2:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	ldn	[%g1 + IV_PIL_NEXT], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = iv->iv_pil_next
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6	! %g6 = cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6	! %g6 = cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! Write %set_softint with (1<<pil) to cause a "pil" level trap
	!
	mov	1, %g5			! %g5 = 1
	sll	%g5, %g2, %g5		! %g5 = 1 << pil
	wr	%g5, SET_SOFTINT	! trigger required pil softint
	retry
	SET_SIZE(setsoftint_tl1)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
setvecint_tl1(uint64_t inum, uint64_t dummy)
{}

#else	/* lint */

	!
	! Register usage
	!	Arguments:
	!	%g1 - inumber
	!
	!	Internal:
	!	%g1 - softint pil mask
	!	%g2 - pil of intr_vec_t
	!	%g3 - pointer to current intr_vec_t (iv)
	!	%g4 - cpu
	!	%g5, %g6, %g7 - temps
	!
	ENTRY_NP(setvecint_tl1)
	!
	! Verify the inumber received (should be inum < MAXIVNUM).
	!
	set	MAXIVNUM, %g2
	cmp	%g1, %g2
	bgeu,pn	%xcc, .no_ivintr
	clr	%g2			! expected in .no_ivintr

	!
	! Fetch data from intr_vec_table according to the inum.
	!
	! We have an interrupt number. Fetch the interrupt vector requests
	! from the interrupt vector table for a given interrupt number and
	! insert them into cpu's softint priority lists and set %set_softint.
	!
	set	intr_vec_table, %g5	! %g5 = intr_vec_table
	sll	%g1, CPTRSHIFT, %g6	! %g6 = offset to inum entry in table
	add	%g5, %g6, %g5		! %g5 = &intr_vec_table[inum]
	ldn	[%g5], %g3		! %g3 = pointer to first entry of
					!	intr_vec_t list

	! Verify the first intr_vec_t pointer for a given inum; it should
	! not be NULL. This used to be guarded by DEBUG, but broken drivers
	! can cause spurious tick interrupts when the softint register is
	! programmed with 1 << 0 at the end of this routine. Now we always
	! check for a valid intr_vec_t pointer.
	brz,pn	%g3, .no_ivintr
	nop

	!
	! Traverse the intr_vec_t link list, put each item on to corresponding
	! CPU softint priority queue, and compose the final softint pil mask.
	!
	! At this point:
	!	%g3 = intr_vec_table[inum]
	!
	CPU_ADDR(%g4, %g2)		! %g4 = cpu
	mov	%g0, %g1		! %g1 = 0, initialize pil mask to 0
0:
	!
	! Insert next intr_vec_t (iv) to appropriate cpu's softint priority
	! list
	!
	! At this point:
	!	%g1 = softint pil mask
	!	%g3 = pointer to next intr_vec_t (iv)
	!	%g4 = cpu
	!
	lduh	[%g3 + IV_PIL], %g2	! %g2 = iv->iv_pil
	sll	%g2, CPTRSHIFT, %g7	! %g7 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%g6 + %g7], %g5	! %g5 = cpu->m_cpu.intr_tail[pil]
					!	current tail (ct)
	brz,pt	%g5, 2f			! branch if current tail is NULL
	stn	%g3, [%g6 + %g7]	! make intr_vec_t (iv) as new tail
					!	cpu->m_cpu.intr_tail[pil] = iv
	!
	! there's pending intr_vec_t already
	!
	lduh	[%g5 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 1f			! check for Multi target softint flag
	add	%g5, IV_PIL_NEXT, %g5	! %g5 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g5, %g6, %g5		! %g5 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%g3, [%g5]		! [%g5] = iv, set pil_next field
2:
	!
	! no pending intr_vec_t; make intr_vec_t as new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%g3, [%g6 + %g7]	! cpu->m_cpu.intr_head[pil] = iv
3:
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi	! trap_tick = %tick
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi	! trap_type = %tt
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi	! trap_pc = %tpc
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! trap_tstate = %tstate
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi	! trap_sp = %sp
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! trap_tr = iv
	stna	%g1, [%g5 + TRAP_ENT_F1]%asi	! trap_f1 = pil mask
	add	%g4, INTR_HEAD, %g6
	ldn	[%g6 + %g7], %g6	! %g6 = cpu->m_cpu.intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! trap_f2 = intr_head[pil]
	add	%g4, INTR_TAIL, %g6
	ldn	[%g6 + %g7], %g6	! %g6 = cpu->m_cpu.intr_tail[pil]
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! trap_f3 = intr_tail[pil]
	stna	%g2, [%g5 + TRAP_ENT_F4]%asi	! trap_f4 = pil
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	mov	1, %g6			! %g6 = 1
	sll	%g6, %g2, %g6		! %g6 = 1 << pil
	or	%g1, %g6, %g1		! %g1 |= (1 << pil), pil mask
	ldn	[%g3 + IV_VEC_NEXT], %g3 ! %g3 = pointer to next intr_vec_t (iv)
	brnz,pn	%g3, 0b			! iv->iv_vec_next is non NULL, goto 0b
	nop
	wr	%g1, SET_SOFTINT	! triggered one or more pil softints
	retry

.no_ivintr:
	! no_ivintr: arguments: rp, inum (%g1), pil (%g2 == 0)
	mov	%g2, %g3
	mov	%g1, %g2
	set	no_ivintr, %g1
	ba,pt	%xcc, sys_trap
	mov	PIL_15, %g4
	SET_SIZE(setvecint_tl1)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
wr_clr_softint(uint_t value)
{}

#else

	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT
	SET_SIZE(wr_clr_softint)

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_enqueue_req(uint_t pil, uint64_t inum)
{}

#else	/* lint */

/*
 * intr_enqueue_req
 *
 * %o0 - pil
 * %o1 - pointer to intr_vec_t (iv)
 * %o5 - preserved
 * %g5 - preserved
 */
	ENTRY_NP(intr_enqueue_req)
	!
	CPU_ADDR(%g4, %g1)		! %g4 = cpu

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
					!	current tail (ct)
	brz,pt	%g1, 2f			! branch if current tail is NULL
	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) as new tail

	!
	! there's pending intr_vec_t already
	!
	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 1f			! check for Multi target softint flag
	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid

#if defined(lint)

/*ARGSUSED*/
void
wr_clr_softint(uint_t value)
{}

#else

	ENTRY_NP(wr_clr_softint)
	retl
	wr	%o0, CLEAR_SOFTINT
	SET_SIZE(wr_clr_softint)

#endif /* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_enqueue_req(uint_t pil, uint64_t inum)
{}

#else	/* lint */

/*
 * intr_enqueue_req
 *
 * %o0 - pil
 * %o1 - pointer to intr_vec_t (iv)
 * %o5 - preserved
 * %g5 - preserved
 */
	ENTRY_NP(intr_enqueue_req)
	!
	CPU_ADDR(%g4, %g1)		! %g4 = cpu

	!
	! Insert intr_vec_t (iv) to appropriate cpu's softint priority list
	!
	sll	%o0, CPTRSHIFT, %o0	! %o0 = offset to pil entry
	add	%g4, INTR_TAIL, %g6	! %g6 = &cpu->m_cpu.intr_tail
	ldn	[%o0 + %g6], %g1	! %g1 = cpu->m_cpu.intr_tail[pil]
					!	current tail (ct)
	brz,pt	%g1, 2f			! branch if current tail is NULL
	stn	%o1, [%g6 + %o0]	! make intr_vec_t (iv) the new tail

	!
	! there's a pending intr_vec_t already
	!
	lduh	[%g1 + IV_FLAGS], %g6	! %g6 = ct->iv_flags
	and	%g6, IV_SOFTINT_MT, %g6	! %g6 = ct->iv_flags & IV_SOFTINT_MT
	brz,pt	%g6, 1f			! check for multi target softint flag
	add	%g1, IV_PIL_NEXT, %g3	! %g3 = &ct->iv_pil_next
	ld	[%g4 + CPU_ID], %g6	! for multi target softint, use cpuid
	sll	%g6, CPTRSHIFT, %g6	! calculate offset address from cpuid
	add	%g3, %g6, %g3		! %g3 = &ct->iv_xpil_next[cpuid]
1:
	!
	! update old tail
	!
	ba,pt	%xcc, 3f
	stn	%o1, [%g3]		! [%g3] = iv, set pil_next field
2:
	!
	! no intr_vec_t's queued; make intr_vec_t the new head
	!
	add	%g4, INTR_HEAD, %g6	! %g6 = &cpu->m_cpu.intr_head[pil]
	stn	%o1, [%g6 + %o0]	! cpu->m_cpu.intr_head[pil] = iv
3:
	retl
	nop
	SET_SIZE(intr_enqueue_req)

#endif /* lint */

/*
 * Set CPU's base SPL level, based on which interrupt levels are active.
 * Called at spl7 or above.
 */

#if defined(lint)

void
set_base_spl(void)
{}

#else	/* lint */

	ENTRY_NP(set_base_spl)
	ldn	[THREAD_REG + T_CPU], %o2	! load CPU pointer
	ld	[%o2 + CPU_INTR_ACTV], %o5	! load active interrupts mask

/*
 * WARNING: non-standard calling sequence; do not call from C
 *	%o2 = pointer to CPU
 *	%o5 = updated CPU_INTR_ACTV
 */
_intr_set_spl:					! intr_thread_exit enters here
	!
	! Determine highest interrupt level active. Several could be blocked
	! at higher levels than this one, so must convert flags to a PIL.
	! Normally nothing will be blocked, so test this first.
	!
	brz,pt	%o5, 1f				! nothing active
	sra	%o5, 11, %o3			! delay - set %o3 to bits 15-11
	set	_intr_flag_table, %o1
	tst	%o3				! see if any of the bits are set
	ldub	[%o1 + %o3], %o3		! load bit number
	bnz,a,pn %xcc, 1f			! yes, add 10 and we're done
	add	%o3, 11-1, %o3			! delay - add bit number - 1

	sra	%o5, 6, %o3			! test bits 10-6
	tst	%o3
	ldub	[%o1 + %o3], %o3
	bnz,a,pn %xcc, 1f
	add	%o3, 6-1, %o3

	sra	%o5, 1, %o3			! test bits 5-1
	ldub	[%o1 + %o3], %o3

	!
	! highest active interrupt level number is now in %o3
	!
1:
	retl
	st	%o3, [%o2 + CPU_BASE_SPL]	! delay - store base priority
	SET_SIZE(set_base_spl)

/*
 * Table that finds the most significant bit set in a five bit field.
 * Each entry is the high-order bit number + 1 of its index in the table.
 * This read-only data is in the text segment.
 */
_intr_flag_table:
	.byte	0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4
	.byte	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
	.align	4

#endif /* lint */
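
/*
 * For illustration only (not compiled): set_base_spl() amounts to a
 * priority-encode of the active-interrupt mask, five bits at a time,
 * with _intr_flag_table supplying "most significant bit + 1" for each
 * five-bit field. The C field names (cpu_intr_actv, cpu_base_spl) are
 * assumed to be the fields behind CPU_INTR_ACTV and CPU_BASE_SPL:
 *
 *	uint_t actv = cpu->cpu_intr_actv;
 *	uint_t spl;
 *
 *	if (actv == 0)
 *		spl = 0;			// nothing active
 *	else if (actv >> 11)			// bits 15-11
 *		spl = _intr_flag_table[actv >> 11] + (11 - 1);
 *	else if (actv >> 6)			// bits 10-6
 *		spl = _intr_flag_table[actv >> 6] + (6 - 1);
 *	else					// bits 5-1
 *		spl = _intr_flag_table[actv >> 1];
 *	cpu->cpu_base_spl = spl;
 *
 * E.g. if only level 11 is active, actv >> 11 == 1, table[1] == 1, and
 * spl == 1 + 10 == 11, as expected.
 */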

/*
 * int
 * intr_passivate(from, to)
 *	kthread_id_t	from;		interrupt thread
 *	kthread_id_t	to;		interrupted thread
 */

#if defined(lint)

/* ARGSUSED */
int
intr_passivate(kthread_id_t from, kthread_id_t to)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_passivate)
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	flushw				! force register windows to stack
	!
	! restore registers from the base of the stack of the interrupt thread.
	!
	ldn	[%i0 + T_STACK], %i2	! get stack save area pointer
	ldn	[%i2 + (0*GREGSIZE)], %l0	! load locals
	ldn	[%i2 + (1*GREGSIZE)], %l1
	ldn	[%i2 + (2*GREGSIZE)], %l2
	ldn	[%i2 + (3*GREGSIZE)], %l3
	ldn	[%i2 + (4*GREGSIZE)], %l4
	ldn	[%i2 + (5*GREGSIZE)], %l5
	ldn	[%i2 + (6*GREGSIZE)], %l6
	ldn	[%i2 + (7*GREGSIZE)], %l7
	ldn	[%i2 + (8*GREGSIZE)], %o0	! put ins from stack in outs
	ldn	[%i2 + (9*GREGSIZE)], %o1
	ldn	[%i2 + (10*GREGSIZE)], %o2
	ldn	[%i2 + (11*GREGSIZE)], %o3
	ldn	[%i2 + (12*GREGSIZE)], %o4
	ldn	[%i2 + (13*GREGSIZE)], %o5
	ldn	[%i2 + (14*GREGSIZE)], %i4
					! copy stack pointer without using %sp
	ldn	[%i2 + (15*GREGSIZE)], %i5
	!
	! put registers into the save area at the top of the interrupted
	! thread's stack, pointed to by %l7 in the save area just loaded.
	!
	ldn	[%i1 + T_SP], %i3	! get stack save area pointer
	stn	%l0, [%i3 + STACK_BIAS + (0*GREGSIZE)]	! save locals
	stn	%l1, [%i3 + STACK_BIAS + (1*GREGSIZE)]
	stn	%l2, [%i3 + STACK_BIAS + (2*GREGSIZE)]
	stn	%l3, [%i3 + STACK_BIAS + (3*GREGSIZE)]
	stn	%l4, [%i3 + STACK_BIAS + (4*GREGSIZE)]
	stn	%l5, [%i3 + STACK_BIAS + (5*GREGSIZE)]
	stn	%l6, [%i3 + STACK_BIAS + (6*GREGSIZE)]
	stn	%l7, [%i3 + STACK_BIAS + (7*GREGSIZE)]
	stn	%o0, [%i3 + STACK_BIAS + (8*GREGSIZE)]	! save ins using outs
	stn	%o1, [%i3 + STACK_BIAS + (9*GREGSIZE)]
	stn	%o2, [%i3 + STACK_BIAS + (10*GREGSIZE)]
	stn	%o3, [%i3 + STACK_BIAS + (11*GREGSIZE)]
	stn	%o4, [%i3 + STACK_BIAS + (12*GREGSIZE)]
	stn	%o5, [%i3 + STACK_BIAS + (13*GREGSIZE)]
	stn	%i4, [%i3 + STACK_BIAS + (14*GREGSIZE)]
						! fp, %i7 copied using %i4, %i5
	stn	%i5, [%i3 + STACK_BIAS + (15*GREGSIZE)]
	stn	%g0, [%i2 + ((8+6)*GREGSIZE)]	! clear fp in save area

	! load saved pil for return
	ldub	[%i0 + T_PIL], %i0
	ret
	restore
	SET_SIZE(intr_passivate)

#endif /* lint */
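
/*
 * Conceptually (illustrative C only, not compiled), intr_passivate()
 * copies the sixteen saved window registers from the base of the
 * interrupt thread's stack to the save area of the interrupted thread,
 * clears the saved frame pointer so the interrupt thread's stack looks
 * empty, and returns the saved pil. "greg_t" and the t_stk/t_sp field
 * names are assumptions standing in for T_STACK/T_SP:
 *
 *	greg_t *src = (greg_t *)from->t_stk;
 *	greg_t *dst = (greg_t *)(to->t_sp + STACK_BIAS);
 *	int i;
 *
 *	for (i = 0; i < 16; i++)	// 8 locals followed by 8 ins
 *		dst[i] = src[i];
 *	src[8 + 6] = 0;			// clear saved fp (%i6) in save area
 *	return (from->t_pil);
 */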

#if defined(lint)

/*
 * intr_get_time() is a resource for interrupt handlers to determine how
 * much time has been spent handling the current interrupt. Such a function
 * is needed because higher level interrupts can arrive during the
 * processing of an interrupt, thus making direct comparisons of %tick by
 * the handler inaccurate. intr_get_time() only returns time spent in the
 * current interrupt handler.
 *
 * The caller must be calling from an interrupt handler running at a pil
 * below or at lock level. Timings are not provided for high-level
 * interrupts.
 *
 * The first time intr_get_time() is called while handling an interrupt,
 * it returns the time since the interrupt handler was invoked. Subsequent
 * calls will return the time since the prior call to intr_get_time(). Time
 * is returned as ticks, adjusted for any clock divisor due to power
 * management. Use tick2ns() to convert ticks to nsec. Warning: ticks may
 * not be the same across CPUs.
 *
 * Theory Of Intrstat[][]:
 *
 * uint64_t intrstat[pil][0..1] is an array indexed by pil level, with two
 * uint64_ts per pil.
 *
 * intrstat[pil][0] is a cumulative count of the number of ticks spent
 * handling all interrupts at the specified pil on this CPU. It is
 * exported via kstats to the user.
 *
 * intrstat[pil][1] is always a count of ticks less than or equal to the
 * value in [0]. The difference between [1] and [0] is the value returned
 * by a call to intr_get_time(). At the start of interrupt processing,
 * [0] and [1] will be equal (or nearly so). As the interrupt consumes
 * time, [0] will increase, but [1] will remain the same. A call to
 * intr_get_time() will return the difference, then update [1] to be the
 * same as [0]. Future calls will return the time since the last call.
 * Finally, when the interrupt completes, [1] is updated to the same as [0].
 *
 * Implementation:
 *
 * intr_get_time() works much like a higher level interrupt arriving. It
 * "checkpoints" the timing information by incrementing intrstat[pil][0]
 * to include elapsed running time, and by setting t_intr_start to %tick.
 * It then sets the return value to intrstat[pil][0] - intrstat[pil][1],
 * and updates intrstat[pil][1] to be the same as the new value of
 * intrstat[pil][0]. A C sketch of this checkpoint follows below.
 *
 * In the normal handling of interrupts, after an interrupt handler returns
 * and the code in intr_thread() updates intrstat[pil][0], it then sets
 * intrstat[pil][1] to the new value of intrstat[pil][0]. When [0] == [1],
 * the timings are reset, i.e. intr_get_time() will return [0] - [1] which
 * is 0.
 *
 * Whenever interrupts arrive on a CPU which is handling a lower pil
 * interrupt, they update the lower pil's [0] to show time spent in the
 * handler that they've interrupted. This results in a growing discrepancy
 * between [0] and [1], which is returned the next time intr_get_time() is
 * called. Time spent in the higher-pil interrupt will not be returned in
 * the next intr_get_time() call from the original interrupt, because
 * the higher-pil interrupt's time is accumulated in intrstat[higherpil][].
 */
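
/*
 * The checkpoint described above, as a non-compiled C sketch; "now"
 * stands for the %tick value with the NPT bit masked off, and cpu_divisor
 * is the power-management clock divisor mentioned earlier:
 *
 *	delta = now - t->t_intr_start;
 *	if (cpu_divisor > 1)
 *		delta *= cpu_divisor;		// scale for slowed clock
 *	t->t_intr_start = now;
 *	intrstat[pil][0] += delta;		// cumulative, kstat-visible
 *	ret = intrstat[pil][0] - intrstat[pil][1];
 *	intrstat[pil][1] = intrstat[pil][0];	// checkpoint for next call
 *	return (ret);
 */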

/*ARGSUSED*/
uint64_t
intr_get_time(void)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_get_time)
#ifdef DEBUG
	!
	! Lots of asserts, but just check panic_quiesce first.
	! Don't bother with lots of tests if we're just ignoring them.
	!
	sethi	%hi(panic_quiesce), %o0
	ld	[%o0 + %lo(panic_quiesce)], %o0
	brnz,pn	%o0, 2f
	nop
	!
	! ASSERT(%pil <= LOCK_LEVEL)
	!
	rdpr	%pil, %o1
	cmp	%o1, LOCK_LEVEL
	ble,pt	%xcc, 0f
	sethi	%hi(intr_get_time_high_pil), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_high_pil), %o0
0:
	!
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f
	ldub	[THREAD_REG + T_PIL], %o1	! delay
	brnz,pt	%o1, 0f
1:
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3	! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2		! shift off the NPT bit
	srlx	%o2, 1, %o2
	stx	%o2, [THREAD_REG + T_INTR_START]
	sub	%o2, %o3, %o0

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f
	mulx	%o0, %o4, %o0		! multiply interval by clock divisor iff > 1
1:
	! Update intracct[]
	lduh	[%o5 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o5 + %o4], %o2
	add	%o2, %o0, %o2
	stx	%o2, [%o5 + %o4]

	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3
	add	%o3, CPU_MCPU, %o3
	add	%o3, MCPU_INTRSTAT, %o3
	add	%o3, %o5, %o3		! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2		! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4		! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0		! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]		! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl.
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(intr_get_time)

#ifdef DEBUG
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */
#endif /* lint */
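
/*
 * Typical use from a low-pil interrupt handler, for illustration only;
 * my_threshold and break_up_the_work() are made-up names. intr_get_time()
 * returns ticks, so convert with tick2ns() as noted above:
 *
 *	uint64_t t = intr_get_time();	// ticks since handler entry, or
 *					// since the previous call
 *	if (tick2ns(t) > my_threshold)
 *		break_up_the_work();
 */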