/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * NOTE(review): the operands of the #include directives below appear to
 * have been stripped (angle-bracket text lost in extraction).  Restore the
 * original header names from the upstream source before assembling.
 */
#if defined(lint)
#include
#include
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include
#include
#include
#include
#include
#include
#ifdef TRAPTRACE
#include
#endif /* TRAPTRACE */

#if defined(lint)

/* ARGSUSED */
void
pil_interrupt(int level)
{}

#else	/* lint */

/*
 * (TT 0x40..0x4F, TL>0) Interrupt Level N Handler (N == 1..15)
 * Register passed from LEVEL_INTERRUPT(level)
 *	%g4 - interrupt request level
 *
 * Runs at trap level > 0 using only the global registers: dequeues the
 * first intr_req for this PIL, returns it to the CPU's free list, clears
 * the iv_pending flag, then enters sys_trap() with either intr_thread
 * (PIL <= LOCK_LEVEL) or current_thread (PIL > LOCK_LEVEL) as the TL==0
 * handler.
 */
	ENTRY_NP(pil_interrupt)
	!
	! Register usage
	!	%g1 - cpu
	!	%g3 - intr_req
	!	%g4 - pil
	!	%g2, %g5, %g6 - temps
	!
	! grab the 1st intr_req off the list
	! if the list is empty, clear %clear_softint
	!
	CPU_ADDR(%g1, %g5)		! %g1 = this CPU's cpu_t (%g5 scratch)
	!
	ALTENTRY(pil_interrupt_common)
	sll	%g4, CPTRSHIFT, %g5	! %g5 = pil * sizeof (pointer)
	add	%g1, INTR_HEAD, %g6	! intr_head[0]
	add	%g6, %g5, %g6		! intr_head[pil]
	ldn	[%g6], %g3		! g3 = intr_req
#ifndef DEBUG
	brnz,pt	%g3, 5f			! list non-empty: go service it
	nop
#else
	!
	! Verify the address of intr_req; it should be within the
	! address range of intr_pool and intr_head
	! or the address range of intr_add_head and intr_add_tail.
	! The range of intr_add_head and intr_add_tail is subdivided
	! by cpu, but the subdivision is not verified here.
	!
	! Registers passed to sys_trap()
	!	%g1 - no_intr_req
	!	%g2 - intr_req
	!	%g3 - %pil
	!	%g4 - current pil
	!
	add	%g1, INTR_POOL, %g2
	cmp	%g3, %g2
	blu,pn	%xcc, 8f		! below intr_pool: check add_head range
	nop
	add	%g1, INTR_HEAD, %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f		! within [intr_pool, intr_head]: ok
	nop
8:
	sethi	%hi(intr_add_head), %g2
	ldn	[%g2 + %lo(intr_add_head)], %g2
	brz,pn	%g2, 4f			! intr_add_head can be NULL
	cmp	%g3, %g2
	blu,pn	%xcc, 4f
	nop
	sethi	%hi(intr_add_tail), %g2
	ldn	[%g2 + %lo(intr_add_tail)], %g2
	cmp	%g2, %g3
	bgeu,pt	%xcc, 5f		! within [add_head, add_tail]: ok
	nop
4:
#endif /* DEBUG */
#ifdef TRAPTRACE
	! Record a trap-trace entry (TT = 0xbad) before panicking on a
	! bad (or NULL) intr_req pointer.
	TRACE_PTR(%g5, %g2)
	GET_TRACE_TICK(%g2)
	stxa	%g2, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g2)
	mov	0xbad, %g2
	stha	%g2, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g2
	stna	%g2, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g2
	stxa	%g2, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%g0, [%g5 + TRAP_ENT_SP]%asi
	stna	%g1, [%g5 + TRAP_ENT_TR]%asi
	rd	SOFTINT, %g2
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	stna	%g3, [%g5 + TRAP_ENT_F2]%asi
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g2, %g1)
#endif /* TRAPTRACE */
	ba	ptl1_panic		! bad/missing intr_req: panic
	mov	PTL1_BAD_INTR_REQ, %g1
5:
	ldn	[%g3 + INTR_NEXT], %g2	! 2nd entry
	brnz,pn	%g2, 1f			! branch if list not empty
	stn	%g2, [%g6]		! delay - intr_head[pil] = next
	add	%g1, INTR_TAIL, %g6	! intr_tail[0]
	stn	%g0, [%g5 + %g6]	! update intr_tail[pil]
	mov	1, %g5
	sll	%g5, %g4, %g5
	wr	%g5, CLEAR_SOFTINT	! list now empty: clear soft intr bit
1:
	!
	! put intr_req on free list
	!	%g2 - inumber
	!
	ldn	[%g1 + INTR_HEAD], %g5	! current head of free list
	lduw	[%g3 + INTR_NUMBER], %g2
	stn	%g3, [%g1 + INTR_HEAD]
	stn	%g5, [%g3 + INTR_NEXT]
#ifdef TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi
	stna	%g2, [%g5 + TRAP_ENT_F1]%asi
	sll	%g4, CPTRSHIFT, %g3
	add	%g1, INTR_HEAD, %g6
	ldn	[%g6 + %g3], %g6	! intr_head[pil]
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	add	%g1, INTR_TAIL, %g6
	ldn	[%g6 + %g3], %g6	! intr_tail[pil]
	stna	%g4, [%g5 + TRAP_ENT_F3]%asi
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g3)
#endif /* TRAPTRACE */
	!
	! clear the iv_pending flag for this inum
	!
	set	intr_vector, %g5;
	sll	%g2, INTR_VECTOR_SHIFT, %g6;
	add	%g5, %g6, %g5;		! &intr_vector[inum]
	sth	%g0, [%g5 + IV_PENDING]

	!
	! Prepare for sys_trap()
	!
	! Registers passed to sys_trap()
	!	%g1 - interrupt handler at TL==0
	!	%g2 - inumber
	!	%g3 - pil
	!	%g4 - initial pil for handler
	!
	! figure which handler to run and which %pil it starts at
	! intr_thread starts at DISP_LEVEL to prevent preemption
	! current_thread starts at PIL_MAX to protect cpu_intr_actv
	!
	mov	%g4, %g3
	cmp	%g4, LOCK_LEVEL
	bg,a,pt	%xcc, 4f		! branch if pil > LOCK_LEVEL
	mov	PIL_MAX, %g4		! delay (annulled if not taken)
	sethi	%hi(intr_thread), %g1
	mov	DISP_LEVEL, %g4
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(intr_thread), %g1
4:
	sethi	%hi(current_thread), %g1
	ba,pt	%xcc, sys_trap
	or	%g1, %lo(current_thread), %g1
	SET_SIZE(pil_interrupt_common)
	SET_SIZE(pil_interrupt)

#endif	/* lint */

#ifndef	lint
_spurious:
	.asciz	"!interrupt 0x%x at level %d not serviced"

/*
 * SERVE_INTR_PRE is called once, just before the first invocation
 * of SERVE_INTR.
 *
 * Registers on entry:
 *
 *	inum, cpu, regs: may be out-registers
 *	ls1, ls2: local scratch registers
 *	os1, os2, os3: scratch registers, may be out
 */
/*
 * Computes ls1 = &intr_vector[inum] and ls2 = inum, and (TRAPTRACE only)
 * records a trace entry for the dispatch.
 */
#define	SERVE_INTR_PRE(inum, cpu, ls1, ls2, os1, os2, os3, regs)	\
	set	intr_vector, ls1;					\
	sll	inum, INTR_VECTOR_SHIFT, os1;				\
	add	ls1, os1, ls1;						\
	SERVE_INTR_TRACE(inum, os1, os2, os3, regs);			\
	mov	inum, ls2;

/*
 * SERVE_INTR is called immediately after either SERVE_INTR_PRE or
 * SERVE_INTR_NEXT, without intervening code.  No register values
 * may be modified.
 *
 * After calling SERVE_INTR, the caller must check if os3 is set.  If
 * so, there is another interrupt to process.  The caller must call
 * SERVE_INTR_NEXT, immediately followed by SERVE_INTR.
 *
 * Before calling SERVE_INTR_NEXT, the caller may perform accounting
 * and other actions which need to occur after invocation of an interrupt
 * handler.  However, the values of ls1 and os3 *must* be preserved and
 * passed unmodified into SERVE_INTR_NEXT.
 *
 * Registers on return from SERVE_INTR:
 *
 *	ls1 - the pil just processed
 *	ls2 - the inum just processed
 *	os3 - if set, another interrupt needs to be processed
 *	cpu, ls1, os3 - must be preserved if os3 is set
 */
#define	SERVE_INTR(os5, cpu, ls1, ls2, os1, os2, os3, os4)	\
	/* invoke iv_handler(iv_arg, iv_softint_arg2) */	\
	ldn	[ls1 + IV_HANDLER], os2;			\
	ldn	[ls1 + IV_ARG], %o0;				\
	ldn	[ls1 + IV_SOFTINT_ARG2], %o1;			\
	call	os2;						\
	lduh	[ls1 + IV_PIL], ls1;	/* delay: ls1 = pil */	\
	/* handler returned 0: log "not serviced" warning */	\
	brnz,pt	%o0, 2f;					\
	mov	CE_WARN, %o0;					\
	set	_spurious, %o1;					\
	mov	ls2, %o2;					\
	call	cmn_err;					\
	rdpr	%pil, %o3;					\
	/* bump cpu_stats.sys intr counter for this pil */	\
2:	ldn	[THREAD_REG + T_CPU], cpu;			\
	sll	ls1, 3, os1;					\
	add	os1, CPU_STATS_SYS_INTR - 8, os2;		\
	ldx	[cpu + os2], os3;				\
	inc	os3;						\
	stx	os3, [cpu + os2];				\
	/* os3 = intr_head[pil]: nonzero => more pending */	\
	sll	ls1, CPTRSHIFT, os2;				\
	add	cpu, INTR_HEAD, os1;				\
	add	os1, os2, os1;					\
	ldn	[os1], os3;

/*
 * Registers on entry:
 *
 *	cpu - cpu pointer (clobbered, set to cpu upon completion)
 *	ls1, os3 - preserved from prior call to SERVE_INTR
 *	ls2 - local scratch reg (not preserved)
 *	os1, os2, os4, os5 - scratch reg, can be out (not preserved)
 */
/*
 * Dequeues the next intr_req (os3) at the same pil with interrupts
 * disabled, returns it to the free list, clears iv_pending (and the
 * softint bit if the list drains), then leaves ls1 = &intr_vector[inum]
 * and ls2 = inum ready for the next SERVE_INTR.
 */
#define	SERVE_INTR_NEXT(os5, cpu, ls1, ls2, os1, os2, os3, os4)	\
	sll	ls1, CPTRSHIFT, os4;				\
	add	cpu, INTR_HEAD, os1;				\
	rdpr	%pstate, ls2;					\
	wrpr	ls2, PSTATE_IE, %pstate; /* disable intrs */	\
	ldn	[os3 + INTR_NEXT], os2;				\
	brnz,pn	os2, 4f;					\
	stn	os2, [os1 + os4];	/* delay: head = next */ \
	add	cpu, INTR_TAIL, os1;				\
	stn	%g0, [os1 + os4];	/* list empty: tail=0 */ \
	mov	1, os1;						\
	sll	os1, ls1, os1;					\
	wr	os1, CLEAR_SOFTINT;				\
4:	ldn	[cpu + INTR_HEAD], os1;				\
	ld	[os3 + INTR_NUMBER], os5;			\
	stn	os3, [cpu + INTR_HEAD];	/* free intr_req */	\
	stn	os1, [os3 + INTR_NEXT];				\
	set	intr_vector, ls1;				\
	sll	os5, INTR_VECTOR_SHIFT, os1;			\
	add	ls1, os1, ls1;					\
	sth	%g0, [ls1 + IV_PENDING];			\
	wrpr	%g0, ls2, %pstate;	/* restore intrs */	\
	SERVE_INTR_TRACE2(os5, os1, os2, os3, os4);		\
	mov	os5, ls2;

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)		\
	rdpr	%pstate, os3;					\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;		\
	wrpr	%g0, os2, %pstate;				\
	TRACE_PTR(os1, os2);					\
	ldn	[os4 + PC_OFF], os2;				\
	stna	os2, [os1 + TRAP_ENT_TPC]%asi;			\
	ldx	[os4 + TSTATE_OFF], os2;			\
	stxa	os2, [os1 + TRAP_ENT_TSTATE]%asi;		\
	mov	os3, os4;					\
	GET_TRACE_TICK(os2);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;			\
	TRACE_SAVE_TL_GL_REGS(os1, os2);			\
	set	TT_SERVE_INTR, os2;				\
	rdpr	%pil, os3;					\
	or	os2, os3, os2;					\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;			\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;			\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;			\
	TRACE_NEXT(os1, os2, os3);				\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define	SERVE_INTR_TRACE(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#ifdef TRAPTRACE
/*
 * inum - not modified, _spurious depends on it.
 */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)		\
	rdpr	%pstate, os3;					\
	andn	os3, PSTATE_IE | PSTATE_AM, os2;		\
	wrpr	%g0, os2, %pstate;				\
	TRACE_PTR(os1, os2);					\
	stna	%g0, [os1 + TRAP_ENT_TPC]%asi;			\
	stxa	%g0, [os1 + TRAP_ENT_TSTATE]%asi;		\
	mov	os3, os4;					\
	GET_TRACE_TICK(os2);					\
	stxa	os2, [os1 + TRAP_ENT_TICK]%asi;			\
	TRACE_SAVE_TL_GL_REGS(os1, os2);			\
	set	TT_SERVE_INTR, os2;				\
	rdpr	%pil, os3;					\
	or	os2, os3, os2;					\
	stha	os2, [os1 + TRAP_ENT_TT]%asi;			\
	stna	%sp, [os1 + TRAP_ENT_SP]%asi;			\
	stna	inum, [os1 + TRAP_ENT_TR]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F1]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F2]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F3]%asi;			\
	stna	%g0, [os1 + TRAP_ENT_F4]%asi;			\
	TRACE_NEXT(os1, os2, os3);				\
	wrpr	%g0, os4, %pstate
#else	/* TRAPTRACE */
#define	SERVE_INTR_TRACE2(inum, os1, os2, os3, os4)
#endif	/* TRAPTRACE */

#endif	/* lint */

#if defined(lint)

/*ARGSUSED*/
void
intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
{}

#else	/* lint */

/* Invoke handlers this many times before preempting to unpin threads. */
#define	INTRCNT_LIMIT 16

/*
 * Handle an interrupt in a new thread.
 *	Entry:
 *		%o0       = pointer to regs structure
 *		%o1       = inumber
 *		%o2       = pil
 *		%sp       = on current thread's kernel stack
 *		%o7       = return linkage to trap code
 *		%g7       = current thread
 *		%pstate   = normal globals, interrupts enabled,
 *			    privileged, fp disabled
 *		%pil      = DISP_LEVEL
 *
 *	Register Usage
 *		%l0       = return linkage
 *		%l1       = pil
 *		%l2 - %l3 = scratch
 *		%l4 - %l7 = reserved for sys_trap
 *		%o2       = cpu
 *		%o3       = intr thread
 *		%o0       = scratch
 *		%o4 - %o5 = scratch
 */
	ENTRY_NP(intr_thread)
	mov	%o7, %l0
	mov	%o2, %l1
	!
	! See if we are interrupting another interrupt thread.
	!
	lduh	[THREAD_REG + T_FLAGS], %o3
	andcc	%o3, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_CPU], %o2	! delay - load CPU pointer

	! We have interrupted an interrupt thread. Take a timestamp,
	! compute its interval, and update its cumulative counter.
	add	THREAD_REG, T_INTR_START, %o5
0:
	ldx	[%o5], %o3
	brz,pn	%o3, 1f
	! We came in on top of an interrupt thread that had no timestamp.
	! This could happen if, for instance, an interrupt thread which had
	! previously blocked is being set up to run again in resume(), but
	! resume() hasn't yet stored a timestamp for it. Or, it could be in
	! swtch() after its slice has been accounted for.
	! Only account for the time slice if the starting timestamp is
	! non-zero.
	rdpr	%tick, %o4			! delay
	sllx	%o4, 1, %o4			! shift off NPT bit
	srlx	%o4, 1, %o4
	sub	%o4, %o3, %o4			! o4 has interval

	! A high-level interrupt in current_thread() interrupting here
	! will account for the interrupted thread's time slice, but
	! only if t_intr_start is non-zero. Since this code is going to
	! account for the time slice, we want to "atomically" load the
	! thread's starting timestamp, calculate the interval with %tick,
	! and zero its starting timestamp.
	! To do this, we do a casx on the t_intr_start field, and store 0
	! to it.
	! If it has changed since we loaded it above, we need to re-compute
	! the interval, since a changed t_intr_start implies current_thread
	! placed a new, later timestamp there after running a high-level
	! interrupt, and the %tick val in %o4 had become stale.
	mov	%g0, %l2
	casx	[%o5], %o3, %l2

	! If %l2 == %o3, our casx was successful. If not, the starting
	! timestamp changed between loading it (after label 0b) and
	! computing the interval above.
	cmp	%l2, %o3
	bne,pn	%xcc, 0b

	! Check for Energy Star mode
	lduh	[%o2 + CPU_DIVISOR], %l2	! delay -- %l2 = clock divisor
	cmp	%l2, 1
	bg,a,pn	%xcc, 2f
	mulx	%o4, %l2, %o4	! multiply interval by clock divisor iff > 1
2:
	! We now know that a valid interval for the interrupted interrupt
	! thread is in %o4. Update its cumulative counter.
	ldub	[THREAD_REG + T_PIL], %l3	! load PIL
	sllx	%l3, 4, %l3		! convert PIL index to byte offset
	add	%l3, CPU_MCPU, %l3	! CPU_INTRSTAT is too big for use
	add	%l3, MCPU_INTRSTAT, %l3	! as const, add offsets separately
	ldx	[%o2 + %l3], %o5	! old counter in o5
	add	%o5, %o4, %o5		! new counter in o5
	stx	%o5, [%o2 + %l3]	! store new counter

	! Also update intracct[] (casx retry loop: may race with a
	! high-level interrupt updating the same slot)
	lduh	[%o2 + CPU_MSTATE], %l3
	sllx	%l3, 3, %l3
	add	%l3, CPU_INTRACCT, %l3
	add	%l3, %o2, %l3
0:
	ldx	[%l3], %o5
	add	%o5, %o4, %o3
	casx	[%l3], %o5, %o3
	cmp	%o5, %o3
	bne,pn	%xcc, 0b
	nop

1:
	!
	! Get set to run interrupt thread.
	! There should always be an interrupt thread since we allocate one
	! for each level on the CPU.
	!
	! Note that the code in kcpc_overflow_intr -relies- on the ordering
	! of events here -- in particular that t->t_lwp of the interrupt
	! thread is set to the pinned thread *before* curthread is changed.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3	! interrupt thread pool
	ldn	[%o3 + T_LINK], %o4	! unlink thread from CPU's list
	stn	%o4, [%o2 + CPU_INTR_THREAD]
	!
	! Set bit for this level in CPU's active interrupt bitmask.
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	andcc	%o5, %o4, %g0
	bz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_set), %o0
0:
#endif /* DEBUG */
	or	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! Consider the new thread part of the same LWP so that
	! window overflow code can find the PCB.
	!
	ldn	[THREAD_REG + T_LWP], %o4
	stn	%o4, [%o3 + T_LWP]
	!
	! Threads on the interrupt thread free list could have state already
	! set to TS_ONPROC, but it helps in debugging if they're TS_FREE
	! Could eliminate the next two instructions with a little work.
	!
	mov	TS_ONPROC, %o4
	st	%o4, [%o3 + T_STATE]
	!
	! Push interrupted thread onto list from new thread.
	! Set the new thread as the current one.
	! Set interrupted thread's T_SP because if it is the idle thread,
	! resume may use that stack between threads.
	!
	stn	%o7, [THREAD_REG + T_PC]	! mark pc for resume
	stn	%sp, [THREAD_REG + T_SP]	! mark stack for resume
	stn	THREAD_REG, [%o3 + T_INTR]	! push old thread
	stn	%o3, [%o2 + CPU_THREAD]		! set new thread
	mov	%o3, THREAD_REG		! set global curthread register
	ldn	[%o3 + T_STACK], %o4	! interrupt stack pointer
	sub	%o4, STACK_BIAS, %sp
	!
	! Initialize thread priority level from intr_pri
	!
	sethi	%hi(intr_pri), %o4
	ldsh	[%o4 + %lo(intr_pri)], %o4	! grab base interrupt priority
	add	%l1, %o4, %o4		! convert level to dispatch priority
	sth	%o4, [THREAD_REG + T_PRI]
	stub	%l1, [THREAD_REG + T_PIL]	! save pil for intr_passivate

	! Store starting timestamp in thread structure.
	add	THREAD_REG, T_INTR_START, %o3
1:
	ldx	[%o3], %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4		! shift off NPT bit
	casx	[%o3], %o5, %o4
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to
	! store the timestamp, try again.
	bne,pn	%xcc, 1b
	nop

	wrpr	%g0, %l1, %pil		! lower %pil to new level
	!
	! Fast event tracing.
	!
	ld	[%o2 + CPU_FTRACE_STATE], %o4	! %o2 = curthread->t_cpu
	btst	FTRACE_ENABLED, %o4
	be,pt	%icc, 1f		! skip if ftrace disabled
	mov	%l1, %o5
	!
	! Tracing is enabled - write the trace entry.
	!
	save	%sp, -SA(MINFRAME), %sp
	set	ftrace_intr_thread_format_str, %o0
	mov	%i0, %o1
	mov	%i1, %o2
	call	ftrace_3
	mov	%i5, %o3
	restore
1:
	!
	! call the handler
	!
	SERVE_INTR_PRE(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! %o0 and %o1 are now available as scratch registers.
	!
0:
	SERVE_INTR(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	!
	! If %o3 is set, we must call serve_intr_post, and both %l1 and %o3
	! must be preserved. %l1 holds our pil, %l3 holds our inum.
	!
	! Note: %l1 is the pil level we're processing, but we may have a
	! higher effective pil because a higher-level interrupt may have
	! blocked.
	!
	wrpr	%g0, DISP_LEVEL, %pil
	!
	! Take timestamp, compute interval, update cumulative counter.
	!
	add	THREAD_REG, T_INTR_START, %o5
1:
	ldx	[%o5], %o0
#ifdef DEBUG
	brnz	%o0, 9f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %o1
	ld	[%o1 + %lo(panic_quiesce)], %o1
	brnz,pn	%o1, 9f
	nop
	sethi	%hi(intr_thread_t_intr_start_zero), %o0
	call	panic
	or	%o0, %lo(intr_thread_t_intr_start_zero), %o0
9:
#endif /* DEBUG */
	rdpr	%tick, %o1
	sllx	%o1, 1, %o1
	srlx	%o1, 1, %o1		! shift off NPT bit
	sub	%o1, %o0, %l2		! l2 has interval
	!
	! The general outline of what the code here does is:
	! 1. load t_intr_start, %tick, and calculate the delta
	! 2. replace t_intr_start with %tick (if %o3 is set) or 0.
	!
	! The problem is that a high-level interrupt could arrive at any
	! time.  It will account for (%tick - t_intr_start) for us when it
	! starts, unless we have set t_intr_start to zero, and then set
	! t_intr_start to a new %tick when it finishes. To account for this,
	! our first step is to load t_intr_start and the last is to use casx
	! to store the new t_intr_start. This guarantees atomicity in
	! reading t_intr_start, reading %tick, and updating t_intr_start.
	!
	movrz	%o3, %g0, %o1
	casx	[%o5], %o0, %o1
	cmp	%o0, %o1
	bne,pn	%xcc, 1b
	!
	! Check for Energy Star mode
	!
	lduh	[%o2 + CPU_DIVISOR], %o0	! delay -- %o0 = clock divisor
	cmp	%o0, 1
	bg,a,pn	%xcc, 2f
	mulx	%l2, %o0, %l2	! multiply interval by clock divisor iff > 1
2:
	!
	! Update cpu_intrstat. If o3 is set then we will be processing another
	! interrupt. Above we have set t_intr_start to %tick, not 0. This
	! means a high-level interrupt can arrive and update the same stats
	! we're updating. Need to use casx.
	!
	sllx	%l1, 4, %o1		! delay - PIL as byte offset
	add	%o1, CPU_MCPU, %o1	! CPU_INTRSTAT const too big
	add	%o1, MCPU_INTRSTAT, %o1	! add parts separately
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5		! old counter in o5
	add	%o5, %l2, %o0		! new counter in o0
	stx	%o0, [%o1 + 8]		! store into intrstat[pil][1]
	casx	[%o1], %o5, %o0		! and into intrstat[pil][0]
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop
	! Also update intracct[] (casx retry loop, same race as above)
	lduh	[%o2 + CPU_MSTATE], %o1
	sllx	%o1, 3, %o1
	add	%o1, CPU_INTRACCT, %o1
	add	%o1, %o2, %o1
1:
	ldx	[%o1], %o5
	add	%o5, %l2, %o0
	casx	[%o1], %o5, %o0
	cmp	%o5, %o0
	bne,pn	%xcc, 1b
	nop

	!
	! Don't keep a pinned process pinned indefinitely. Bump cpu_intrcnt
	! for each interrupt handler we invoke. If we hit INTRCNT_LIMIT, then
	! we've crossed the threshold and we should unpin the pinned threads
	! by preempt()ing ourselves, which will bubble up the t_intr chain
	! until hitting the non-interrupt thread, which will then in turn
	! preempt itself allowing the interrupt processing to resume.
	! Finally, the scheduler takes over and picks the next thread to run.
	!
	! If our CPU is quiesced, we cannot preempt because the idle thread
	! won't ever re-enter the scheduler, and the interrupt will be
	! forever blocked.
	!
	! If t_intr is NULL, we're not pinning anyone, so we use a simpler
	! algorithm. Just check for cpu_kprunrun, and if set then preempt.
	! This insures we enter the scheduler if a higher-priority thread
	! has become runnable.
	!
	lduh	[%o2 + CPU_FLAGS], %o5	! don't preempt if quiesced
	andcc	%o5, CPU_QUIESCED, %g0
	bnz,pn	%xcc, 1f
	ldn	[THREAD_REG + T_INTR], %o5	! pinning anything?
	brz,pn	%o5, 3f			! if not, don't inc intrcnt
	ldub	[%o2 + CPU_INTRCNT], %o5	! delay - %o5 = cpu_intrcnt
	inc	%o5
	cmp	%o5, INTRCNT_LIMIT	! have we hit the limit?
	bl,a,pt	%xcc, 1f		! no preempt if < INTRCNT_LIMIT
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay annul - inc CPU_INTRCNT
	bg,pn	%xcc, 2f		! don't inc stats again
	!
	! We've reached the limit. Set cpu_intrcnt and cpu_kprunrun, and do
	! CPU_STATS_ADDQ(cp, sys, intrunpin, 1). Then call preempt.
	!
	mov	1, %o4			! delay
	stub	%o4, [%o2 + CPU_KPRUNRUN]
	ldx	[%o2 + CPU_STATS_SYS_INTRUNPIN], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRUNPIN]
	ba	2f
	stub	%o5, [%o2 + CPU_INTRCNT]	! delay
3:
	! Code for t_intr == NULL
	ldub	[%o2 + CPU_KPRUNRUN], %o5
	brz,pt	%o5, 1f			! don't preempt unless kprunrun
2:
	! Time to call preempt
	mov	%o2, %l3		! delay - save %o2
	call	preempt
	mov	%o3, %l2		! delay - save %o3.
	mov	%l3, %o2		! restore %o2
	mov	%l2, %o3		! restore %o3
	wrpr	%g0, DISP_LEVEL, %pil	! up from cpu_base_spl
1:
	!
	! Do we need to call serve_intr_post and do this again?
	!
	brz,a,pt %o3, 0f
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay annulled
	!
	! Restore %pil before calling serve_intr() again. We must check
	! CPU_BASE_SPL and set %pil to max(our-pil, CPU_BASE_SPL)
	!
	ld	[%o2 + CPU_BASE_SPL], %o4
	cmp	%o4, %l1
	movl	%xcc, %l1, %o4
	wrpr	%g0, %o4, %pil
	SERVE_INTR_NEXT(%o1, %o2, %l1, %l3, %o4, %o5, %o3, %o0)
	ba	0b			! compute new stats
	nop
0:
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	mov	1, %o4
	sll	%o4, %l1, %o4
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%o4, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%o5, %o4, %o5
	st	%o5, [%o2 + CPU_INTR_ACTV]
	!
	! If there is still an interrupted thread underneath this one,
	! then the interrupt was never blocked and the return is fairly
	! simple. Otherwise jump to intr_thread_exit.
	!
	ldn	[THREAD_REG + T_INTR], %o4	! pinned thread
	brz,pn	%o4, intr_thread_exit		! branch if none
	nop
	!
	! link the thread back onto the interrupt thread pool
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o3
	stn	%o3, [THREAD_REG + T_LINK]
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o5
	st	%o5, [THREAD_REG + T_STATE]
	!
	! Switch back to the interrupted thread and return
	!
	stn	%o4, [%o2 + CPU_THREAD]
	mov	%o4, THREAD_REG		! restore global curthread register

	! If we pinned an interrupt thread, store its starting timestamp.
	lduh	[THREAD_REG + T_FLAGS], %o5
	andcc	%o5, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp

	add	THREAD_REG, T_INTR_START, %o3	! o3 has &curthread->t_intr_star
0:
	ldx	[%o3], %o4		! o4 = t_intr_start before
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5		! shift off NPT bit
	casx	[%o3], %o4, %o5		! put o5 in ts if o4 == ts after
	cmp	%o4, %o5
	! If a high-level interrupt occurred while we were attempting to
	! store the timestamp, try again.
	bne,pn	%xcc, 0b
	ldn	[THREAD_REG + T_SP], %sp	! delay - restore %sp
1:
	! If the thread being restarted isn't pinning anyone, and no
	! interrupts are pending, zero out cpu_intrcnt
	ldn	[THREAD_REG + T_INTR], %o4
	brnz,pn	%o4, 2f
	rd	SOFTINT, %o4		! delay
	set	SOFTINT_MASK, %o5
	andcc	%o4, %o5, %g0
	bz,a,pt	%xcc, 2f
	stub	%g0, [%o2 + CPU_INTRCNT]	! delay annul
2:
	jmp	%l0 + 8			! return to trap code
	nop
	SET_SIZE(intr_thread)
	/* Not Reached */

	!
	! An interrupt returned on what was once (and still might be)
	! an interrupt thread stack, but the interrupted process is no longer
	! there. This means the interrupt must have blocked.
	!
	! There is no longer a thread under this one, so put this thread back
	! on the CPU's free list and resume the idle thread which will
	! dispatch the next thread to run.
	!
	! All traps below DISP_LEVEL are disabled here, but the mondo
	! interrupt is enabled.
	!
	ENTRY_NP(intr_thread_exit)
#ifdef TRAPTRACE
	rdpr	%pstate, %l2
	andn	%l2, PSTATE_IE | PSTATE_AM, %o4
	wrpr	%g0, %o4, %pstate	! cpu to known state
	TRACE_PTR(%o4, %o5)
	GET_TRACE_TICK(%o5)
	stxa	%o5, [%o4 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%o4, %o5)
	set	TT_INTR_EXIT, %o5
	stha	%o5, [%o4 + TRAP_ENT_TT]%asi
	stna	%g0, [%o4 + TRAP_ENT_TPC]%asi
	stxa	%g0, [%o4 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%o4 + TRAP_ENT_SP]%asi
	stna	THREAD_REG, [%o4 + TRAP_ENT_TR]%asi
	ld	[%o2 + CPU_BASE_SPL], %o5
	stna	%o5, [%o4 + TRAP_ENT_F1]%asi
	stna	%g0, [%o4 + TRAP_ENT_F2]%asi
	stna	%g0, [%o4 + TRAP_ENT_F3]%asi
	stna	%g0, [%o4 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%o4, %o5, %o0)
	wrpr	%g0, %l2, %pstate
#endif /* TRAPTRACE */
	! cpu_stats.sys.intrblk++
	ldx	[%o2 + CPU_STATS_SYS_INTRBLK], %o4
	inc	%o4
	stx	%o4, [%o2 + CPU_STATS_SYS_INTRBLK]
	!
	! Put thread back on the interrupt thread list.
	!

	!
	! Set the CPU's base SPL level.
	!
#ifdef DEBUG
	!
	! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL)))
	!
	ld	[%o2 + CPU_INTR_ACTV], %o5
	mov	1, %o4
	sll	%o4, %l1, %o4
	and	%o5, %o4, %o4
	brz,pt	%o4, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(intr_thread_exit_actv_bit_set), %o0
	call	panic
	or	%o0, %lo(intr_thread_exit_actv_bit_set), %o0
0:
#endif /* DEBUG */
	call	_intr_set_spl			! set CPU's base SPL level
	ld	[%o2 + CPU_INTR_ACTV], %o5	! delay - load active mask
	!
	! set the thread state to free so kernel debuggers don't see it
	!
	mov	TS_FREE, %o4
	st	%o4, [THREAD_REG + T_STATE]
	!
	! Put thread on either the interrupt pool or the free pool and
	! call swtch() to resume another thread.
	!
	ldn	[%o2 + CPU_INTR_THREAD], %o5	! get list pointer
	stn	%o5, [THREAD_REG + T_LINK]
	call	swtch			! switch to best thread
	stn	THREAD_REG, [%o2 + CPU_INTR_THREAD]	! delay - put thread on list
	ba,a,pt	%xcc, .			! swtch() shouldn't return
swtch() shouldn't return SET_SIZE(intr_thread_exit) .global ftrace_intr_thread_format_str ftrace_intr_thread_format_str: .asciz "intr_thread(): regs=0x%lx, int=0x%lx, pil=0x%lx" #ifdef DEBUG intr_thread_actv_bit_set: .asciz "intr_thread(): cpu_intr_actv bit already set for PIL" intr_thread_actv_bit_not_set: .asciz "intr_thread(): cpu_intr_actv bit not set for PIL" intr_thread_exit_actv_bit_set: .asciz "intr_thread_exit(): cpu_intr_actv bit erroneously set for PIL" intr_thread_t_intr_start_zero: .asciz "intr_thread(): t_intr_start zero upon handler return" #endif /* DEBUG */ #endif /* lint */ #if defined(lint) /* * Handle an interrupt in the current thread * Entry: * %o0 = pointer to regs structure * %o1 = inumber * %o2 = pil * %sp = on current thread's kernel stack * %o7 = return linkage to trap code * %g7 = current thread * %pstate = normal globals, interrupts enabled, * privileged, fp disabled * %pil = PIL_MAX * * Register Usage * %l0 = return linkage * %l1 = old stack * %l2 - %l3 = scratch * %l4 - %l7 = reserved for sys_trap * %o3 = cpu * %o0 = scratch * %o4 - %o5 = scratch */ /* ARGSUSED */ void current_thread(struct regs *regs, uint_t inumber, uint_t pil) {} #else /* lint */ ENTRY_NP(current_thread) mov %o7, %l0 ldn [THREAD_REG + T_CPU], %o3 ! ! Set bit for this level in CPU's active interrupt bitmask. ! ld [%o3 + CPU_INTR_ACTV], %o5 ! o5 has cpu_intr_actv b4 chng mov 1, %o4 sll %o4, %o2, %o4 ! construct mask for level #ifdef DEBUG ! ! ASSERT(!(CPU->cpu_intr_actv & (1 << PIL))) ! andcc %o5, %o4, %g0 bz,pt %xcc, 0f nop ! Do not call panic if a panic is already in progress. sethi %hi(panic_quiesce), %l2 ld [%l2 + %lo(panic_quiesce)], %l2 brnz,pn %l2, 0f nop sethi %hi(current_thread_actv_bit_set), %o0 call panic or %o0, %lo(current_thread_actv_bit_set), %o0 0: #endif /* DEBUG */ or %o5, %o4, %o4 ! ! See if we are interrupting another high-level interrupt. ! srl %o5, LOCK_LEVEL + 1, %o5 ! only look at high-level bits brz,pt %o5, 1f st %o4, [%o3 + CPU_INTR_ACTV] ! 
delay - store active mask ! ! We have interrupted another high-level interrupt. Find its PIL, ! compute the interval it ran for, and update its cumulative counter. ! ! Register usage: ! o2 = PIL of this interrupt ! o5 = high PIL bits of INTR_ACTV (not including this PIL) ! l1 = bitmask used to find other active high-level PIL ! o4 = index of bit set in l1 ! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the ! interrupted high-level interrupt. ! Create mask for cpu_intr_actv. Begin by looking for bits set ! at one level below the current PIL. Since %o5 contains the active ! mask already shifted right by (LOCK_LEVEL + 1), we start by looking ! at bit (current_pil - (LOCK_LEVEL + 2)). sub %o2, LOCK_LEVEL + 2, %o4 mov 1, %l1 sll %l1, %o4, %l1 2: #ifdef DEBUG ! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge) brnz,pt %l1, 9f nop ! Don't panic if a panic is already in progress. sethi %hi(panic_quiesce), %l3 ld [%l3 + %lo(panic_quiesce)], %l3 brnz,pn %l3, 9f nop sethi %hi(current_thread_nested_PIL_not_found), %o0 call panic or %o0, %lo(current_thread_nested_PIL_not_found), %o0 9: #endif /* DEBUG */ andcc %l1, %o5, %g0 ! test mask against high-level bits of bnz %xcc, 3f ! cpu_intr_actv nop srl %l1, 1, %l1 ! No match. Try next lower PIL. ba,pt %xcc, 2b sub %o4, 1, %o4 ! delay - decrement PIL 3: sll %o4, 3, %o4 ! index to byte offset add %o4, CPU_MCPU, %l1 ! CPU_PIL_HIGH_START is too large add %l1, MCPU_PIL_HIGH_START, %l1 ldx [%o3 + %l1], %l3 ! load starting timestamp #ifdef DEBUG brnz,pt %l3, 9f nop ! Don't panic if a panic is already in progress. sethi %hi(panic_quiesce), %l1 ld [%l1 + %lo(panic_quiesce)], %l1 brnz,pn %l1, 9f nop srl %o4, 3, %o1 ! Find interrupted PIL for panic add %o1, LOCK_LEVEL + 1, %o1 sethi %hi(current_thread_nested_pil_zero), %o0 call panic or %o0, %lo(current_thread_nested_pil_zero), %o0 9: #endif /* DEBUG */ rdpr %tick, %l1 sllx %l1, 1, %l1 srlx %l1, 1, %l1 ! shake off NPT bit sub %l1, %l3, %l3 ! interval in %l3 ! ! 
Check for Energy Star mode ! lduh [%o3 + CPU_DIVISOR], %l1 ! %l1 = clock divisor cmp %l1, 1 bg,a,pn %xcc, 2f mulx %l3, %l1, %l3 ! multiply interval by clock divisor iff > 1 2: ! ! We need to find the CPU offset of the cumulative counter. We start ! with %o4, which has (PIL - (LOCK_LEVEL + 1)) * 8. We need PIL * 16, ! so we shift left 1, then add (LOCK_LEVEL + 1) * 16, which is ! CPU_INTRSTAT_LOW_PIL_OFFSET. ! sll %o4, 1, %o4 add %o4, CPU_MCPU, %o4 ! CPU_INTRSTAT const too large add %o4, MCPU_INTRSTAT, %o4 ! add parts separately add %o4, CPU_INTRSTAT_LOW_PIL_OFFSET, %o4 ldx [%o3 + %o4], %l1 ! old counter in l1 add %l1, %l3, %l1 ! new counter in l1 stx %l1, [%o3 + %o4] ! store new counter ! Also update intracct[] lduh [%o3 + CPU_MSTATE], %o4 sllx %o4, 3, %o4 add %o4, CPU_INTRACCT, %o4 ldx [%o3 + %o4], %l1 add %l1, %l3, %l1 ! Another high-level interrupt is active below this one, so ! there is no need to check for an interrupt thread. That will be ! done by the lowest priority high-level interrupt active. ba,pt %xcc, 5f stx %l1, [%o3 + %o4] ! delay - store new counter 1: ! If we haven't interrupted another high-level interrupt, we may be ! interrupting a low level interrupt thread. If so, compute its interval ! and update its cumulative counter. lduh [THREAD_REG + T_FLAGS], %o4 andcc %o4, T_INTR_THREAD, %g0 bz,pt %xcc, 4f nop ! We have interrupted an interrupt thread. Take timestamp, compute ! interval, update cumulative counter. ! Check t_intr_start. If it is zero, either intr_thread() or ! current_thread() (at a lower PIL, of course) already did ! the accounting for the underlying interrupt thread. ldx [THREAD_REG + T_INTR_START], %o5 brz,pn %o5, 4f nop stx %g0, [THREAD_REG + T_INTR_START] rdpr %tick, %o4 sllx %o4, 1, %o4 srlx %o4, 1, %o4 ! shake off NPT bit sub %o4, %o5, %o5 ! o5 has the interval ! Check for Energy Star mode lduh [%o3 + CPU_DIVISOR], %o4 ! %o4 = clock divisor cmp %o4, 1 bg,a,pn %xcc, 2f mulx %o5, %o4, %o5 ! 
multiply interval by clock divisor iff > 1 2: ldub [THREAD_REG + T_PIL], %o4 sllx %o4, 4, %o4 ! PIL index to byte offset add %o4, CPU_MCPU, %o4 ! CPU_INTRSTAT const too large add %o4, MCPU_INTRSTAT, %o4 ! add parts separately ldx [%o3 + %o4], %l2 ! old counter in l2 add %l2, %o5, %l2 ! new counter in l2 stx %l2, [%o3 + %o4] ! store new counter ! Also update intracct[] lduh [%o3 + CPU_MSTATE], %o4 sllx %o4, 3, %o4 add %o4, CPU_INTRACCT, %o4 ldx [%o3 + %o4], %l2 add %l2, %o5, %l2 stx %l2, [%o3 + %o4] 4: ! ! Handle high-level interrupts on separate interrupt stack. ! No other high-level interrupts are active, so switch to int stack. ! mov %sp, %l1 ldn [%o3 + CPU_INTR_STACK], %l3 sub %l3, STACK_BIAS, %sp 5: #ifdef DEBUG ! ! ASSERT(%o2 > LOCK_LEVEL) ! cmp %o2, LOCK_LEVEL bg,pt %xcc, 3f nop mov CE_PANIC, %o0 sethi %hi(current_thread_wrong_pil), %o1 call cmn_err ! %o2 has the %pil already or %o1, %lo(current_thread_wrong_pil), %o1 #endif 3: ! Store starting timestamp for this PIL in CPU structure at ! cpu.cpu_m.pil_high_start[PIL - (LOCK_LEVEL + 1)] sub %o2, LOCK_LEVEL + 1, %o4 ! convert PIL to array index sllx %o4, 3, %o4 ! index to byte offset add %o4, CPU_MCPU, %o4 ! CPU_PIL_HIGH_START is too large add %o4, MCPU_PIL_HIGH_START, %o4 rdpr %tick, %o5 sllx %o5, 1, %o5 srlx %o5, 1, %o5 stx %o5, [%o3 + %o4] wrpr %g0, %o2, %pil ! enable interrupts ! ! call the handler ! SERVE_INTR_PRE(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0) 1: SERVE_INTR(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0) brz,a,pt %o2, 0f ! if %o2, more intrs await rdpr %pil, %o2 ! delay annulled SERVE_INTR_NEXT(%o1, %o3, %l2, %l3, %o4, %o5, %o2, %o0) ba 1b nop 0: wrpr %g0, PIL_MAX, %pil ! disable interrupts (1-15) cmp %o2, PIL_15 bne,pt %xcc, 3f nop sethi %hi(cpc_level15_inum), %o1 ld [%o1 + %lo(cpc_level15_inum)], %o1 ! arg for intr_enqueue_req brz %o1, 3f nop rdpr %pstate, %g5 andn %g5, PSTATE_IE, %g1 wrpr %g0, %g1, %pstate ! Disable vec interrupts call intr_enqueue_req ! preserves %g5 mov PIL_15, %o0 ! 
! clear perfcntr overflow (PIL_15 soft interrupt)
	mov	1, %o0
	sllx	%o0, PIL_15, %o0
	wr	%o0, CLEAR_SOFTINT
	wrpr	%g0, %g5, %pstate		! Enable vec interrupts
3:
	cmp	%o2, PIL_14
	be	tick_rtt			! cpu-specific tick processing
	nop

	.global	current_thread_complete
current_thread_complete:
	!
	! Register usage:
	!
	! %l1 = stack pointer
	! %l2 = CPU_INTR_ACTV >> (LOCK_LEVEL + 1)
	! %o2 = PIL
	! %o3 = CPU pointer
	! %o4, %o5, %l3, %l4, %l5 = scratch
	!
	ldn	[THREAD_REG + T_CPU], %o3
	!
	! Clear bit for this level in CPU's interrupt active bitmask.
	!
	ld	[%o3 + CPU_INTR_ACTV], %l2
	mov	1, %o5
	sll	%o5, %o2, %o5			! %o5 = bit mask for this PIL
#ifdef DEBUG
	!
	! ASSERT(CPU->cpu_intr_actv & (1 << PIL))
	!
	andcc	%l2, %o5, %g0
	bnz,pt	%xcc, 0f
	nop
	! Do not call panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 0f
	nop
	sethi	%hi(current_thread_actv_bit_not_set), %o0
	call	panic
	or	%o0, %lo(current_thread_actv_bit_not_set), %o0
0:
#endif /* DEBUG */
	andn	%l2, %o5, %l2
	st	%l2, [%o3 + CPU_INTR_ACTV]

	! Take timestamp, compute interval, update cumulative counter.
	sub	%o2, LOCK_LEVEL + 1, %o4	! PIL to array index
	sllx	%o4, 3, %o4			! index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_PIL_HIGH_START is too large
	add	%o4, MCPU_PIL_HIGH_START, %o4
	rdpr	%tick, %o5
	sllx	%o5, 1, %o5
	srlx	%o5, 1, %o5			! shake off NPT bit
	ldx	[%o3 + %o4], %o0		! %o0 = start timestamp for this PIL
#ifdef DEBUG
	! ASSERT(cpu.cpu_m.pil_high_start[pil - (LOCK_LEVEL + 1)] != 0)
	brnz,pt	%o0, 9f
	nop
	! Don't panic if a panic is already in progress.
	sethi	%hi(panic_quiesce), %l2
	ld	[%l2 + %lo(panic_quiesce)], %l2
	brnz,pn	%l2, 9f
	nop
	sethi	%hi(current_thread_timestamp_zero), %o0
	call	panic
	or	%o0, %lo(current_thread_timestamp_zero), %o0
9:
#endif /* DEBUG */
	stx	%g0, [%o3 + %o4]		! timestamp consumed; clear it
	sub	%o5, %o0, %o5			! interval in o5

	! Check for Energy Star mode
	lduh	[%o3 + CPU_DIVISOR], %o4	! %o4 = clock divisor
	cmp	%o4, 1
	bg,a,pn	%xcc, 2f
	mulx	%o5, %o4, %o5	! delay (annulled if not taken) - multiply interval by clock divisor iff > 1
2:
	sllx	%o2, 4, %o4			! PIL index to byte offset
	add	%o4, CPU_MCPU, %o4		! CPU_INTRSTAT too large
	add	%o4, MCPU_INTRSTAT, %o4		! add parts separately
	ldx	[%o3 + %o4], %o0		! old counter in o0
	add	%o0, %o5, %o0			! new counter in o0
	stx	%o0, [%o3 + %o4]		! store new counter

	! Also update intracct[]
	lduh	[%o3 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o3 + %o4], %o0
	add	%o0, %o5, %o0
	stx	%o0, [%o3 + %o4]

	!
	! get back on current thread's stack
	!
	srl	%l2, LOCK_LEVEL + 1, %l2
	tst	%l2				! any more high-level ints?
	movz	%xcc, %l1, %sp			! if not, restore saved thread sp
	!
	! Current register usage:
	! o2 = PIL
	! o3 = CPU pointer
	! l0 = return address
	! l2 = intr_actv shifted right
	!
	bz,pt	%xcc, 3f			! if l2 was zero, no more ints
	nop

	!
	! We found another high-level interrupt active below the one that just
	! returned. Store a starting timestamp for it in the CPU structure.
	!
	! Use cpu_intr_actv to find the cpu_pil_high_start[] offset for the
	! interrupted high-level interrupt.
	! Create mask for cpu_intr_actv. Begin by looking for bits set
	! at one level below the current PIL. Since %l2 contains the active
	! mask already shifted right by (LOCK_LEVEL + 1), we start by looking
	! at bit (current_pil - (LOCK_LEVEL + 2)).
	! %l1 = mask, %o5 = index of bit set in mask
	!
	mov	1, %l1
	sub	%o2, LOCK_LEVEL + 2, %o5
	sll	%l1, %o5, %l1			! l1 = mask for level
1:
#ifdef DEBUG
	! ASSERT(%l1 != 0) (we didn't shift the bit off the right edge)
	brnz,pt	%l1, 9f
	nop
	sethi	%hi(current_thread_nested_PIL_not_found), %o0
	call	panic
	or	%o0, %lo(current_thread_nested_PIL_not_found), %o0
9:
#endif /* DEBUG */
	andcc	%l1, %l2, %g0			! test mask against high-level bits of
	bnz	%xcc, 2f			! cpu_intr_actv
	nop
	srl	%l1, 1, %l1			! No match. Try next lower PIL.
	ba,pt	%xcc, 1b
	sub	%o5, 1, %o5			! delay - decrement PIL
2:
	sll	%o5, 3, %o5			! convert array index to byte offset
	add	%o5, CPU_MCPU, %o5		! CPU_PIL_HIGH_START is too large
	add	%o5, MCPU_PIL_HIGH_START, %o5
	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! shake off NPT bit
	! Another high-level interrupt is active below this one, so
	! there is no need to check for an interrupt thread. That will be
	! done by the lowest priority high-level interrupt active.
	ba,pt	%xcc, 1f
	stx	%o4, [%o3 + %o5]		! delay - store timestamp
3:
	! If we haven't interrupted another high-level interrupt, we may have
	! interrupted a low level interrupt thread. If so, store a starting
	! timestamp in its thread structure.
	lduh	[THREAD_REG + T_FLAGS], %o4
	andcc	%o4, T_INTR_THREAD, %g0
	bz,pt	%xcc, 1f
	nop

	rdpr	%tick, %o4
	sllx	%o4, 1, %o4
	srlx	%o4, 1, %o4			! Shake off NPT bit
	stx	%o4, [THREAD_REG + T_INTR_START]
1:
	! Enable interrupts and return
	jmp	%l0 + 8				! %l0 holds the return address
	wrpr	%g0, %o2, %pil			! delay - enable interrupts
	SET_SIZE(current_thread)

#ifdef DEBUG
current_thread_wrong_pil:
	.asciz	"current_thread: unexpected pil level: %d"
current_thread_actv_bit_set:
	.asciz	"current_thread(): cpu_intr_actv bit already set for PIL"
current_thread_actv_bit_not_set:
	.asciz	"current_thread(): cpu_intr_actv bit not set for PIL"
current_thread_nested_pil_zero:
	.asciz	"current_thread(): timestamp zero for nested PIL %d"
current_thread_timestamp_zero:
	.asciz	"current_thread(): timestamp zero upon handler return"
current_thread_nested_PIL_not_found:
	.asciz	"current_thread: couldn't find nested high-level PIL"
#endif /* DEBUG */
#endif /* lint */

/*
 * Return a thread's interrupt level.
 * Since this isn't saved anywhere but in %l4 on interrupt entry, we
 * must dig it out of the save area.
 *
 * Caller 'swears' that this really is an interrupt thread.
 *
 * int
 * intr_level(t)
 *	kthread_id_t	t;
 */

#if defined(lint)

/* ARGSUSED */
int
intr_level(kthread_id_t t)
{ return (0); }

#else	/* lint */

	ENTRY_NP(intr_level)
	retl
	ldub	[%o0 + T_PIL], %o0		! delay - return saved pil
	SET_SIZE(intr_level)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
int
disable_pil_intr()
{ return (0); }

#else	/* lint */

	! Raise %pil to PIL_MAX and return the previous %pil in %o0.
	ENTRY_NP(disable_pil_intr)
	rdpr	%pil, %o0
	retl
	wrpr	%g0, PIL_MAX, %pil		! delay - disable interrupts (1-15)
! disable interrupts (1-15)
	SET_SIZE(disable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_pil_intr(int pil_save)
{}

#else	/* lint */

	! Restore %pil from the value previously returned by disable_pil_intr().
	ENTRY_NP(enable_pil_intr)
	retl
	wrpr	%o0, %pil			! delay
	SET_SIZE(enable_pil_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
uint_t
disable_vec_intr(void)
{ return (0); }

#else	/* lint */

	! Clear PSTATE.IE; return the previous %pstate in %o0 for the
	! matching enable_vec_intr() call.
	ENTRY_NP(disable_vec_intr)
	rdpr	%pstate, %o0
	andn	%o0, PSTATE_IE, %g1
	retl
	wrpr	%g0, %g1, %pstate		! delay - disable interrupt
	SET_SIZE(disable_vec_intr)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
enable_vec_intr(uint_t pstate_save)
{}

#else	/* lint */

	! Restore %pstate (and hence PSTATE.IE) saved by disable_vec_intr().
	ENTRY_NP(enable_vec_intr)
	retl
	wrpr	%g0, %o0, %pstate		! delay
	SET_SIZE(enable_vec_intr)

#endif	/* lint */

#if defined(lint)

void
cbe_level14(void)
{}

#else	/* lint */

	ENTRY_NP(cbe_level14)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	!
	! Make sure that this is from TICK_COMPARE; if not just return
	!
	rd	SOFTINT, %l1
	set	(TICK_INT_MASK | STICK_INT_MASK), %o2
	andcc	%l1, %o2, %g0
	bz,pn	%icc, 2f
	nop

	CPU_ADDR(%o1, %o2)			! %o1 = CPU pointer
	call	cyclic_fire
	mov	%o1, %o0			! delay - arg0 = CPU pointer
2:
	ret
	restore	%g0, 1, %o0			! delay - return 1
	SET_SIZE(cbe_level14)

#endif	/* lint */

#if defined(lint)

/* ARGSUSED */
void
setsoftint(uint_t inum)
{}

#else	/* lint */

	ENTRY_NP(setsoftint)
	save	%sp, -SA(MINFRAME), %sp	! get a new window
	rdpr	%pstate, %l5
	andn	%l5, PSTATE_IE, %l1
	wrpr	%l1, %pstate		! disable interrupt
	!
	! Fetch data from intr_vector[] table according to the inum.
	!
	! We have an interrupt number.
	! Put the request on the cpu's softint list,
	! and set %set_softint.
	!
	! Register usage
	!	%i0 - inumber
	!	%l2 - requested pil
	!	%l3 - intr_req
	!	%l4 - *cpu
	!	%l1, %l6 - temps
	!
	! check if a softint is pending for this inum already
	! if one is pending, don't bother queuing another
	!
	set	intr_vector, %l1
	sll	%i0, INTR_VECTOR_SHIFT, %l6
	add	%l1, %l6, %l1			! %l1 = &intr_vector[inum]
	lduh	[%l1 + IV_PENDING], %l6
	brnz,pn	%l6, 4f				! branch, if pending
	or	%g0, 1, %l2			! delay
	sth	%l2, [%l1 + IV_PENDING]		! intr_vector[inum].pend = 1
! allocate an intr_req from the free list
	!
	CPU_ADDR(%l4, %l2)			! %l4 = CPU pointer
	ldn	[%l4 + INTR_HEAD], %l3		! %l3 = first free intr_req
	lduh	[%l1 + IV_PIL], %l2		! %l2 = requested pil
	!
	! fixup free list
	!
	ldn	[%l3 + INTR_NEXT], %l6
	stn	%l6, [%l4 + INTR_HEAD]
	!
	! fill up intr_req
	!
	st	%i0, [%l3 + INTR_NUMBER]
	stn	%g0, [%l3 + INTR_NEXT]
	!
	! move intr_req to appropriate list
	!
	sll	%l2, CPTRSHIFT, %l0		! %l0 = byte offset for this pil
	add	%l4, INTR_TAIL, %l6
	ldn	[%l6 + %l0], %l1		! current tail
	brz,pt	%l1, 2f				! branch if list empty
	stn	%l3, [%l6 + %l0]		! delay - make intr_req new tail
	!
	! there's pending intr_req already
	!
	ba,pt	%xcc, 3f
	stn	%l3, [%l1 + INTR_NEXT]		! delay - update old tail
2:
	!
	! no pending intr_req; make intr_req new head
	!
	add	%l4, INTR_HEAD, %l6
	stn	%l3, [%l6 + %l0]
3:
	!
	! Write %set_softint with (1 << pil)
	!
	! NOTE(review): the text between this point and the DEBUG
	! assertions below (the rest of setsoftint and any routines up to
	! intr_get_time) appears to be missing from this copy of the file;
	! confirm against the original source.  The code below is the
	! DEBUG-assertion section of intr_get_time():
	! ASSERT((t_flags & T_INTR_THREAD) != 0 && t_pil > 0)
	!
	lduh	[THREAD_REG + T_FLAGS], %o2
	andcc	%o2, T_INTR_THREAD, %g0
	bz,pn	%xcc, 1f
	ldub	[THREAD_REG + T_PIL], %o1	! delay
	brnz,pt	%o1, 0f
1:
	sethi	%hi(intr_get_time_not_intr), %o0
	call	panic
	or	%o0, %lo(intr_get_time_not_intr), %o0
0:
	!
	! ASSERT(t_intr_start != 0)
	!
	ldx	[THREAD_REG + T_INTR_START], %o1
	brnz,pt	%o1, 2f
	sethi	%hi(intr_get_time_no_start_time), %o0	! delay
	call	panic
	or	%o0, %lo(intr_get_time_no_start_time), %o0
2:
#endif /* DEBUG */
	!
	! %o0 = elapsed time and return value
	! %o1 = pil
	! %o2 = scratch
	! %o3 = scratch
	! %o4 = scratch
	! %o5 = cpu
	!
	wrpr	%g0, PIL_MAX, %pil	! make this easy -- block normal intrs
	ldn	[THREAD_REG + T_CPU], %o5
	ldub	[THREAD_REG + T_PIL], %o1
	ldx	[THREAD_REG + T_INTR_START], %o3	! %o3 = t_intr_start
	!
	! Calculate elapsed time since t_intr_start. Update t_intr_start,
	! get delta, and multiply by cpu_divisor if necessary.
	!
	rdpr	%tick, %o2
	sllx	%o2, 1, %o2
	srlx	%o2, 1, %o2			! shake off NPT bit
	stx	%o2, [THREAD_REG + T_INTR_START]
	sub	%o2, %o3, %o0			! %o0 = elapsed ticks

	lduh	[%o5 + CPU_DIVISOR], %o4
	cmp	%o4, 1
	bg,a,pn	%xcc, 1f
	mulx	%o0, %o4, %o0	! delay (annulled if not taken) - multiply interval by clock divisor iff > 1
1:
	! Update intracct[] (indexed by the CPU's current microstate)
	lduh	[%o5 + CPU_MSTATE], %o4
	sllx	%o4, 3, %o4
	add	%o4, CPU_INTRACCT, %o4
	ldx	[%o5 + %o4], %o2
	add	%o2, %o0, %o2
	stx	%o2, [%o5 + %o4]

	!
	! Increment cpu_m.intrstat[pil][0]. Calculate elapsed time since
	! cpu_m.intrstat[pil][1], which is either when the interrupt was
	! first entered, or the last time intr_get_time() was invoked. Then
	! update cpu_m.intrstat[pil][1] to match [0].
	!
	sllx	%o1, 4, %o3			! PIL index to byte offset
	add	%o3, CPU_MCPU, %o3		! CPU_INTRSTAT const too large
	add	%o3, MCPU_INTRSTAT, %o3		! add parts separately
	add	%o3, %o5, %o3			! %o3 = cpu_m.intrstat[pil][0]
	ldx	[%o3], %o2
	add	%o2, %o0, %o2			! %o2 = new value for intrstat
	stx	%o2, [%o3]
	ldx	[%o3 + 8], %o4			! %o4 = cpu_m.intrstat[pil][1]
	sub	%o2, %o4, %o0			! %o0 is elapsed time since %o4
	stx	%o2, [%o3 + 8]			! make [1] match [0], resetting time

	ld	[%o5 + CPU_BASE_SPL], %o2	! restore %pil to the greater
	cmp	%o2, %o1			! of either our pil %o1 or
	movl	%xcc, %o1, %o2			! cpu_base_spl.
	retl
	wrpr	%g0, %o2, %pil			! delay
	SET_SIZE(intr_get_time)

#ifdef DEBUG
intr_get_time_high_pil:
	.asciz	"intr_get_time(): %pil > LOCK_LEVEL"
intr_get_time_not_intr:
	.asciz	"intr_get_time(): not called from an interrupt thread"
intr_get_time_no_start_time:
	.asciz	"intr_get_time(): t_intr_start == 0"
#endif /* DEBUG */

#endif /* lint */

#if !defined(lint)

/*
 * Check shift value used for computing array offsets
 */
#if INTR_VECTOR_SIZE != (1 << INTR_VECTOR_SHIFT)
#error "INTR_VECTOR_SIZE has changed"
#endif

#endif /* lint */