/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/regset.h>
#include <sys/psw.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <vm/seg_kp.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/smp_impldefs.h>
#include <sys/pool_pset.h>
#include <sys/zone.h>
#include <sys/bitmap.h>

#if defined(__amd64)

#if defined(__lint)
/*
 * atomic_btr32() is a gcc __inline__ function, defined in <asm/bitmap.h>.
 * For lint purposes, define it here.
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}
#else

extern uint_t atomic_btr32(uint32_t *pending, uint_t pil);

#endif

/*
 * This code is amd64-only for now, but as time permits, we should
 * use this on i386 too.
 */

/*
 * Some questions to ponder:
 * -	in several of these routines, we make multiple calls to tsc_read()
 *	without invoking functions .. couldn't we just reuse the same
 *	timestamp sometimes?
 * -	if we have the inline, we can probably make set_base_spl be a
 *	C routine too.
 */

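/*
 * C equivalent of the x86 `bsrw' (bit scan reverse) instruction:
 * returns the index of the highest bit set in a non-zero 16-bit mask,
 * e.g. bsrw_insn(0x28) == 5.  The callers below use it to pick the
 * highest (most urgent) PIL out of a pending or active bitmask.
 */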
static uint_t
bsrw_insn(uint16_t mask)
{
	uint_t index = sizeof (mask) * NBBY - 1;

	ASSERT(mask != 0);

	while ((mask & (1 << index)) == 0)
		index--;
	return (index);
}

/*
 * Do all the work necessary to set up the cpu and thread structures
 * to dispatch a high-level interrupt.
 *
 * Returns 0 if we're -not- already on the high-level interrupt stack,
 * (and *must* switch to it), non-zero if we are already on that stack.
 *
 * Called with interrupts masked.
 * The 'pil' is already set to the appropriate level for rp->r_trapno.
 */
int
hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil, struct regs *rp)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;

	ASSERT(pil > LOCK_LEVEL);

	if (pil == CBE_HIGH_PIL) {
		cpu->cpu_profile_pil = oldpil;
		if (USERMODE(rp->r_cs)) {
			cpu->cpu_profile_pc = 0;
			cpu->cpu_profile_upc = rp->r_pc;
		} else {
			cpu->cpu_profile_pc = rp->r_pc;
			cpu->cpu_profile_upc = 0;
		}
	}

	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * We have interrupted another high-level interrupt.
		 * Load starting timestamp, compute interval, update
		 * cumulative counter.
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		ASSERT(nestpil < pil);
		mcpu->intrstat[nestpil] += tsc_read() -
		    mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
		/*
		 * Another high-level interrupt is active below this one, so
		 * there is no need to check for an interrupt thread.  That
		 * will be done by the lowest priority high-level interrupt
		 * active.
		 */
	} else {
		kthread_t *t = cpu->cpu_thread;

		/*
		 * See if we are interrupting a low-level interrupt thread.
		 * If so, account for its time slice only if its time stamp
		 * is non-zero.
		 */
		if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
			mcpu->intrstat[t->t_pil] +=
			    tsc_read() - t->t_intr_start;
			t->t_intr_start = 0;
		}
	}

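	/*
	 * pil_high_start[] has one slot per high-level PIL: the index
	 * pil - (LOCK_LEVEL + 1) maps PILs (LOCK_LEVEL + 1) through
	 * PIL_MAX onto slots 0 through (PIL_MAX - LOCK_LEVEL - 1).
	 */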
	/*
	 * Store starting timestamp in CPU structure for this PIL.
	 */
	mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = tsc_read();

	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);

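	/*
	 * The low 16 bits of cpu_intr_actv form the per-PIL "active"
	 * bitmask; the upper 16 bits hold the PIL 15 recursion count
	 * used below.  Addressing that count as
	 * ((uint16_t *)&cpu->cpu_intr_actv + 1) relies on x86 being
	 * little-endian.
	 */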
	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
		(*refcntp)++;
	}

	mask = cpu->cpu_intr_actv;

	cpu->cpu_intr_actv |= (1 << pil);

	return (mask & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

/*
 * Does most of the work of returning from a high level interrupt.
 *
 * Returns 0 if there are no more high level interrupts (in which
 * case we must switch back to the interrupted thread stack) or
 * non-zero if there are more (in which case we should stay on it).
 *
 * Called with interrupts masked.
 */
int
hilevel_intr_epilog(struct cpu *cpu, uint_t pil, uint_t oldpil, uint_t vecnum)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t mask;

	ASSERT(mcpu->mcpu_pri == pil);

	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));

	if (pil == 15) {
		/*
		 * To support reentrant level 15 interrupts, we maintain a
		 * recursion count in the top half of cpu_intr_actv.  Only
		 * when this count hits zero do we clear the PIL 15 bit from
		 * the lower half of cpu_intr_actv.
		 */
		uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;

		ASSERT(*refcntp > 0);

		if (--(*refcntp) == 0)
			cpu->cpu_intr_actv &= ~(1 << pil);
	} else {
		cpu->cpu_intr_actv &= ~(1 << pil);
	}

	ASSERT(mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] != 0);

	mcpu->intrstat[pil] +=
	    tsc_read() - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];

	/*
	 * Check for lower-pil nested high-level interrupt beneath
	 * current one.  If so, place a starting timestamp in its
	 * pil_high_start entry.
	 */
	mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
	if (mask != 0) {
		int nestpil;

		/*
		 * find PIL of nested interrupt
		 */
		nestpil = bsrw_insn((uint16_t)mask);
		ASSERT(nestpil < pil);
		mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = tsc_read();
		/*
		 * (Another high-level interrupt is active below this one,
		 * so there is no need to check for an interrupt
		 * thread.  That will be done by the lowest priority
		 * high-level interrupt active.)
		 */
	} else {
		/*
		 * Check to see if there is a low-level interrupt active.
		 * If so, place a starting timestamp in the thread
		 * structure.
		 */
		kthread_t *t = cpu->cpu_thread;

		if (t->t_flag & T_INTR_THREAD)
			t->t_intr_start = tsc_read();
	}

	mcpu->mcpu_pri = oldpil;
	(void) (*setlvlx)(oldpil, vecnum);

	return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
}

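/*
 * The remaining prolog/epilog pairs handle interrupts at or below
 * LOCK_LEVEL and software interrupts; these run in per-CPU interrupt
 * threads rather than on the high-level interrupt stack.
 */
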
/*
 * Set up the cpu, thread and interrupt thread structures for
 * executing an interrupt thread.  The new stack pointer of the
 * interrupt thread (which *must* be switched to) is returned.
 */
caddr_t
intr_thread_prolog(struct cpu *cpu, caddr_t stackptr, uint_t pil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *volatile it;

	ASSERT(pil > 0);
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Get set to run an interrupt thread.
	 * There should always be an interrupt thread, since we
	 * allocate one for each level on each CPU.
	 *
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
		mcpu->intrstat[t->t_pil] += tsc_read() - t->t_intr_start;
		t->t_intr_start = 0;
	}

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);

	t->t_sp = (uintptr_t)stackptr;	/* mark stack in curthread for resume */

	/*
	 * unlink the interrupt thread off the cpu
	 */
	it = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it->t_link;
	it->t_intr = t;
	it->t_lwp = t->t_lwp;

	/*
	 * (threads on the interrupt thread free list could have state
	 * preset to TS_ONPROC, but it helps in debugging if
	 * they're TS_FREE.)
	 */
	it->t_state = TS_ONPROC;

	cpu->cpu_thread = it;		/* new curthread on this cpu */
	it->t_pil = (uchar_t)pil;
	it->t_pri = intr_pri + (pri_t)pil;
	it->t_intr_start = tsc_read();

	return (it->t_stk);
}

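/*
 * intr_thread_cnt counts the number of times an interrupt thread
 * completed with no pinned thread beneath it, i.e. its handler
 * blocked at some point while it ran (the t_intr == NULL path in
 * intr_thread_epilog() below).
 */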
#ifdef DEBUG
int intr_thread_cnt;
#endif

/*
 * Called with interrupts disabled
 */
void
intr_thread_epilog(struct cpu *cpu, uint_t vec, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t;
	kthread_t *it = cpu->cpu_thread;	/* curthread */
	uint_t pil, basespl;

	pil = it->t_pil;
	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(it->t_intr_start != 0);
	mcpu->intrstat[pil] += tsc_read() - it->t_intr_start;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));
	cpu->cpu_intr_actv &= ~(1 << pil);

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
		/*
		 * The interrupted thread is no longer pinned underneath
		 * the interrupt thread.  This means the interrupt must
		 * have blocked, and the interrupted thread has been
		 * unpinned, and has probably been running around the
		 * system for a while.
		 *
		 * Since there is no longer a thread under this one, put
		 * this interrupt thread back on the CPU's free list and
		 * resume the idle thread which will dispatch the next
		 * thread to run.
		 */
#ifdef DEBUG
		intr_thread_cnt++;
#endif
		cpu->cpu_stats.sys.intrblk++;
		/*
		 * Set CPU's base SPL based on active interrupts bitmask
		 */
		set_base_spl();
		basespl = cpu->cpu_base_spl;
		mcpu->mcpu_pri = basespl;
		(*setlvlx)(basespl, vec);
		(void) splhigh();
		it->t_state = TS_FREE;
		/*
		 * Return interrupt thread to pool
		 */
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		swtch();
		/*NOTREACHED*/
	}

	/*
	 * Return interrupt thread to the pool
	 */
	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;

	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setlvlx)(pil, vec);
	t->t_intr_start = tsc_read();
	cpu->cpu_thread = t;
}

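/*
 * Dispatch a pending software interrupt.  Returns 0 if the highest
 * pending level is at or below either the interrupted PIL or the
 * CPU's base SPL (i.e. there is nothing to dispatch yet); otherwise
 * switches state to a soft interrupt thread and returns that thread's
 * stack pointer, which the caller *must* switch to.
 */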
caddr_t
dosoftint_prolog(
	struct cpu *cpu,
	caddr_t stackptr,
	uint32_t st_pending,
	uint_t oldpil)
{
	kthread_t *t, *volatile it;
	struct machcpu *mcpu = &cpu->cpu_m;
	uint_t pil;

top:
	ASSERT(st_pending == mcpu->mcpu_softinfo.st_pending);

	pil = bsrw_insn((uint16_t)st_pending);
	if (pil <= oldpil || pil <= cpu->cpu_base_spl)
		return (0);

	/*
	 * XX64	Sigh.
	 *
	 * This is a transliteration of the i386 assembler code for
	 * soft interrupts.  One question is "why does this need
	 * to be atomic?"  One possible race is -other- processors
	 * posting soft interrupts to us in set_pending() i.e. the
	 * CPU might get preempted just after the address computation,
	 * but just before the atomic transaction, so another CPU would
	 * actually set the original CPU's st_pending bit.  However,
	 * it looks like it would be simpler to disable preemption there.
	 * Are there other races for which preemption control doesn't work?
	 *
	 * The i386 assembler version -also- checks to see if the bit
	 * being cleared was actually set; if it wasn't, it rechecks
	 * for more.  This seems a bit strange, as the only code that
	 * ever clears the bit is -this- code running with interrupts
	 * disabled on -this- CPU.  This code would probably be cheaper:
	 *
	 *	atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending,
	 *	    ~(1 << pil));
	 *
	 * and t->t_preempt--/++ around set_pending() even cheaper,
	 * but at this point, correctness is critical, so we slavishly
	 * emulate the i386 port.
	 */
	if (atomic_btr32((uint32_t *)&mcpu->mcpu_softinfo.st_pending, pil)
	    == 0) {
		st_pending = mcpu->mcpu_softinfo.st_pending;
		goto top;
	}

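	/*
	 * Commit to the new priority: record it in the machcpu and
	 * program the hardware priority through the setspl hook
	 * (supplied by the platform support module, e.g. to write the
	 * local APIC task priority register).
	 */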
	mcpu->mcpu_pri = pil;
	(*setspl)(pil);

	/*
	 * Get set to run interrupt thread.
	 * There should always be an interrupt thread since we
	 * allocate one for each level on the CPU.
	 */
	it = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it->t_link;

	/*
	 * Note that the code in kcpc_overflow_intr -relies- on the
	 * ordering of events here - in particular that t->t_lwp of
	 * the interrupt thread is set to the pinned thread *before*
	 * curthread is changed.
	 */
	t = cpu->cpu_thread;
	if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
		mcpu->intrstat[t->t_pil] += tsc_read() - t->t_intr_start;
		t->t_intr_start = 0;
	}
	it->t_lwp = t->t_lwp;
	it->t_state = TS_ONPROC;

	/*
	 * Push interrupted thread onto list from new thread.
	 * Set the new thread as the current one.
	 * Set interrupted thread's T_SP because if it is the idle thread,
	 * resume() may use that stack between threads.
	 */

	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
	t->t_sp = (uintptr_t)stackptr;

	it->t_intr = t;
	cpu->cpu_thread = it;

	/*
	 * Set bit for this pil in CPU's interrupt active bitmask.
	 */
	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
	cpu->cpu_intr_actv |= (1 << pil);

	/*
	 * Initialize thread priority level from intr_pri
	 */
	it->t_pil = (uchar_t)pil;
	it->t_pri = (pri_t)pil + intr_pri;
	it->t_intr_start = tsc_read();

	return (it->t_stk);
}

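/*
 * Undo what dosoftint_prolog() did: charge the soft interrupt
 * thread's final time slice, return the thread to the free pool, and
 * resume the pinned thread, or swtch() away if the handler blocked
 * and there is no longer a pinned thread to return to.
 */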
void
dosoftint_epilog(struct cpu *cpu, uint_t oldpil)
{
	struct machcpu *mcpu = &cpu->cpu_m;
	kthread_t *t, *it;
	uint_t pil, basespl;

	it = cpu->cpu_thread;
	pil = it->t_pil;

	cpu->cpu_stats.sys.intr[pil - 1]++;

	ASSERT(cpu->cpu_intr_actv & (1 << pil));
	cpu->cpu_intr_actv &= ~(1 << pil);
	mcpu->intrstat[pil] += tsc_read() - it->t_intr_start;

	/*
	 * If there is still an interrupted thread underneath this one
	 * then the interrupt was never blocked and the return is
	 * fairly simple.  Otherwise it isn't.
	 */
	if ((t = it->t_intr) == NULL) {
		/*
		 * Put thread back on the interrupt thread list.
		 * This was an interrupt thread, so set CPU's base SPL.
		 */
		set_base_spl();
		it->t_state = TS_FREE;
		it->t_link = cpu->cpu_intr_thread;
		cpu->cpu_intr_thread = it;
		(void) splhigh();
		swtch();
		/*NOTREACHED*/
	}
	it->t_link = cpu->cpu_intr_thread;
	cpu->cpu_intr_thread = it;
	it->t_state = TS_FREE;
	cpu->cpu_thread = t;
	if (t->t_flag & T_INTR_THREAD)
		t->t_intr_start = tsc_read();
	basespl = cpu->cpu_base_spl;
	pil = MAX(oldpil, basespl);
	mcpu->mcpu_pri = pil;
	(*setspl)(pil);
}

/*
 * Make the interrupted thread 't' be runnable.
 *
 * Since t->t_sp has already been saved, t->t_pc is all
 * that needs to be set in this function.
 *
 * Returns the interrupt level of the interrupt thread.
 */
int
intr_passivate(
	kthread_t *it,		/* interrupt thread */
	kthread_t *t)		/* interrupted thread */
{
	extern void _sys_rtt();

	ASSERT(it->t_flag & T_INTR_THREAD);
	ASSERT(SA(t->t_sp) == t->t_sp);

	t->t_pc = (uintptr_t)_sys_rtt;
	return (it->t_pil);
}

#endif	/* __amd64 */

/*
 * Allocate threads and stacks for interrupt handling.
 */
#define	NINTR_THREADS	(LOCK_LEVEL-1)	/* number of interrupt threads */

void
init_intr_threads(struct cpu *cp)
{
	int i;

	for (i = 0; i < NINTR_THREADS; i++)
		thread_create_intr(cp);

	cp->cpu_intr_stack = (caddr_t)segkp_get(segkp, INTR_STACK_SIZE,
	    KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED) +
	    INTR_STACK_SIZE - SA(MINFRAME);
}

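/*
 * The "level-N-time" and "level-N-count" statistics created below are
 * visible from userland via kstat(1M), e.g. "kstat -m cpu -n intrstat".
 * Times accumulate in raw TSC ticks and are converted to nanoseconds
 * by cpu_kstat_intrstat_update() when the kstat is read.
 */
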
/*
 * Create interrupt kstats for this CPU.
 */
void
cpu_create_intrstat(cpu_t *cp)
{
	int i;
	kstat_t *intr_ksp;
	kstat_named_t *knp;
	char name[KSTAT_STRLEN];
	zoneid_t zoneid;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pool_pset_enabled())
		zoneid = GLOBAL_ZONEID;
	else
		zoneid = ALL_ZONES;

	intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
	    KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);

	/*
	 * Initialize each PIL's named kstat
	 */
	if (intr_ksp != NULL) {
		intr_ksp->ks_update = cpu_kstat_intrstat_update;
		knp = (kstat_named_t *)intr_ksp->ks_data;
		intr_ksp->ks_private = cp;
		for (i = 0; i < PIL_MAX; i++) {
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-time",
			    i + 1);
			kstat_named_init(&knp[i * 2], name, KSTAT_DATA_UINT64);
			(void) snprintf(name, KSTAT_STRLEN, "level-%d-count",
			    i + 1);
			kstat_named_init(&knp[(i * 2) + 1], name,
			    KSTAT_DATA_UINT64);
		}
		kstat_install(intr_ksp);
	}
}

/*
 * Delete interrupt kstats for this CPU.
 */
void
cpu_delete_intrstat(cpu_t *cp)
{
	kstat_delete_byname_zone("cpu", cp->cpu_id, "intrstat", ALL_ZONES);
}

/*
 * Convert interrupt statistics from CPU ticks to nanoseconds and
 * update kstat.
 */
int
cpu_kstat_intrstat_update(kstat_t *ksp, int rw)
{
	kstat_named_t *knp = ksp->ks_data;
	cpu_t *cpup = (cpu_t *)ksp->ks_private;
	int i;
	hrtime_t hrt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	for (i = 0; i < PIL_MAX; i++) {
		hrt = (hrtime_t)cpup->cpu_m.intrstat[i + 1];
		tsc_scalehrtime(&hrt);
		knp[i * 2].value.ui64 = (uint64_t)hrt;
		knp[(i * 2) + 1].value.ui64 = cpup->cpu_stats.sys.intr[i];
	}

	return (0);
}

/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t interval;
	uint64_t start;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp.  This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler.  intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp.  Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 */
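	/*
	 * t_intr_start is also manipulated by the high-level interrupt
	 * path on this CPU (hilevel_intr_prolog() zeroes it and
	 * hilevel_intr_epilog() restarts it), so a compare-and-swap
	 * loop pairs the computed interval with exactly the timestamp
	 * being cleared; if the swap fails, the interval is recomputed
	 * from the updated timestamp.
	 */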
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = tsc_read() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		CPU->cpu_m.intrstat[t->t_pil] += interval;
	} else
		ASSERT(t->t_intr == NULL);
}

/*
 * An interrupt thread is returning from swtch().  Place a starting timestamp
 * in its thread structure.
 */
void
cpu_intr_swtch_exit(kthread_id_t t)
{
	uint64_t ts;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	do {
		ts = t->t_intr_start;
	} while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
}