/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/zone.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate(), which restores a thread to its previously
 * running state.  This code is primarily executed by the dispatcher in disp()
 * before running a process that was put to sleep.  If the thread was not in a
 * sleeping state, this call has little effect other than to update the count
 * of time the thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads, but it tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times, as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states, which allows it to
 * have better defined transitions.  The states transition in the following
 * order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go through
 * the system state, and vice-versa for the user state from idle.  The
 * switching of the microstates from user to system is done as part of the
 * regular thread microstate accounting code, except for the idle state, which
 * is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).  The routines which handle
 * the growing/shrinking and exponentiation of cpu percentages have been moved
 * here, as it now makes more sense for them to be generated from the
 * microstate code.  Cpu percentages are generated similarly to the way they
 * were before; however, now they are based upon high-resolution timestamps and
 * the timestamps are modified at various state changes instead of during a
 * clock() interrupt.  This allows us to generate more accurate cpu percentages
 * which are also in-sync with microstate data.
 */
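
/*
 * Illustrative example (not part of the original file): a typical consumer of
 * the thread microstate interface brackets a unit of work with new_mstate()
 * and then restores the previous state.  The sketch below only shows the
 * calling pattern; the surrounding fault-handler context is hypothetical.
 *
 *	int prev;
 *
 *	prev = new_mstate(curthread, LMS_KFAULT);	(start charging fault time)
 *	(resolve the fault)
 *	(void) new_mstate(curthread, prev);		(back to the prior state)
 *
 * Sleep and wakeup are the asymmetric case: the code putting a thread to
 * sleep calls new_mstate(t, LMS_SLEEP), while the dispatcher later calls
 * restore_mstate(t) to return the thread to its pre-sleep state.
 */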

/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu.
 */

void
init_cpu_mstate(
	cpu_t *cpu,
	int init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Sets the cpu microstate to CMS_DISABLED.  We don't actually track this
 * time, but it serves as a useful placeholder state for when we're not
 * doing anything.
 */

void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)	\
	gen = cpu->cpu_mstate_gen; \
	cpu->cpu_mstate_gen = 0; \
	/* Need membar_producer() here if stores not ordered / TSO */ \
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state; \
	cpu->cpu_mstate_start = curtime; \
	/* Need membar_producer() here if stores not ordered / TSO */ \
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU.  As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked.  Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor, i.e. no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU.  Any locking added to this code path
	 * would seriously impact syscall performance.  So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time.  This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done.  Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state.  The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical.  cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start.  Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would use membar_producer() calls to accomplish
	 * this.  Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls.  On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem.  The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */

	NEW_CPU_MSTATE(cmstate);
}
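
/*
 * Illustrative example (not part of the original file): a reader of the
 * per-cpu microstate fields is expected to follow the generation-counter
 * protocol described above.  The sketch below shows the general shape of
 * such a reader; the local variable names are hypothetical.
 *
 *	uint16_t gen;
 *	int state;
 *	hrtime_t start, now, times[NCMSTATES];
 *
 *	do {
 *		gen = cpu->cpu_mstate_gen;	(retry while zero: update in progress)
 *		membar_consumer();
 *		state = cpu->cpu_mstate;
 *		start = cpu->cpu_mstate_start;
 *		(copy cpu->cpu_acct[] into times[])
 *		membar_consumer();
 *	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
 *
 *	now = gethrtime_unscaled();
 *	times[state] += now - start;	(credit the still-open interval)
 */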

/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0) {
			waitrq = now;
		}

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}

/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}
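
/*
 * Illustrative example (not part of the original file): since the values
 * reported above may transiently run backwards, a caller that needs
 * monotonically non-decreasing counters (e.g. to feed a kstat) can clamp
 * against what it last reported.  The "last" structure here is hypothetical.
 *
 *	mstate_systhread_times(t, &onproc, &runnable);
 *	if (onproc < last->onproc)
 *		onproc = last->onproc;		(never report a decrease)
 *	if (runnable < last->runnable)
 *		runnable = last->runnable;
 *	last->onproc = onproc;
 *	last->runnable = runnable;
 */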

/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

/*
 * Update the LWP's microstate accounting for a transition between states
 * (typically LMS_USER <-> LMS_SYSTEM on the system-call path) and keep the
 * CPU microstate in sync.
 */
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	zone_t *z = ttozone(t);
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	if (fromms == LMS_USER)
		atomic_add_64(&z->zone_utime, newtime);
	else if (fromms == LMS_SYSTEM)
		atomic_add_64(&z->zone_stime, newtime);
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE
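
/*
 * Illustrative example (not part of the original file): the system-call
 * handlers are expected to bracket kernel work with syscall_mstate() so that
 * user time stops accumulating while a syscall runs.  The call sites below
 * are a sketch of that pattern, not the literal entry/exit code.
 *
 *	(on entry to the kernel)	syscall_mstate(LMS_USER, LMS_SYSTEM);
 *	(syscall work)
 *	(on return to userland)		syscall_mstate(LMS_SYSTEM, LMS_USER);
 */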

/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK.  Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}
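
/*
 * Illustrative worked example (not part of the original file): for
 * x = LSI_ONE (1.0 in scaled form), ull = x >> 29 = 4, so the loop above
 * sets i = 3 and x is halved three times down to 0.125.  The approximation
 * then starts from 1 - 0.125 = 0.875 and squares it three times, giving
 * roughly 0.875^8 ~= 0.344, against a true value of exp(-1) ~= 0.368.
 * That accuracy is plenty for decaying cpu percentages.
 */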

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}
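
/*
 * Illustrative arithmetic (not part of the original file): working backwards
 * from the "approximately 20% over five seconds" target above, we need
 * exp(-tau) ~= 0.8, i.e. tau ~= 0.22.  Since tau is nsec / cpu_decay_factor
 * interpreted as a scaled integer (2^31 == 1.0), that puts cpu_decay_factor
 * on the order of 5e9 / (0.22 * 2^31), i.e. roughly 10.
 */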

/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically.  Holding locks here causes bad things to happen
	 * (read: deadlock).
	 */

	do {
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t ztime;
	hrtime_t origstart;
	klwp_t *lwp;
	zone_t *z;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without a lwp (kernel
	 * threads).  Also, if we're an interrupt thread that is pinning
	 * another thread, our t_mstate hasn't been initialized.  We'd be
	 * modifying the microstate of the underlying lwp, which doesn't
	 * realize that it's pinned.  In this case, also don't change the
	 * microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	origstart = ms->ms_state_start;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		ztime = newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * When the system boots, the initial startup thread will have a
	 * ms_state_start of 0, which would add a huge system time to the
	 * global zone.  We want to skip aggregating that initial bit of work.
	 */
	if (origstart != 0) {
		z = ttozone(t);
		if (state == LMS_USER)
			atomic_add_64(&z->zone_utime, ztime);
		else if (state == LMS_SYSTEM)
			atomic_add_64(&z->zone_stime, ztime);
	}

	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;
	hrtime_t waittime;
	zone_t *z;

	/*
	 * Don't call restore_mstate() for threads without lwps (kernel
	 * threads).
	 *
	 * Threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that nobody here has t_intr.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0) {
			waitrq = curtime;
		}
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
	    oldtime);

	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	z = ttozone(t);
	waittime = curtime - waitrq;
	ms->ms_acct[LMS_WAIT_CPU] += waittime;
	atomic_add_64(&z->zone_wtime, waittime);
	CPU->cpu_waitrq += waittime;
	ms->ms_state_start = curtime;
}
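
/*
 * Illustrative example (not part of the original file): restore_mstate() is
 * meant to be called by the dispatcher, just before it switches to the thread
 * it has selected.  Roughly:
 *
 *	next = (highest-priority runnable thread taken off its run queue);
 *	restore_mstate(next);	(charge LMS_WAIT_CPU, return to pre-sleep state)
 *	(resume next)
 */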

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (The lwp is terminating.)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt += lwp->lwp_ru.minflt;
	p->p_ru.majflt += lwp->lwp_ru.majflt;
	p->p_ru.nswap += lwp->lwp_ru.nswap;
	p->p_ru.inblock += lwp->lwp_ru.inblock;
	p->p_ru.oublock += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc += lwp->lwp_ru.sysc;
	p->p_ru.ioch += lwp->lwp_ru.ioch;
	p->p_defunct++;
}