/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2018, Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>
#include <sys/zone.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch between
 * states.  Transitions from a sleeping state (LMS_SLEEP and LMS_STOPPED) occur
 * by calling restore_mstate(), which restores a thread to its previous running
 * state.  This code is primarily executed by the dispatcher in disp() before
 * running a process that was put to sleep.  If the thread was not in a
 * sleeping state, this call has little effect other than to update the count
 * of time the thread has spent waiting on run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads, but it tracks user, system, and idle time for cpus.  Cpu microstate
 * accounting does not track interrupt times as there is a pre-existing
 * interrupt accounting mechanism for this purpose.  Cpu microstate accounting
 * tracks time that user threads have spent active, idle, or in the system on a
 * given cpu.  Cpu microstate accounting has fewer states which allows it to
 * have better defined transitions.  The states transition in the following
 * order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go through
 * the system state, and vice-versa for the user state from idle.  The
 * switching of the microstates from user to system is done as part of the
 * regular thread microstate accounting code, except for the idle state which
 * is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).  The routines which handle
 * the growing/shrinking and exponentiation of cpu percentages have been moved
 * here as it now makes more sense for them to be generated from the microstate
 * code.  Cpu percentages are generated similarly to the way they were before;
 * however, now they are based upon high-resolution timestamps and the
 * timestamps are modified at various state changes instead of during a clock()
 * interrupt.  This allows us to generate more accurate cpu percentages which
 * are also in-sync with microstate data.
 */

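/*
 * For orientation, a rough (hypothetical) call sequence for a thread that
 * blocks and is later re-dispatched, per the description above; the real
 * call sites live in the sleep/wakeup and dispatcher paths:
 *
 *	(void) new_mstate(t, LMS_SLEEP);   thread blocks; the time spent in
 *					   its old running state is charged
 *					   and LMS_SLEEP timing begins
 *	...				   thread is made runnable again;
 *					   t_waitrq records when it went onto
 *					   a run queue
 *	restore_mstate(t);		   called from disp(); charges the
 *					   sleep state up to t_waitrq and
 *					   LMS_WAIT_CPU from t_waitrq to now,
 *					   then returns the thread to its
 *					   previous running state
 */
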
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
        kthread_t *t,
        int init_state)
{
        struct mstate *ms;
        klwp_t *lwp;
        hrtime_t curtime;

        ASSERT(init_state != LMS_WAIT_CPU);
        ASSERT((unsigned)init_state < NMSTATES);

        if ((lwp = ttolwp(t)) != NULL) {
                ms = &lwp->lwp_mstate;
                curtime = gethrtime_unscaled();
                ms->ms_prev = LMS_SYSTEM;
                ms->ms_start = curtime;
                ms->ms_term = 0;
                ms->ms_state_start = curtime;
                t->t_mstate = init_state;
                t->t_waitrq = 0;
                t->t_hrtime = curtime;
                if ((t->t_proc_flag & TP_MSACCT) == 0)
                        t->t_proc_flag |= TP_MSACCT;
                bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
        }
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu.
 */
void
init_cpu_mstate(
        cpu_t *cpu,
        int init_state)
{
        ASSERT(init_state != CMS_DISABLED);

        cpu->cpu_mstate = init_state;
        cpu->cpu_mstate_start = gethrtime_unscaled();
        cpu->cpu_waitrq = 0;
        bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Set the cpu state to CMS_DISABLED, e.g. when taking a cpu offline.  We
 * don't actually track this time, but it serves as a useful placeholder
 * state for when we're not doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
        ASSERT(cpu->cpu_mstate != CMS_DISABLED);
        cpu->cpu_mstate = CMS_DISABLED;
        cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define NEW_CPU_MSTATE(state) \
        gen = cpu->cpu_mstate_gen; \
        cpu->cpu_mstate_gen = 0; \
        /* Need membar_producer() here if stores not ordered / TSO */ \
        cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
        cpu->cpu_mstate = state; \
        cpu->cpu_mstate_start = curtime; \
        /* Need membar_producer() here if stores not ordered / TSO */ \
        cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
        cpu_t *cpu = CPU;
        uint16_t gen;

        ASSERT(cpu->cpu_mstate != CMS_DISABLED);
        ASSERT(cmstate < NCMSTATES);
        ASSERT(cmstate != CMS_DISABLED);

        /*
         * This function cannot be re-entrant on a given CPU.  As such,
         * we ASSERT and panic if we are called on behalf of an interrupt.
         * The one exception is for an interrupt which has previously
         * blocked.  Such an interrupt is being scheduled by the dispatcher
         * just like a normal thread, and as such cannot arrive here
         * in a re-entrant manner.
         */

        ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
        ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

        /*
         * LOCKING, or lack thereof:
         *
         * Updates to CPU mstate can only be made by the CPU
         * itself, and the above check to ignore interrupts
         * should prevent recursion into this function on a given
         * processor.  i.e. no possible write contention.
         *
         * However, reads of CPU mstate can occur at any time
         * from any CPU.  Any locking added to this code path
         * would seriously impact syscall performance.  So,
         * instead we have a best-effort protection for readers.
         * The reader will want to account for any time between
         * cpu_mstate_start and the present time.  This requires
         * some guarantees that the reader is getting coherent
         * information.
         *
         * We use a generation counter, which is set to 0 before
         * we start making changes, and is set to a new value
         * after we're done.  Someone reading the CPU mstate
         * should check for the same non-zero value of this
         * counter both before and after reading all state.  The
         * important point is that the reader is not a
         * performance-critical path, but this function is.
         *
         * The ordering of writes is critical.  cpu_mstate_gen must
         * be visibly zero on all CPUs before we change cpu_mstate
         * and cpu_mstate_start.  Additionally, cpu_mstate_gen must
         * not be restored to oldgen+1 until after all of the other
         * writes have become visible.
         *
         * Normally one puts membar_producer() calls to accomplish
         * this.  Unfortunately this routine is extremely performance
         * critical (esp. in syscall_mstate below) and we cannot
         * afford the additional time, particularly on some x86
         * architectures with extremely slow sfence calls.  On a
         * CPU which guarantees write ordering (including sparc, x86,
         * and amd64) this is not a problem.  The compiler could still
         * reorder the writes, so we make the four cpu fields
         * volatile to prevent this.
         *
         * TSO warning: should we port to a non-TSO (or equivalent)
         * CPU, this will break.
         *
         * The reader still needs the membar_consumer() calls because,
         * although the volatiles prevent the compiler from reordering
         * loads, the CPU can still do so.
         */

        NEW_CPU_MSTATE(cmstate);
}

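/*
 * A minimal sketch (for illustration only, not the authoritative reader) of
 * how a consumer of cpu_mstate/cpu_acct is expected to pair with the
 * generation counter that new_cpu_mstate() maintains; the local variable
 * names here are hypothetical:
 *
 *	do {
 *		gen = cpu->cpu_mstate_gen;
 *		membar_consumer();
 *		state = cpu->cpu_mstate;
 *		start = cpu->cpu_mstate_start;
 *		times[i] = cpu->cpu_acct[i];	(for each state of interest)
 *		membar_consumer();
 *	} while ((gen == 0) || (gen != cpu->cpu_mstate_gen));
 *
 * A generation of zero means an update is in progress; a changed generation
 * means the snapshot spans an update.  Either way the reader must retry.
 */
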
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
        hrtime_t aggr_time;
        hrtime_t now;
        hrtime_t waitrq;
        hrtime_t state_start;
        struct mstate *ms;
        klwp_t *lwp;
        int mstate;

        ASSERT(THREAD_LOCK_HELD(t));

        if ((lwp = ttolwp(t)) == NULL)
                return (0);

        mstate = t->t_mstate;
        waitrq = t->t_waitrq;
        ms = &lwp->lwp_mstate;
        state_start = ms->ms_state_start;

        aggr_time = ms->ms_acct[LMS_USER] +
            ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

        now = gethrtime_unscaled();

        /*
         * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
         * inconsistent, so it is possible that now < state_start.
         */
        if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
                /* if waitrq is zero, count all of the time. */
                if (waitrq == 0) {
                        waitrq = now;
                }

                if (waitrq > state_start) {
                        aggr_time += waitrq - state_start;
                }
        }

        scalehrtime(&aggr_time);
        return (aggr_time);
}

/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
        struct mstate *const ms = &ttolwp(t)->lwp_mstate;

        int mstate;
        hrtime_t now;
        hrtime_t state_start;
        hrtime_t waitrq;
        hrtime_t aggr_onp;
        hrtime_t aggr_run;

        ASSERT(THREAD_LOCK_HELD(t));
        ASSERT(t->t_procp->p_flag & SSYS);
        ASSERT(ttolwp(t) != NULL);

        /* shouldn't be any non-SYSTEM on-CPU time */
        ASSERT(ms->ms_acct[LMS_USER] == 0);
        ASSERT(ms->ms_acct[LMS_TRAP] == 0);

        mstate = t->t_mstate;
        waitrq = t->t_waitrq;
        state_start = ms->ms_state_start;

        aggr_onp = ms->ms_acct[LMS_SYSTEM];
        aggr_run = ms->ms_acct[LMS_WAIT_CPU];

        now = gethrtime_unscaled();

        /* if waitrq == 0, then there is no time to account to TS_RUN */
        if (waitrq == 0)
                waitrq = now;

        /* If there is system time to accumulate, do so */
        if (mstate == LMS_SYSTEM && state_start < waitrq)
                aggr_onp += waitrq - state_start;

        if (waitrq < now)
                aggr_run += now - waitrq;

        scalehrtime(&aggr_onp);
        scalehrtime(&aggr_run);

        *onproc = aggr_onp;
        *runnable = aggr_run;
}

/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
        struct mstate *ms;
        kthread_t *t;
        klwp_t *lwp;
        hrtime_t aggr_time;
        hrtime_t scaledtime;

        ASSERT(MUTEX_HELD(&p->p_lock));
        ASSERT((unsigned)a_state < NMSTATES);

        if ((unsigned)a_state >= NMSTATES)
                return (0);

        aggr_time = p->p_acct[a_state];
        if (a_state == LMS_SYSTEM)
                aggr_time += p->p_acct[LMS_TRAP];

        t = p->p_tlist;
        if (t == NULL)
                return (aggr_time);

        do {
                if (t->t_proc_flag & TP_LWPEXIT)
                        continue;

                lwp = ttolwp(t);
                ms = &lwp->lwp_mstate;
                scaledtime = ms->ms_acct[a_state];
                scalehrtime(&scaledtime);
                aggr_time += scaledtime;
                if (a_state == LMS_SYSTEM) {
                        scaledtime = ms->ms_acct[LMS_TRAP];
                        scalehrtime(&scaledtime);
                        aggr_time += scaledtime;
                }
        } while ((t = t->t_forw) != p->p_tlist);

        return (aggr_time);
}

/*
 * Account the time spent in the previous microstate (fromms) to the current
 * thread and switch it to the new microstate (toms), updating the zone's
 * per-cpu user/system totals and the CPU microstate to match.
 */
void
syscall_mstate(int fromms, int toms)
{
        kthread_t *t = curthread;
        zone_t *z = ttozone(t);
        struct mstate *ms;
        hrtime_t *mstimep;
        hrtime_t curtime;
        klwp_t *lwp;
        hrtime_t newtime;
        cpu_t *cpu;
        uint16_t gen;

        if ((lwp = ttolwp(t)) == NULL)
                return;

        ASSERT(fromms < NMSTATES);
        ASSERT(toms < NMSTATES);

        ms = &lwp->lwp_mstate;
        mstimep = &ms->ms_acct[fromms];
        curtime = gethrtime_unscaled();
        newtime = curtime - ms->ms_state_start;
        while (newtime < 0) {
                curtime = gethrtime_unscaled();
                newtime = curtime - ms->ms_state_start;
        }
        *mstimep += newtime;
        t->t_mstate = toms;
        ms->ms_state_start = curtime;
        ms->ms_prev = fromms;
        kpreempt_disable(); /* don't change CPU while changing CPU's state */
        cpu = CPU;
        ASSERT(cpu == t->t_cpu);

        if (fromms == LMS_USER) {
                CPU_UARRAY_VAL(z->zone_ustate, cpu->cpu_id,
                    ZONE_USTATE_UTIME) += newtime;
        } else if (fromms == LMS_SYSTEM) {
                CPU_UARRAY_VAL(z->zone_ustate, cpu->cpu_id,
                    ZONE_USTATE_STIME) += newtime;
        }

        if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
                NEW_CPU_MSTATE(CMS_SYSTEM);
        } else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
                NEW_CPU_MSTATE(CMS_USER);
        }
        kpreempt_enable();
}

#undef NEW_CPU_MSTATE

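/*
 * For context, a sketch of how syscall_mstate() is typically invoked from
 * the system call entry and exit paths (illustrative only; consult the
 * machine-dependent syscall handlers for the real call sites):
 *
 *	syscall_mstate(LMS_USER, LMS_SYSTEM);	entering the kernel
 *	...					perform the system call
 *	syscall_mstate(LMS_SYSTEM, LMS_USER);	returning to userland
 */
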
/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * Return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define LSHIFT	31
#define LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
        int i;
        uint64_t ull;
        uint32_t ui;

#ifdef DEBUG
        expx_cnt++;
#endif
        /*
         * By the formula:
         *	exp(-x) = exp(-x/2) * exp(-x/2)
         * we keep halving x until it becomes small enough for
         * the following approximation to be accurate enough:
         *	exp(-x) = 1 - x
         * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
         * Our final error will be smaller than 4%.
         */

        /*
         * Use a uint64_t for the initial shift calculation.
         */
        ull = x >> (LSHIFT-2);

        /*
         * Short circuit:
         * A number this large produces effectively 0 (actually .005).
         * This way, we will never do more than 5 multiplies.
         */
        if (ull >= (1 << 5))
                return (0);

        ui = ull;	/* OK. Now we can use a uint_t. */
        for (i = 0; ui != 0; i++)
                ui >>= 1;

        if (i != 0) {
#ifdef DEBUG
                expx_mul += i;	/* seldom happens */
#endif
                x >>= i;
        }

        /*
         * Now we compute 1 - x and square it the number of times
         * that we halved x above to produce the final result:
         */
        x = LSI_ONE - x;
        while (i--)
                x = (x * x) >> LSHIFT;

        return (x);
}

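/*
 * A hypothetical worked example of the scaled arithmetic above: let x
 * represent 0.5, i.e. x = LSI_ONE / 2 = 1 << 30.  Then
 * x >> (LSHIFT - 2) == 2, so the bit-counting loop sets i = 2 and x is
 * halved twice to 1 << 28 (0.125).  The linear approximation LSI_ONE - x
 * represents 0.875, and squaring it i = 2 times yields roughly
 * 0.586 * LSI_ONE, versus a true exp(-0.5) of about 0.607.
 */
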
/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
        uint64_t delta = (uint64_t)nsec;

        delta /= cpu_decay_factor;
        return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
        return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}


/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define T_ONPROC(kt) \
        ((kt)->t_mstate < LMS_SLEEP)
#define T_OFFPROC(kt) \
        ((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
        hrtime_t delta;
        hrtime_t hrlb;
        uint_t pctcpu;
        uint_t npctcpu;

        /*
         * This routine can get called at PIL > 0, so this *has* to be
         * done atomically.  Holding locks here causes bad things to happen.
         * (read: deadlock).
         */

        do {
                pctcpu = t->t_pctcpu;
                hrlb = t->t_hrtime;
                delta = newtime - hrlb;
                if (delta < 0) {
                        newtime = gethrtime_unscaled();
                        delta = newtime - hrlb;
                }
                t->t_hrtime = newtime;
                scalehrtime(&delta);
                if (T_ONPROC(t) && t->t_waitrq == 0) {
                        npctcpu = cpu_grow(pctcpu, delta);
                } else {
                        npctcpu = cpu_decay(pctcpu, delta);
                }
        } while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

        return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
        struct mstate *ms;
        unsigned state;
        hrtime_t *mstimep;
        hrtime_t curtime;
        hrtime_t newtime;
        hrtime_t oldtime;
        hrtime_t ztime;
        hrtime_t origstart;
        klwp_t *lwp;
        zone_t *z;

        ASSERT(new_state != LMS_WAIT_CPU);
        ASSERT((unsigned)new_state < NMSTATES);
        ASSERT(t == curthread || THREAD_LOCK_HELD(t));

        /*
         * Don't do microstate processing for threads without a lwp (kernel
         * threads).  Also, if we're an interrupt thread that is pinning
         * another thread, our t_mstate hasn't been initialized.  We'd be
         * modifying the microstate of the underlying lwp which doesn't
         * realize that it's pinned.  In this case, also don't change the
         * microstate.
         */
        if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
                return (LMS_SYSTEM);

        curtime = gethrtime_unscaled();

        /* adjust cpu percentages before we go any further */
        (void) cpu_update_pct(t, curtime);

        ms = &lwp->lwp_mstate;
        state = t->t_mstate;
        origstart = ms->ms_state_start;
        do {
                switch (state) {
                case LMS_TFAULT:
                case LMS_DFAULT:
                case LMS_KFAULT:
                case LMS_USER_LOCK:
                        mstimep = &ms->ms_acct[LMS_SYSTEM];
                        break;
                default:
                        mstimep = &ms->ms_acct[state];
                        break;
                }
                ztime = newtime = curtime - ms->ms_state_start;
                if (newtime < 0) {
                        curtime = gethrtime_unscaled();
                        oldtime = *mstimep - 1; /* force CAS to fail */
                        continue;
                }
                oldtime = *mstimep;
                newtime += oldtime;
                t->t_mstate = new_state;
                ms->ms_state_start = curtime;
        } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
            oldtime);

        /*
         * Remember the previous running microstate.
         */
        if (state != LMS_SLEEP && state != LMS_STOPPED)
                ms->ms_prev = state;

        /*
         * Switch CPU microstate if appropriate
         */

        kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */

        ASSERT(t->t_cpu == CPU);

        /*
         * When the system boots the initial startup thread will have a
         * ms_state_start of 0 which would add a huge system time to the
         * global zone.  We want to skip aggregating that initial bit of work.
         */
        if (origstart != 0) {
                z = ttozone(t);
                if (state == LMS_USER) {
                        CPU_UARRAY_VAL(z->zone_ustate, t->t_cpu->cpu_id,
                            ZONE_USTATE_UTIME) += ztime;
                } else if (state == LMS_SYSTEM) {
                        CPU_UARRAY_VAL(z->zone_ustate, t->t_cpu->cpu_id,
                            ZONE_USTATE_STIME) += ztime;
                }
        }

        if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
                if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
                        new_cpu_mstate(CMS_USER, curtime);
                else if (new_state != LMS_USER &&
                    t->t_cpu->cpu_mstate != CMS_SYSTEM)
                        new_cpu_mstate(CMS_SYSTEM, curtime);
        }
        kpreempt_enable();

        return (ms->ms_prev);
}

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
        struct mstate *ms;
        hrtime_t *mstimep;
        klwp_t *lwp;
        hrtime_t curtime;
        hrtime_t waitrq;
        hrtime_t newtime;
        hrtime_t oldtime;
        hrtime_t waittime;
        zone_t *z;

        /*
         * Don't call restore_mstate for threads without lwps (kernel
         * threads).
         *
         * Threads with t_intr set shouldn't be in the dispatcher, so assert
         * that nobody here has t_intr.
         */
        ASSERT(t->t_intr == NULL);

        if ((lwp = ttolwp(t)) == NULL)
                return;

        curtime = gethrtime_unscaled();
        (void) cpu_update_pct(t, curtime);
        ms = &lwp->lwp_mstate;
        ASSERT((unsigned)t->t_mstate < NMSTATES);
        do {
                switch (t->t_mstate) {
                case LMS_SLEEP:
                        /*
                         * Update the timer for the current sleep state.
                         */
                        ASSERT((unsigned)ms->ms_prev < NMSTATES);
                        switch (ms->ms_prev) {
                        case LMS_TFAULT:
                        case LMS_DFAULT:
                        case LMS_KFAULT:
                        case LMS_USER_LOCK:
                                mstimep = &ms->ms_acct[ms->ms_prev];
                                break;
                        default:
                                mstimep = &ms->ms_acct[LMS_SLEEP];
                                break;
                        }
                        /*
                         * Return to the previous run state.
                         */
                        t->t_mstate = ms->ms_prev;
                        break;
                case LMS_STOPPED:
                        mstimep = &ms->ms_acct[LMS_STOPPED];
                        /*
                         * Return to the previous run state.
                         */
                        t->t_mstate = ms->ms_prev;
                        break;
                case LMS_TFAULT:
                case LMS_DFAULT:
                case LMS_KFAULT:
                case LMS_USER_LOCK:
                        mstimep = &ms->ms_acct[LMS_SYSTEM];
                        break;
                default:
                        mstimep = &ms->ms_acct[t->t_mstate];
                        break;
                }
                waitrq = t->t_waitrq;	/* hopefully atomic */
                if (waitrq == 0) {
                        waitrq = curtime;
                }
                t->t_waitrq = 0;
                newtime = waitrq - ms->ms_state_start;
                if (newtime < 0) {
                        curtime = gethrtime_unscaled();
                        oldtime = *mstimep - 1; /* force CAS to fail */
                        continue;
                }
                oldtime = *mstimep;
                newtime += oldtime;
        } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
            oldtime);

        /*
         * Update the WAIT_CPU timer and per-cpu waitrq total.
         */
        z = ttozone(t);
        waittime = curtime - waitrq;
        ms->ms_acct[LMS_WAIT_CPU] += waittime;

        /*
         * We are in a disp context where we're not going to migrate CPUs.
         */
        CPU_UARRAY_VAL(z->zone_ustate, CPU->cpu_id,
            ZONE_USTATE_WTIME) += waittime;

        CPU->cpu_waitrq += waittime;
        ms->ms_state_start = curtime;
}

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
        struct mstate *ms;
        proc_t *p = ttoproc(t);
        klwp_t *lwp = ttolwp(t);
        int i;
        hrtime_t tmp;

        ASSERT(MUTEX_HELD(&p->p_lock));

        ms = &lwp->lwp_mstate;
        (void) new_mstate(t, LMS_STOPPED);
        ms->ms_term = ms->ms_state_start;
        tmp = ms->ms_term - ms->ms_start;
        scalehrtime(&tmp);
        p->p_mlreal += tmp;
        for (i = 0; i < NMSTATES; i++) {
                tmp = ms->ms_acct[i];
                scalehrtime(&tmp);
                p->p_acct[i] += tmp;
        }
        p->p_ru.minflt += lwp->lwp_ru.minflt;
        p->p_ru.majflt += lwp->lwp_ru.majflt;
        p->p_ru.nswap += lwp->lwp_ru.nswap;
        p->p_ru.inblock += lwp->lwp_ru.inblock;
        p->p_ru.oublock += lwp->lwp_ru.oublock;
        p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
        p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
        p->p_ru.nsignals += lwp->lwp_ru.nsignals;
        p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
        p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
        p->p_ru.sysc += lwp->lwp_ru.sysc;
        p->p_ru.ioch += lwp->lwp_ru.ioch;
        p->p_defunct++;
}