/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information. The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting). In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch
 * between states. Transitions from a sleeping state (LMS_SLEEP and
 * LMS_STOPPED) occur by calling restore_mstate(), which restores a thread
 * to its previously running state. This code is primarily executed by the
 * dispatcher in disp() before running a process that was put to sleep. If
 * the thread was not in a sleeping state, this call has little effect other
 * than to update the count of time the thread has spent waiting on
 * run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads, but it tracks user, system, and idle time for cpus. Cpu
 * microstate accounting does not track interrupt times, as there is a
 * pre-existing interrupt accounting mechanism for this purpose. Cpu
 * microstate accounting tracks time that user threads have spent active,
 * idle, or in the system on a given cpu. Cpu microstate accounting has
 * fewer states, which allows it to have better defined transitions. The
 * states transition in the following order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go
 * through the system state, and vice-versa for the user state from idle.
 * The switching of the microstates from user to system is done as part of
 * the regular thread microstate accounting code, except for the idle state,
 * which is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now computed from microstate accounting information
 * (the same is true for load averages). The routines that handle the
 * growing/shrinking and exponentiation of cpu percentages have been moved
 * here, as it now makes more sense for them to be generated from the
 * microstate code. Cpu percentages are generated similarly to the way they
 * were before; however, they are now based upon high-resolution timestamps,
 * and the timestamps are modified at various state changes instead of
 * during a clock() interrupt. This allows us to generate more accurate cpu
 * percentages that are also in sync with the microstate data.
 */
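/*
 * Illustrative call sequence (a sketch, not a fixed protocol): a thread
 * making a blocking system call might move through the states described
 * above roughly as follows.
 *
 *	syscall_mstate(LMS_USER, LMS_SYSTEM);	user thread enters the kernel
 *	(void) new_mstate(t, LMS_SLEEP);	blocks on a synch. object
 *	restore_mstate(t);			dispatcher picks it back up;
 *						time since wakeup is charged
 *						to LMS_WAIT_CPU
 *	syscall_mstate(LMS_SYSTEM, LMS_USER);	returns to userland
 */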
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu.
 */
void
init_cpu_mstate(
	cpu_t	*cpu,
	int	init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Sets cpu state to CMS_DISABLED. We don't actually track this time,
 * but it serves as a useful placeholder state for when we're not
 * doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)						\
	gen = cpu->cpu_mstate_gen;					\
	cpu->cpu_mstate_gen = 0;					\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state;					\
	cpu->cpu_mstate_start = curtime;				\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;
void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU. As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked. Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor, i.e. there is no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU. Any locking added to this code path
	 * would seriously impact syscall performance. So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time. This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done. Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state. The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical. cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start. Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would use membar_producer() calls to accomplish
	 * this. Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls. On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem. The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */

	NEW_CPU_MSTATE(cmstate);
}
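/*
 * Reader-side sketch of the generation protocol described above. This is
 * a minimal illustration only (hence the #if 0); get_cpu_mstate_snapshot()
 * is a hypothetical helper, not part of this file's interface. The reader
 * snapshots the generation, reads all of the state, then re-checks that
 * the generation is non-zero and unchanged; otherwise a writer was active
 * and the snapshot must be retried.
 */
#if 0
static void
get_cpu_mstate_snapshot(cpu_t *cpu, hrtime_t times[NCMSTATES])
{
	int i;
	uint16_t gen;

	do {
		gen = cpu->cpu_mstate_gen;
		membar_consumer();	/* read gen before any state */
		for (i = 0; i < NCMSTATES; i++)
			times[i] = cpu->cpu_acct[i];
		/* charge the in-progress interval to the current state */
		times[cpu->cpu_mstate] +=
		    gethrtime_unscaled() - cpu->cpu_mstate_start;
		membar_consumer();	/* read all state before re-check */
	} while (gen == 0 || gen != cpu->cpu_mstate_gen);
}
#endif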
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if ((mstate == LMS_USER || mstate == LMS_SYSTEM ||
	    mstate == LMS_TRAP) && (now > state_start)) {
		aggr_time += now - state_start;
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}

/*
 * Return an aggregation of microstate times in scaled nanoseconds
 * (high-res time). This takes into account that p_acct is already
 * scaled while ms_acct is not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

/*
 * Switch the thread's microstate between LMS_USER and LMS_SYSTEM at
 * the system call boundary, and move the cpu microstate to match.
 */
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE
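/*
 * Usage sketch for the aggregation routines above (hypothetical caller):
 * with p_lock held, as mstate_aggr_state() asserts, a /proc-style
 * consumer could total a process's system time, traps included, in
 * scaled nanoseconds:
 *
 *	mutex_enter(&p->p_lock);
 *	systime = mstate_aggr_state(p, LMS_SYSTEM);
 *	mutex_exit(&p->p_lock);
 */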
/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp. The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 * Given x as a 64-bit non-negative scaled integer of arbitrary magnitude,
 * Return exp(-x) as a 64-bit scaled integer in the range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 * The binary point is to the right of the high-order bit
 * of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK. Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu: pct * exp(-tau),
 * where 'tau' is the time delta multiplied by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu: 1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}
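/*
 * Worked example of the fixed-point arithmetic above (illustration only):
 * with LSHIFT == 31, the value 0.5 is represented as 1 << 30. For
 * exp_x(1 << 30), ull is (1 << 30) >> 29 == 2, so the bit-counting loop
 * yields i == 2 and x is halved twice to 0.125. The approximation 1 - x
 * gives 0.875, which is then squared twice:
 *
 *	0.875^4 ~= 0.586, versus the true exp(-0.5) ~= 0.607
 *
 * about 3% low, within the error bound described in exp_x().
 */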
/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically. Holding locks here causes bad things to happen
	 * (read: deadlock).
	 */

	do {
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (cas32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information. Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	klwp_t *lwp;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	/*
	 * Don't do microstate processing for threads without a lwp (kernel
	 * threads). Also, if we're an interrupt thread that is pinning
	 * another thread, our t_mstate hasn't been initialized. We'd be
	 * modifying the microstate of the underlying lwp, which doesn't
	 * realize that it's pinned. In this case, also don't change the
	 * microstate.
	 */
	if (((lwp = ttolwp(t)) == NULL) || t->t_intr)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}
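/*
 * Caller sketch (hypothetical; the actual callers live in the sleep and
 * wait paths): a blocking primitive records the state change before
 * switching away, and the dispatcher undoes it on wakeup:
 *
 *	(void) new_mstate(curthread, LMS_SLEEP);
 *	... enqueue on a sleep queue, then swtch() ...
 *	restore_mstate(t);		later, from disp()
 */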
/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;

	/*
	 * Don't call restore_mstate() for threads without lwps (kernel
	 * threads).
	 *
	 * Threads with t_intr set shouldn't be in the dispatcher, so assert
	 * that nobody here has t_intr.
	 */
	ASSERT(t->t_intr == NULL);

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		if (waitrq == 0) {
			waitrq = curtime;
		}
		t->t_waitrq = 0;
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	ms->ms_acct[LMS_WAIT_CPU] += (curtime - waitrq);
	CPU->cpu_waitrq += (curtime - waitrq);
	ms->ms_state_start = curtime;
}

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process. (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt += lwp->lwp_ru.minflt;
	p->p_ru.majflt += lwp->lwp_ru.majflt;
	p->p_ru.nswap += lwp->lwp_ru.nswap;
	p->p_ru.inblock += lwp->lwp_ru.inblock;
	p->p_ru.oublock += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc += lwp->lwp_ru.sysc;
	p->p_ru.ioch += lwp->lwp_ru.ioch;
	p->p_defunct++;
}
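/*
 * Exit-path sketch (hypothetical caller): an exiting lwp folds its
 * accounting into the process, with p_lock held as term_mstate()
 * asserts, before the lwp's data structures are torn down:
 *
 *	mutex_enter(&p->p_lock);
 *	term_mstate(t);
 *	...
 *	mutex_exit(&p->p_lock);
 */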