/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/msacct.h>
#include <sys/time.h>

/*
 * Mega-theory block comment:
 *
 * Microstate accounting uses finite states and the transitions between these
 * states to measure timing and accounting information.  The state information
 * is presently tracked for threads (via microstate accounting) and cpus (via
 * cpu microstate accounting).  In each case, these accounting mechanisms use
 * states and transitions to measure time spent in each state instead of
 * clock-based sampling methodologies.
 *
 * For microstate accounting:
 * State transitions are accomplished by calling new_mstate() to switch
 * between states.  Transitions from a sleeping state (LMS_SLEEP and
 * LMS_STOPPED) occur by calling restore_mstate(), which restores a thread
 * to its previously running state.  This code is primarily executed by the
 * dispatcher in disp() before running a thread that was put to sleep.  If
 * the thread was not in a sleeping state, this call has little effect other
 * than to update the count of time the thread has spent waiting on
 * run-queues in its lifetime.
 *
 * For cpu microstate accounting:
 * Cpu microstate accounting is similar to the microstate accounting for
 * threads, but it tracks user, system, and idle time for cpus.  Cpu
 * microstate accounting does not track interrupt times, as there is a
 * pre-existing interrupt accounting mechanism for this purpose.  Cpu
 * microstate accounting tracks time that user threads have spent active,
 * idle, or in the system on a given cpu.  Cpu microstate accounting has
 * fewer states, which allows it to have better defined transitions.  The
 * states transition in the following order:
 *
 *	CMS_USER <-> CMS_SYSTEM <-> CMS_IDLE
 *
 * In order to get to the idle state, the cpu microstate must first go
 * through the system state, and vice-versa for the user state from idle.
 * The switching of the microstates from user to system is done as part of
 * the regular thread microstate accounting code, except for the idle state,
 * which is switched by the dispatcher before it runs the idle loop.
 *
 * Cpu percentages:
 * Cpu percentages are now handled by and based upon microstate accounting
 * information (the same is true for load averages).
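 * The routines which handle the growing/shrinking and exponentiation of cpu
 * percentages have been moved here as it now makes more sense for them to be
 * generated from the microstate code.  Cpu percentages are generated
 * similarly to the way they were before; however, now they are based upon
 * high-resolution timestamps and the timestamps are modified at various
 * state changes instead of during a clock() interrupt.  This allows us to
 * generate more accurate cpu percentages which are also in-sync with
 * microstate data.
 */

/*
 * Illustrative example (not exhaustive): a thread making a blocking system
 * call would typically move through the microstates roughly as follows,
 * assuming the usual syscall and dispatcher paths:
 *
 *	LMS_USER   -> LMS_SYSTEM	syscall entry, via syscall_mstate()
 *	LMS_SYSTEM -> LMS_SLEEP		the thread blocks, via new_mstate()
 *	(after wakeup, time spent on the run queue is charged to LMS_WAIT_CPU)
 *	LMS_SLEEP  -> LMS_SYSTEM	dispatched again, via restore_mstate()
 *	LMS_SYSTEM -> LMS_USER		syscall exit, via syscall_mstate()
 */
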
/*
 * Initialize the microstate level and the
 * associated accounting information for an LWP.
 */
void
init_mstate(
	kthread_t	*t,
	int		init_state)
{
	struct mstate *ms;
	klwp_t *lwp;
	hrtime_t curtime;

	ASSERT(init_state != LMS_WAIT_CPU);
	ASSERT((unsigned)init_state < NMSTATES);

	if ((lwp = ttolwp(t)) != NULL) {
		ms = &lwp->lwp_mstate;
		curtime = gethrtime_unscaled();
		ms->ms_prev = LMS_SYSTEM;
		ms->ms_start = curtime;
		ms->ms_term = 0;
		ms->ms_state_start = curtime;
		t->t_mstate = init_state;
		t->t_waitrq = 0;
		t->t_hrtime = curtime;
		if ((t->t_proc_flag & TP_MSACCT) == 0)
			t->t_proc_flag |= TP_MSACCT;
		bzero((caddr_t)&ms->ms_acct[0], sizeof (ms->ms_acct));
	}
}

/*
 * Initialize the microstate level and associated accounting information
 * for the specified cpu.
 */
void
init_cpu_mstate(
	cpu_t *cpu,
	int init_state)
{
	ASSERT(init_state != CMS_DISABLED);

	cpu->cpu_mstate = init_state;
	cpu->cpu_mstate_start = gethrtime_unscaled();
	cpu->cpu_waitrq = 0;
	bzero((caddr_t)&cpu->cpu_acct[0], sizeof (cpu->cpu_acct));
}

/*
 * Set the cpu state to CMS_DISABLED.  We don't actually track this time,
 * but it serves as a useful placeholder state for when we're not
 * doing anything.
 */
void
term_cpu_mstate(struct cpu *cpu)
{
	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	cpu->cpu_mstate = CMS_DISABLED;
	cpu->cpu_mstate_start = 0;
}

/* NEW_CPU_MSTATE comments inline in new_cpu_mstate below. */

#define	NEW_CPU_MSTATE(state)						\
	gen = cpu->cpu_mstate_gen;					\
	cpu->cpu_mstate_gen = 0;					\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_acct[cpu->cpu_mstate] += curtime - cpu->cpu_mstate_start; \
	cpu->cpu_mstate = state;					\
	cpu->cpu_mstate_start = curtime;				\
	/* Need membar_producer() here if stores not ordered / TSO */	\
	cpu->cpu_mstate_gen = (++gen == 0) ? 1 : gen;

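/*
 * NEW_CPU_MSTATE is not a self-contained statement: it relies on the local
 * variables 'gen' (uint16_t), 'cpu', and 'curtime' being in scope in the
 * calling function, and it must only be used by a CPU to update its own
 * state without being preempted in the middle (see the ASSERTs in
 * new_cpu_mstate() and the kpreempt_disable() bracket in syscall_mstate()).
 */
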
void
new_cpu_mstate(int cmstate, hrtime_t curtime)
{
	cpu_t *cpu = CPU;
	uint16_t gen;

	ASSERT(cpu->cpu_mstate != CMS_DISABLED);
	ASSERT(cmstate < NCMSTATES);
	ASSERT(cmstate != CMS_DISABLED);

	/*
	 * This function cannot be re-entrant on a given CPU.  As such,
	 * we ASSERT and panic if we are called on behalf of an interrupt.
	 * The one exception is for an interrupt which has previously
	 * blocked.  Such an interrupt is being scheduled by the dispatcher
	 * just like a normal thread, and as such cannot arrive here
	 * in a re-entrant manner.
	 */

	ASSERT(!CPU_ON_INTR(cpu) && curthread->t_intr == NULL);
	ASSERT(curthread->t_preempt > 0 || curthread == cpu->cpu_idle_thread);

	/*
	 * LOCKING, or lack thereof:
	 *
	 * Updates to CPU mstate can only be made by the CPU
	 * itself, and the above check to ignore interrupts
	 * should prevent recursion into this function on a given
	 * processor, i.e. no possible write contention.
	 *
	 * However, reads of CPU mstate can occur at any time
	 * from any CPU.  Any locking added to this code path
	 * would seriously impact syscall performance.  So,
	 * instead we have a best-effort protection for readers.
	 * The reader will want to account for any time between
	 * cpu_mstate_start and the present time.  This requires
	 * some guarantees that the reader is getting coherent
	 * information.
	 *
	 * We use a generation counter, which is set to 0 before
	 * we start making changes, and is set to a new value
	 * after we're done.  Someone reading the CPU mstate
	 * should check for the same non-zero value of this
	 * counter both before and after reading all state.  The
	 * important point is that the reader is not a
	 * performance-critical path, but this function is.
	 *
	 * The ordering of writes is critical.  cpu_mstate_gen must
	 * be visibly zero on all CPUs before we change cpu_mstate
	 * and cpu_mstate_start.  Additionally, cpu_mstate_gen must
	 * not be restored to oldgen+1 until after all of the other
	 * writes have become visible.
	 *
	 * Normally one would use membar_producer() calls to accomplish
	 * this.  Unfortunately this routine is extremely performance
	 * critical (esp. in syscall_mstate below) and we cannot
	 * afford the additional time, particularly on some x86
	 * architectures with extremely slow sfence calls.  On a
	 * CPU which guarantees write ordering (including sparc, x86,
	 * and amd64) this is not a problem.  The compiler could still
	 * reorder the writes, so we make the four cpu fields
	 * volatile to prevent this.
	 *
	 * TSO warning: should we port to a non-TSO (or equivalent)
	 * CPU, this will break.
	 *
	 * The reader still needs the membar_consumer() calls because,
	 * although the volatiles prevent the compiler from reordering
	 * loads, the CPU can still do so.
	 */

	NEW_CPU_MSTATE(cmstate);
}

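/*
 * Illustrative sketch only: the readers of this data live elsewhere (for
 * example the code that exports per-cpu usage), but a coherent read of
 * another CPU's microstate data would follow roughly this pattern:
 *
 *	do {
 *		gen = cp->cpu_mstate_gen;
 *		membar_consumer();
 *		state = cp->cpu_mstate;
 *		start = cp->cpu_mstate_start;
 *		... copy cp->cpu_acct[] ...
 *		membar_consumer();
 *	} while (gen == 0 || gen != cp->cpu_mstate_gen);
 *
 * i.e. retry until the generation is non-zero and unchanged across the
 * reads, then charge (now - start) to the state that was snapshotted.
 */
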
/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}

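/*
 * Switch the current thread's lwp microstate between the 'fromms' and
 * 'toms' states and charge the elapsed time to the state being left.
 * This is the fast path used by the system-call handlers (typically
 * syscall_mstate(LMS_USER, LMS_SYSTEM) on entry and the reverse on exit).
 * The CPU microstate is switched along with the lwp microstate via
 * NEW_CPU_MSTATE() above.
 */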
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}

#undef NEW_CPU_MSTATE

/*
 * The following is for computing the percentage of cpu time used recently
 * by an lwp.  The function cpu_decay() is also called from /proc code.
 *
 * exp_x(x):
 *	Given x as a 64-bit non-negative scaled integer of arbitrary
 *	magnitude, return exp(-x) as a 64-bit scaled integer in the
 *	range [0 .. 1].
 *
 * Scaling for 64-bit scaled integer:
 *	The binary point is to the right of the high-order bit
 *	of the low-order 32-bit word.
 */

#define	LSHIFT	31
#define	LSI_ONE	((uint32_t)1 << LSHIFT)	/* 32-bit scaled integer 1 */

#ifdef DEBUG
uint_t expx_cnt = 0;	/* number of calls to exp_x() */
uint_t expx_mul = 0;	/* number of long multiplies in exp_x() */
#endif

static uint64_t
exp_x(uint64_t x)
{
	int i;
	uint64_t ull;
	uint32_t ui;

#ifdef DEBUG
	expx_cnt++;
#endif
	/*
	 * By the formula:
	 *	exp(-x) = exp(-x/2) * exp(-x/2)
	 * we keep halving x until it becomes small enough for
	 * the following approximation to be accurate enough:
	 *	exp(-x) = 1 - x
	 * We reduce x until it is less than 1/4 (the 2 in LSHIFT-2 below).
	 * Our final error will be smaller than 4%.
	 */

	/*
	 * Use a uint64_t for the initial shift calculation.
	 */
	ull = x >> (LSHIFT-2);

	/*
	 * Short circuit:
	 * A number this large produces effectively 0 (actually .005).
	 * This way, we will never do more than 5 multiplies.
	 */
	if (ull >= (1 << 5))
		return (0);

	ui = ull;	/* OK.  Now we can use a uint_t. */
	for (i = 0; ui != 0; i++)
		ui >>= 1;

	if (i != 0) {
#ifdef DEBUG
		expx_mul += i;	/* seldom happens */
#endif
		x >>= i;
	}

	/*
	 * Now we compute 1 - x and square it the number of times
	 * that we halved x above to produce the final result:
	 */
	x = LSI_ONE - x;
	while (i--)
		x = (x * x) >> LSHIFT;

	return (x);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new decayed percent cpu:  pct * exp(-tau),
 * where 'tau' is the time delta divided by a decay factor.
 * We have chosen the decay factor (cpu_decay_factor in param.c)
 * to make the decay over five seconds be approximately 20%.
 *
 * 'pct' is a 32-bit scaled integer <= 1
 * The binary point is to the right of the high-order bit
 * of the 32-bit word.
 */
static uint32_t
cpu_decay(uint32_t pct, hrtime_t nsec)
{
	uint64_t delta = (uint64_t)nsec;

	delta /= cpu_decay_factor;
	return ((pct * exp_x(delta)) >> LSHIFT);
}

/*
 * Given the old percent cpu and a time delta in nanoseconds,
 * return the new grown percent cpu:  1 - ( 1 - pct ) * exp(-tau)
 */
static uint32_t
cpu_grow(uint32_t pct, hrtime_t nsec)
{
	return (LSI_ONE - cpu_decay(LSI_ONE - pct, nsec));
}

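/*
 * Worked example of the decay arithmetic above (illustrative only; the
 * actual constant lives in param.c).  The 'tau' passed to exp_x() is a
 * scaled integer with the binary point after bit 31, so in real terms
 *
 *	tau = nsec / (cpu_decay_factor * 2^31)
 *
 * For a decay of about 20% over five seconds we want exp(-tau) ~= 0.8,
 * i.e. tau ~= 0.223 when nsec = 5 * 10^9, which corresponds to a
 * cpu_decay_factor of roughly 10.
 */
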
/*
 * Defined to determine whether an lwp is still on a processor.
 */

#define	T_ONPROC(kt)	\
	((kt)->t_mstate < LMS_SLEEP)
#define	T_OFFPROC(kt)	\
	((kt)->t_mstate >= LMS_SLEEP)

uint_t
cpu_update_pct(kthread_t *t, hrtime_t newtime)
{
	hrtime_t delta;
	hrtime_t hrlb;
	uint_t pctcpu;
	uint_t npctcpu;

	/*
	 * This routine can get called at PIL > 0, so this *has* to be
	 * done atomically.  Holding locks here causes bad things to happen
	 * (read: deadlock).
	 */

	do {
		if (T_ONPROC(t) && t->t_waitrq == 0) {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_grow(pctcpu, delta);
		} else {
			hrlb = t->t_hrtime;
			delta = newtime - hrlb;
			if (delta < 0) {
				newtime = gethrtime_unscaled();
				delta = newtime - hrlb;
			}
			t->t_hrtime = newtime;
			scalehrtime(&delta);
			pctcpu = t->t_pctcpu;
			npctcpu = cpu_decay(pctcpu, delta);
		}
	} while (cas32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);

	return (npctcpu);
}

/*
 * Change the microstate level for the LWP and update the
 * associated accounting information.  Return the previous
 * LWP state.
 */
int
new_mstate(kthread_t *t, int new_state)
{
	struct mstate *ms;
	unsigned state;
	hrtime_t *mstimep;
	hrtime_t curtime;
	hrtime_t newtime;
	hrtime_t oldtime;
	klwp_t *lwp;

	ASSERT(new_state != LMS_WAIT_CPU);
	ASSERT((unsigned)new_state < NMSTATES);
	ASSERT(t == curthread || THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (LMS_SYSTEM);

	curtime = gethrtime_unscaled();

	/* adjust cpu percentages before we go any further */
	(void) cpu_update_pct(t, curtime);

	ms = &lwp->lwp_mstate;
	state = t->t_mstate;
	do {
		switch (state) {
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[state];
			break;
		}
		newtime = curtime - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
		t->t_mstate = new_state;
		ms->ms_state_start = curtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Remember the previous running microstate.
	 */
	if (state != LMS_SLEEP && state != LMS_STOPPED)
		ms->ms_prev = state;

	/*
	 * Switch CPU microstate if appropriate
	 */

	kpreempt_disable(); /* MUST disable kpreempt before touching t->cpu */
	ASSERT(t->t_cpu == CPU);
	if (!CPU_ON_INTR(t->t_cpu) && curthread->t_intr == NULL) {
		if (new_state == LMS_USER && t->t_cpu->cpu_mstate != CMS_USER)
			new_cpu_mstate(CMS_USER, curtime);
		else if (new_state != LMS_USER &&
		    t->t_cpu->cpu_mstate != CMS_SYSTEM)
			new_cpu_mstate(CMS_SYSTEM, curtime);
	}
	kpreempt_enable();

	return (ms->ms_prev);
}

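/*
 * A note on the retry loops above and in restore_mstate() below: 'newtime'
 * can come out negative if the unscaled hrtime sources are not perfectly
 * synchronized across CPUs (for example, when ms_state_start was recorded
 * on a different CPU whose time source is slightly ahead of ours), which is
 * presumably the reason for these checks.  In that case the loop re-reads
 * the clock and deliberately primes 'oldtime' with a value that cannot
 * match *mstimep, so the cas64() fails and the iteration is retried with
 * the fresh timestamp.
 */
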
static long waitrqis0 = 0;

/*
 * Restore the LWP microstate to the previous runnable state.
 * Called from disp() with the newly selected lwp.
 */
void
restore_mstate(kthread_t *t)
{
	struct mstate *ms;
	hrtime_t *mstimep;
	klwp_t *lwp;
	hrtime_t curtime;
	hrtime_t waitrq;
	hrtime_t newtime;
	hrtime_t oldtime;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	curtime = gethrtime_unscaled();
	(void) cpu_update_pct(t, curtime);
	ms = &lwp->lwp_mstate;
	ASSERT((unsigned)t->t_mstate < NMSTATES);
	do {
		switch (t->t_mstate) {
		case LMS_SLEEP:
			/*
			 * Update the timer for the current sleep state.
			 */
			ASSERT((unsigned)ms->ms_prev < NMSTATES);
			switch (ms->ms_prev) {
			case LMS_TFAULT:
			case LMS_DFAULT:
			case LMS_KFAULT:
			case LMS_USER_LOCK:
				mstimep = &ms->ms_acct[ms->ms_prev];
				break;
			default:
				mstimep = &ms->ms_acct[LMS_SLEEP];
				break;
			}
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_STOPPED:
			mstimep = &ms->ms_acct[LMS_STOPPED];
			/*
			 * Return to the previous run state.
			 */
			t->t_mstate = ms->ms_prev;
			break;
		case LMS_TFAULT:
		case LMS_DFAULT:
		case LMS_KFAULT:
		case LMS_USER_LOCK:
			mstimep = &ms->ms_acct[LMS_SYSTEM];
			break;
		default:
			mstimep = &ms->ms_acct[t->t_mstate];
			break;
		}
		waitrq = t->t_waitrq;	/* hopefully atomic */
		t->t_waitrq = 0;
		if (waitrq == 0) {	/* should only happen during boot */
			waitrq = curtime;
			waitrqis0++;
		}
		newtime = waitrq - ms->ms_state_start;
		if (newtime < 0) {
			curtime = gethrtime_unscaled();
			oldtime = *mstimep - 1; /* force CAS to fail */
			continue;
		}
		oldtime = *mstimep;
		newtime += oldtime;
	} while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
	/*
	 * Update the WAIT_CPU timer and per-cpu waitrq total.
	 */
	ms->ms_acct[LMS_WAIT_CPU] += (curtime - waitrq);
	CPU->cpu_waitrq += (curtime - waitrq);
	ms->ms_state_start = curtime;
}

/*
 * Copy lwp microstate accounting and resource usage information
 * to the process.  (lwp is terminating)
 */
void
term_mstate(kthread_t *t)
{
	struct mstate *ms;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int i;
	hrtime_t tmp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	ms = &lwp->lwp_mstate;
	(void) new_mstate(t, LMS_STOPPED);
	ms->ms_term = ms->ms_state_start;
	tmp = ms->ms_term - ms->ms_start;
	scalehrtime(&tmp);
	p->p_mlreal += tmp;
	for (i = 0; i < NMSTATES; i++) {
		tmp = ms->ms_acct[i];
		scalehrtime(&tmp);
		p->p_acct[i] += tmp;
	}
	p->p_ru.minflt += lwp->lwp_ru.minflt;
	p->p_ru.majflt += lwp->lwp_ru.majflt;
	p->p_ru.nswap += lwp->lwp_ru.nswap;
	p->p_ru.inblock += lwp->lwp_ru.inblock;
	p->p_ru.oublock += lwp->lwp_ru.oublock;
	p->p_ru.msgsnd += lwp->lwp_ru.msgsnd;
	p->p_ru.msgrcv += lwp->lwp_ru.msgrcv;
	p->p_ru.nsignals += lwp->lwp_ru.nsignals;
	p->p_ru.nvcsw += lwp->lwp_ru.nvcsw;
	p->p_ru.nivcsw += lwp->lwp_ru.nivcsw;
	p->p_ru.sysc += lwp->lwp_ru.sysc;
	p->p_ru.ioch += lwp->lwp_ru.ioch;
	p->p_defunct++;
}