/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE_EX( , , clock, prof, \
    cpu_startprofclock, cpu_stopprofclock);
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, "struct thread *", "struct proc *");

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0,
			    sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0,
			    sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time,
			    sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
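
/*
 * Example (illustrative, userland): the aggregate counters exported above
 * can be read with sysctlbyname(3); the CP_* indices come from
 * <sys/resource.h>:
 *
 *	long cp_time[CPUSTATES];
 *	size_t len = sizeof(cp_time);
 *
 *	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == 0)
 *		printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
 *		    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
 *		    cp_time[CP_INTR], cp_time[CP_IDLE]);
 */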
226 */ 227 wchan = td->td_wchan; 228 tticks = ticks - td->td_slptick; 229 slptype = sleepq_type(wchan); 230 if ((slptype == SLEEPQ_SX || slptype == SLEEPQ_LK) && 231 tticks > slpticks) { 232 233 /* 234 * Accordingly with provided thresholds, this thread is stuck 235 * for too long on a sleepqueue. 236 * However, being on a sleepqueue, we might still check for the 237 * blessed list. 238 */ 239 for (i = 0; blessed[i] != NULL; i++) 240 if (!strcmp(blessed[i], td->td_wmesg)) 241 return; 242 243 panic("%s: possible deadlock detected for %p (%s), " 244 "blocked for %d ticks\n", __func__, 245 td, sched_tdname(td), tticks); 246 } 247 } 248 249 static void 250 deadlkres(void) 251 { 252 struct proc *p; 253 struct thread *td; 254 int blkticks, slpticks, tryl; 255 256 tryl = 0; 257 for (;;) { 258 blkticks = blktime_threshold * hz; 259 slpticks = slptime_threshold * hz; 260 261 /* 262 * Avoid to sleep on the sx_lock in order to avoid a 263 * possible priority inversion problem leading to 264 * starvation. 265 * If the lock can't be held after 100 tries, panic. 266 */ 267 if (!sx_try_slock(&allproc_lock)) { 268 if (tryl > 100) 269 panic("%s: possible deadlock detected " 270 "on allproc_lock\n", __func__); 271 tryl++; 272 pause("allproc", sleepfreq * hz); 273 continue; 274 } 275 tryl = 0; 276 FOREACH_PROC_IN_SYSTEM(p) { 277 PROC_LOCK(p); 278 if (p->p_state == PRS_NEW) { 279 PROC_UNLOCK(p); 280 continue; 281 } 282 FOREACH_THREAD_IN_PROC(p, td) { 283 thread_lock(td); 284 if (TD_ON_LOCK(td)) 285 deadlres_td_on_lock(p, td, 286 blkticks); 287 else if (TD_IS_SLEEPING(td)) 288 deadlres_td_sleep_q(p, td, 289 slpticks); 290 thread_unlock(td); 291 } 292 PROC_UNLOCK(p); 293 } 294 sx_sunlock(&allproc_lock); 295 296 /* Sleep for sleepfreq seconds. */ 297 pause("-", sleepfreq * hz); 298 } 299 } 300 301 static struct kthread_desc deadlkres_kd = { 302 "deadlkres", 303 deadlkres, 304 (struct thread **)NULL 305 }; 306 307 SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd); 308 309 static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 310 "Deadlock resolver"); 311 SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW, 312 &slptime_threshold, 0, 313 "Number of seconds within is valid to sleep on a sleepqueue"); 314 SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW, 315 &blktime_threshold, 0, 316 "Number of seconds within is valid to block on a turnstile"); 317 SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0, 318 "Number of seconds between any deadlock resolver thread run"); 319 #endif /* DEADLKRES */ 320 321 void 322 read_cpu_time(long *cp_time) 323 { 324 struct pcpu *pc; 325 int i, j; 326 327 /* Sum up global cp_time[]. */ 328 bzero(cp_time, sizeof(long) * CPUSTATES); 329 CPU_FOREACH(i) { 330 pc = pcpu_find(i); 331 for (j = 0; j < CPUSTATES; j++) 332 cp_time[j] += pc->pc_cp_time[j]; 333 } 334 } 335 336 #include <sys/watchdog.h> 337 338 static int watchdog_ticks; 339 static int watchdog_enabled; 340 static void watchdog_fire(void); 341 static void watchdog_config(void *, u_int, int *); 342 343 static void 344 watchdog_attach(void) 345 { 346 EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0); 347 } 348 349 /* 350 * Clock handling routines. 351 * 352 * This code is written to operate with two timers that run independently of 353 * each other. 354 * 355 * The main timer, running hz times per second, is used to trigger interval 356 * timers, timeouts and rescheduling as needed. 
357 * 358 * The second timer handles kernel and user profiling, 359 * and does resource use estimation. If the second timer is programmable, 360 * it is randomized to avoid aliasing between the two clocks. For example, 361 * the randomization prevents an adversary from always giving up the cpu 362 * just before its quantum expires. Otherwise, it would never accumulate 363 * cpu ticks. The mean frequency of the second timer is stathz. 364 * 365 * If no second timer exists, stathz will be zero; in this case we drive 366 * profiling and statistics off the main clock. This WILL NOT be accurate; 367 * do not do it unless absolutely necessary. 368 * 369 * The statistics clock may (or may not) be run at a higher rate while 370 * profiling. This profile clock runs at profhz. We require that profhz 371 * be an integral multiple of stathz. 372 * 373 * If the statistics clock is running fast, it must be divided by the ratio 374 * profhz/stathz for statistics. (For profiling, every tick counts.) 375 * 376 * Time-of-day is maintained using a "timecounter", which may or may 377 * not be related to the hardware generating the above mentioned 378 * interrupts. 379 */ 380 381 int stathz; 382 int profhz; 383 int profprocs; 384 volatile int ticks; 385 int psratio; 386 387 DPCPU_DEFINE_STATIC(int, pcputicks); /* Per-CPU version of ticks. */ 388 #ifdef DEVICE_POLLING 389 static int devpoll_run = 0; 390 #endif 391 392 /* 393 * Initialize clock frequencies and start both clocks running. 394 */ 395 /* ARGSUSED*/ 396 static void 397 initclocks(void *dummy) 398 { 399 int i; 400 401 /* 402 * Set divisors to 1 (normal case) and let the machine-specific 403 * code do its bit. 404 */ 405 mtx_init(&time_lock, "time lock", NULL, MTX_DEF); 406 cpu_initclocks(); 407 408 /* 409 * Compute profhz/stathz, and fix profhz if needed. 410 */ 411 i = stathz ? stathz : hz; 412 if (profhz == 0) 413 profhz = i; 414 psratio = profhz / i; 415 416 #ifdef SW_WATCHDOG 417 /* Enable hardclock watchdog now, even if a hardware watchdog exists. */ 418 watchdog_attach(); 419 #else 420 /* Volunteer to run a software watchdog. */ 421 if (wdog_software_attach == NULL) 422 wdog_software_attach = watchdog_attach; 423 #endif 424 } 425 426 static __noinline void 427 hardclock_itimer(struct thread *td, struct pstats *pstats, int cnt, int usermode) 428 { 429 struct proc *p; 430 int flags; 431 432 flags = 0; 433 p = td->td_proc; 434 if (usermode && 435 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) { 436 PROC_ITIMLOCK(p); 437 if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], 438 tick * cnt) == 0) 439 flags |= TDF_ALRMPEND | TDF_ASTPENDING; 440 PROC_ITIMUNLOCK(p); 441 } 442 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) { 443 PROC_ITIMLOCK(p); 444 if (itimerdecr(&pstats->p_timer[ITIMER_PROF], 445 tick * cnt) == 0) 446 flags |= TDF_PROFPEND | TDF_ASTPENDING; 447 PROC_ITIMUNLOCK(p); 448 } 449 if (flags != 0) { 450 thread_lock(td); 451 td->td_flags |= flags; 452 thread_unlock(td); 453 } 454 } 455 456 void 457 hardclock(int cnt, int usermode) 458 { 459 struct pstats *pstats; 460 struct thread *td = curthread; 461 struct proc *p = td->td_proc; 462 int *t = DPCPU_PTR(pcputicks); 463 int global, i, newticks; 464 465 /* 466 * Update per-CPU and possibly global ticks values. 
467 */ 468 *t += cnt; 469 global = ticks; 470 do { 471 newticks = *t - global; 472 if (newticks <= 0) { 473 if (newticks < -1) 474 *t = global - 1; 475 newticks = 0; 476 break; 477 } 478 } while (!atomic_fcmpset_int(&ticks, &global, *t)); 479 480 /* 481 * Run current process's virtual and profile time, as needed. 482 */ 483 pstats = p->p_stats; 484 if (__predict_false( 485 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) || 486 timevalisset(&pstats->p_timer[ITIMER_PROF].it_value))) 487 hardclock_itimer(td, pstats, cnt, usermode); 488 489 #ifdef HWPMC_HOOKS 490 if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid))) 491 PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL); 492 if (td->td_intr_frame != NULL) 493 PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame); 494 #endif 495 /* We are in charge to handle this tick duty. */ 496 if (newticks > 0) { 497 tc_ticktock(newticks); 498 #ifdef DEVICE_POLLING 499 /* Dangerous and no need to call these things concurrently. */ 500 if (atomic_cmpset_acq_int(&devpoll_run, 0, 1)) { 501 /* This is very short and quick. */ 502 hardclock_device_poll(); 503 atomic_store_rel_int(&devpoll_run, 0); 504 } 505 #endif /* DEVICE_POLLING */ 506 if (watchdog_enabled > 0) { 507 i = atomic_fetchadd_int(&watchdog_ticks, -newticks); 508 if (i > 0 && i <= newticks) 509 watchdog_fire(); 510 } 511 intr_event_handle(clk_intr_event, NULL); 512 } 513 if (curcpu == CPU_FIRST()) 514 cpu_tick_calibration(); 515 if (__predict_false(DPCPU_GET(epoch_cb_count))) 516 GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task)); 517 } 518 519 void 520 hardclock_sync(int cpu) 521 { 522 int *t; 523 KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu)); 524 t = DPCPU_ID_PTR(cpu, pcputicks); 525 526 *t = ticks; 527 } 528 529 /* 530 * Compute number of ticks in the specified amount of time. 531 */ 532 int 533 tvtohz(struct timeval *tv) 534 { 535 unsigned long ticks; 536 long sec, usec; 537 538 /* 539 * If the number of usecs in the whole seconds part of the time 540 * difference fits in a long, then the total number of usecs will 541 * fit in an unsigned long. Compute the total and convert it to 542 * ticks, rounding up and adding 1 to allow for the current tick 543 * to expire. Rounding also depends on unsigned long arithmetic 544 * to avoid overflow. 545 * 546 * Otherwise, if the number of ticks in the whole seconds part of 547 * the time difference fits in a long, then convert the parts to 548 * ticks separately and add, using similar rounding methods and 549 * overflow avoidance. This method would work in the previous 550 * case but it is slightly slower and assumes that hz is integral. 551 * 552 * Otherwise, round the time difference down to the maximum 553 * representable value. 554 * 555 * If ints have 32 bits, then the maximum value for any timeout in 556 * 10ms ticks is 248 days. 557 */ 558 sec = tv->tv_sec; 559 usec = tv->tv_usec; 560 if (usec < 0) { 561 sec--; 562 usec += 1000000; 563 } 564 if (sec < 0) { 565 #ifdef DIAGNOSTIC 566 if (usec > 0) { 567 sec++; 568 usec -= 1000000; 569 } 570 printf("tvotohz: negative time difference %ld sec %ld usec\n", 571 sec, usec); 572 #endif 573 ticks = 1; 574 } else if (sec <= LONG_MAX / 1000000) 575 ticks = howmany(sec * 1000000 + (unsigned long)usec, tick) + 1; 576 else if (sec <= LONG_MAX / hz) 577 ticks = sec * hz 578 + howmany((unsigned long)usec, tick) + 1; 579 else 580 ticks = LONG_MAX; 581 if (ticks > INT_MAX) 582 ticks = INT_MAX; 583 return ((int)ticks); 584 } 585 586 /* 587 * Start profiling on a process. 
588 * 589 * Kernel profiling passes proc0 which never exits and hence 590 * keeps the profile clock running constantly. 591 */ 592 void 593 startprofclock(struct proc *p) 594 { 595 596 PROC_LOCK_ASSERT(p, MA_OWNED); 597 if (p->p_flag & P_STOPPROF) 598 return; 599 if ((p->p_flag & P_PROFIL) == 0) { 600 p->p_flag |= P_PROFIL; 601 mtx_lock(&time_lock); 602 if (++profprocs == 1) 603 cpu_startprofclock(); 604 mtx_unlock(&time_lock); 605 } 606 } 607 608 /* 609 * Stop profiling on a process. 610 */ 611 void 612 stopprofclock(struct proc *p) 613 { 614 615 PROC_LOCK_ASSERT(p, MA_OWNED); 616 if (p->p_flag & P_PROFIL) { 617 if (p->p_profthreads != 0) { 618 while (p->p_profthreads != 0) { 619 p->p_flag |= P_STOPPROF; 620 msleep(&p->p_profthreads, &p->p_mtx, PPAUSE, 621 "stopprof", 0); 622 } 623 } 624 if ((p->p_flag & P_PROFIL) == 0) 625 return; 626 p->p_flag &= ~P_PROFIL; 627 mtx_lock(&time_lock); 628 if (--profprocs == 0) 629 cpu_stopprofclock(); 630 mtx_unlock(&time_lock); 631 } 632 } 633 634 /* 635 * Statistics clock. Updates rusage information and calls the scheduler 636 * to adjust priorities of the active thread. 637 * 638 * This should be called by all active processors. 639 */ 640 void 641 statclock(int cnt, int usermode) 642 { 643 struct rusage *ru; 644 struct vmspace *vm; 645 struct thread *td; 646 struct proc *p; 647 long rss; 648 long *cp_time; 649 uint64_t runtime, new_switchtime; 650 651 td = curthread; 652 p = td->td_proc; 653 654 cp_time = (long *)PCPU_PTR(cp_time); 655 if (usermode) { 656 /* 657 * Charge the time as appropriate. 658 */ 659 td->td_uticks += cnt; 660 if (p->p_nice > NZERO) 661 cp_time[CP_NICE] += cnt; 662 else 663 cp_time[CP_USER] += cnt; 664 } else { 665 /* 666 * Came from kernel mode, so we were: 667 * - handling an interrupt, 668 * - doing syscall or trap work on behalf of the current 669 * user process, or 670 * - spinning in the idle loop. 671 * Whichever it is, charge the time as appropriate. 672 * Note that we charge interrupts to the current process, 673 * regardless of whether they are ``for'' that process, 674 * so that we know how much of its real time was spent 675 * in ``non-process'' (i.e., interrupt) work. 676 */ 677 if ((td->td_pflags & TDP_ITHREAD) || 678 td->td_intr_nesting_level >= 2) { 679 td->td_iticks += cnt; 680 cp_time[CP_INTR] += cnt; 681 } else { 682 td->td_pticks += cnt; 683 td->td_sticks += cnt; 684 if (!TD_IS_IDLETHREAD(td)) 685 cp_time[CP_SYS] += cnt; 686 else 687 cp_time[CP_IDLE] += cnt; 688 } 689 } 690 691 /* Update resource usage integrals and maximums. */ 692 MPASS(p->p_vmspace != NULL); 693 vm = p->p_vmspace; 694 ru = &td->td_ru; 695 ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt; 696 ru->ru_idrss += pgtok(vm->vm_dsize) * cnt; 697 ru->ru_isrss += pgtok(vm->vm_ssize) * cnt; 698 rss = pgtok(vmspace_resident_count(vm)); 699 if (ru->ru_maxrss < rss) 700 ru->ru_maxrss = rss; 701 KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock", 702 "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz); 703 SDT_PROBE2(sched, , , tick, td, td->td_proc); 704 thread_lock_flags(td, MTX_QUIET); 705 706 /* 707 * Compute the amount of time during which the current 708 * thread was running, and add that to its total so far. 
709 */ 710 new_switchtime = cpu_ticks(); 711 runtime = new_switchtime - PCPU_GET(switchtime); 712 td->td_runtime += runtime; 713 td->td_incruntime += runtime; 714 PCPU_SET(switchtime, new_switchtime); 715 716 sched_clock(td, cnt); 717 thread_unlock(td); 718 #ifdef HWPMC_HOOKS 719 if (td->td_intr_frame != NULL) 720 PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame); 721 #endif 722 } 723 724 void 725 profclock(int cnt, int usermode, uintfptr_t pc) 726 { 727 struct thread *td; 728 #ifdef GPROF 729 struct gmonparam *g; 730 uintfptr_t i; 731 #endif 732 733 td = curthread; 734 if (usermode) { 735 /* 736 * Came from user mode; CPU was in user state. 737 * If this process is being profiled, record the tick. 738 * if there is no related user location yet, don't 739 * bother trying to count it. 740 */ 741 if (td->td_proc->p_flag & P_PROFIL) 742 addupc_intr(td, pc, cnt); 743 } 744 #ifdef GPROF 745 else { 746 /* 747 * Kernel statistics are just like addupc_intr, only easier. 748 */ 749 g = &_gmonparam; 750 if (g->state == GMON_PROF_ON && pc >= g->lowpc) { 751 i = PC_TO_I(g, pc); 752 if (i < g->textsize) { 753 KCOUNT(g, i) += cnt; 754 } 755 } 756 } 757 #endif 758 #ifdef HWPMC_HOOKS 759 if (td->td_intr_frame != NULL) 760 PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame); 761 #endif 762 } 763 764 /* 765 * Return information about system clocks. 766 */ 767 static int 768 sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS) 769 { 770 struct clockinfo clkinfo; 771 /* 772 * Construct clockinfo structure. 773 */ 774 bzero(&clkinfo, sizeof(clkinfo)); 775 clkinfo.hz = hz; 776 clkinfo.tick = tick; 777 clkinfo.profhz = profhz; 778 clkinfo.stathz = stathz ? stathz : hz; 779 return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); 780 } 781 782 SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, 783 CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE, 784 0, 0, sysctl_kern_clockrate, "S,clockinfo", 785 "Rate and period of various kernel clocks"); 786 787 static void 788 watchdog_config(void *unused __unused, u_int cmd, int *error) 789 { 790 u_int u; 791 792 u = cmd & WD_INTERVAL; 793 if (u >= WD_TO_1SEC) { 794 watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz; 795 watchdog_enabled = 1; 796 *error = 0; 797 } else { 798 watchdog_enabled = 0; 799 } 800 } 801 802 /* 803 * Handle a watchdog timeout by dumping interrupt information and 804 * then either dropping to DDB or panicking. 805 */ 806 static void 807 watchdog_fire(void) 808 { 809 int nintr; 810 uint64_t inttotal; 811 u_long *curintr; 812 char *curname; 813 814 curintr = intrcnt; 815 curname = intrnames; 816 inttotal = 0; 817 nintr = sintrcnt / sizeof(u_long); 818 819 printf("interrupt total\n"); 820 while (--nintr >= 0) { 821 if (*curintr) 822 printf("%-12s %20lu\n", curname, *curintr); 823 curname += strlen(curname) + 1; 824 inttotal += *curintr++; 825 } 826 printf("Total %20ju\n", (uintmax_t)inttotal); 827 828 #if defined(KDB) && !defined(KDB_UNATTENDED) 829 kdb_backtrace(); 830 kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout"); 831 #else 832 panic("watchdog timeout"); 833 #endif 834 } 835