/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/chip.h>
#include <sys/sdt.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock is called straight from
 * the real time clock interrupt.
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t		vminfo;
extern int		idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
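/*
 * Illustrative note (not part of the original source): avenrun[] holds
 * fixed-point values with FSHIFT fractional bits, while hp_avenrun[]
 * carries 16 fractional bits for extra precision.  Assuming FSHIFT is 8
 * (so FSCALE == 256), a 1-minute load of 1.5 would be stored as
 *
 *	hp_avenrun[0] == 98304				(1.5 * 65536)
 *	avenrun[0] == 98304 >> (16 - FSHIFT) == 384	(1.5 * FSCALE)
 *
 * and userland recovers the familiar floating-point figure by dividing
 * by FSCALE (getloadavg(3C) does this on the caller's behalf).
 */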
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;		/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;		/* time offset (us) */
int32_t time_constant = 0;		/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;		/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in Hz since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * rechoose_interval_history is used to detect when rechoose_interval's
 * value has changed (via hotpatching for example), so that the
 * cached values in the cpu structures may be updated.
 */
static int rechoose_interval_history = RECHOOSE_INTERVAL;

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate"		/* TOD_RATECHANGED */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void clock_tick(kthread_t *);
static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;

#ifdef KSLICE
int kslice = KSLICE;
#endif

static void
clock(void)
{
	kthread_t *t;
	kmutex_t *plockp;	/* pointer to thread's process lock */
	int pinned_intr = 0;
	uint_t nrunnable, nrunning;
	uint_t w_io;
	cpu_t *cp;
	cpupart_t *cpupart;
	int exiting;
	extern void set_anoninfo();
	extern void set_freemem();
	void (*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int rechoose_update = 0;
	int rechoose;
	int i;

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();


	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}
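	/*
	 * Worked example (illustrative, not in the original source):
	 * time_phase accumulates in scaled-microsecond units, with
	 * SCALE_PHASE units per microsecond.  If the accumulated phase
	 * reaches, say, 3.2 us, the code above peels off the three whole
	 * microseconds (ltemp == 3), folds them into timedelta as
	 * nanoseconds, and leaves the 0.2 us residue in time_phase for
	 * subsequent ticks.
	 */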
	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 *
	 * Did we pin another interrupt thread?  Need to check this before
	 * grabbing any adaptive locks, since if we block on a lock the
	 * pinned thread could escape.  Note that this is just a heuristic;
	 * if we take multiple laps through clock() without returning from
	 * the interrupt because we have another clock tick pending, then
	 * the pinned interrupt could be released by one of the previous
	 * laps.  The only consequence is that the CPU will be counted as
	 * in idle (or wait) state once the pinned interrupt is released.
	 * Since this accounting is inaccurate by nature, this isn't a big
	 * deal --- but we should try to get it right in the common case
	 * where we only call clock() once per interrupt.
	 */
	if (curthread->t_intr != NULL)
		pinned_intr = (curthread->t_intr->t_flag & T_INTR_THREAD);

	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	/*
	 * The dispatcher tunable rechoose_interval may be hot-patched.
	 * Note if it has a new value.  If so, the effective rechoose_interval
	 * cached in the cpu structures needs to be updated.
	 * If needed we'll do this during the walk of the cpu_list below.
	 */
	if (rechoose_interval != rechoose_interval_history) {
		rechoose_interval_history = rechoose_interval;
		rechoose_update = 1;
	}

	if (one_sec)
		loadavg_update();


	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec)
			cpupart->cp_nrunnable += cpu_nrunnable;
		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
		/*
		 * The platform may define a per physical processor
		 * adjustment of rechoose_interval.  The effective
		 * (base + adjustment) rechoose_interval is cached
		 * in the cpu structures for efficiency.  Above we detect
		 * if the cached values need updating, and here is where
		 * the update happens.
		 */
		if (rechoose_update) {
			rechoose = rechoose_interval +
			    cp->cpu_chip->chip_rechoose_adj;
			cp->cpu_rechoose = (rechoose < 0) ? 0 : rechoose;
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	/*
	 * Do tick processing for all the active threads running in
	 * the system.
	 */
	cp = cpu_list;
	nrunning = 0;
	do {
		klwp_id_t lwp;
		int intr;
		int thread_away;

		/*
		 * Don't do any tick processing on CPUs that
		 * aren't even in the system or aren't up yet.
		 */
		if ((cp->cpu_flags & CPU_EXISTS) == 0) {
			continue;
		}

		/*
		 * The locking here is rather tricky.  We use
		 * thread_free_lock to keep the currently running
		 * thread from being freed or recycled while we're
		 * looking at it.  We can then check if the thread
		 * is exiting and get the appropriate p_lock if it
		 * is not.  We have to be careful, though, because
		 * the _process_ can still be freed while we're
		 * holding thread_free_lock.  To avoid touching the
		 * proc structure we put a pointer to the p_lock in the
		 * thread structure.  The p_lock is persistent so we
		 * can acquire it even if the process is gone.  At that
		 * point we can check (again) if the thread is exiting
		 * and either drop the lock or do the tick processing.
		 */
		mutex_enter(&thread_free_lock);
		/*
		 * We cannot hold the cpu_lock to prevent the
		 * cpu_list from changing in the clock interrupt.
		 * As long as we don't block (or don't get pre-empted)
		 * the cpu_list will not change (all threads are paused
		 * before list modification).  If the list does change
		 * any deleted cpu structures will remain with cpu_next
		 * set to NULL, hence the following test.
		 */
		if (cp->cpu_next == NULL) {
			mutex_exit(&thread_free_lock);
			break;
		}
		t = cp->cpu_thread;	/* Current running thread */
		if (CPU == cp) {
			/*
			 * 't' will be the clock interrupt thread on this
			 * CPU.  Use the pinned thread (if any) on this CPU
			 * as the target of the clock tick.  If we pinned
			 * an interrupt, though, just keep using the clock
			 * interrupt thread since the formerly pinned one
			 * may have gone away.  One interrupt thread is as
			 * good as another, and this means we don't have
			 * to continue to check pinned_intr in subsequent
			 * code.
			 */
			ASSERT(t == curthread);
			if (t->t_intr != NULL && !pinned_intr)
				t = t->t_intr;
		}

		intr = t->t_flag & T_INTR_THREAD;
		lwp = ttolwp(t);
		if (lwp == NULL || (t->t_proc_flag & TP_LWPEXIT) || intr) {
			/*
			 * Thread is exiting (or uninteresting) so don't
			 * do tick processing or grab p_lock.  Once we
			 * drop thread_free_lock we can't look inside the
			 * thread or lwp structure, since the thread may
			 * have gone away.
			 */
			exiting = 1;
		} else {
			/*
			 * OK, try to grab the process lock.  See
			 * comments above for why we're not using
			 * ttoproc(t)->p_lockp here.
			 */
			plockp = t->t_plockp;
			mutex_enter(plockp);
			/* See above comment. */
			if (cp->cpu_next == NULL) {
				mutex_exit(plockp);
				mutex_exit(&thread_free_lock);
				break;
			}
			/*
			 * The thread may have exited between when we
			 * checked above, and when we got the p_lock.
			 */
			if (t->t_proc_flag & TP_LWPEXIT) {
				mutex_exit(plockp);
				exiting = 1;
			} else {
				exiting = 0;
			}
		}
		/*
		 * Either we have the p_lock for the thread's process,
		 * or we don't care about the thread structure any more.
		 * Either way we can drop thread_free_lock.
		 */
		mutex_exit(&thread_free_lock);

		/*
		 * Update user, system, and idle cpu times.
		 */
		if (one_sec) {
			nrunning++;
			cp->cpu_part->cp_nrunning++;
		}
		/*
		 * If we haven't done tick processing for this
		 * lwp, then do it now.  Since we don't hold the
		 * lwp down on a CPU it can migrate and show up
		 * more than once, hence the lbolt check.
		 *
		 * Also, make sure that it's okay to perform the
		 * tick processing before calling clock_tick.
		 * Setting thread_away to a TRUE value (i.e. not 0)
		 * results in tick processing not being performed for
		 * that thread.  Or, in other words, keeps the thread
		 * away from clock_tick processing.
		 */
		thread_away = ((cp->cpu_flags & CPU_QUIESCED) ||
		    CPU_ON_INTR(cp) || intr ||
		    (cp->cpu_dispthread == cp->cpu_idle_thread) || exiting);

		if ((!thread_away) && (lbolt - t->t_lbolt != 0)) {
			t->t_lbolt = lbolt;
			clock_tick(t);
		}

#ifdef KSLICE
		/*
		 * Ah what the heck, give this kid a taste of the real
		 * world and yank the rug out from under it.
		 * But, only if we are running UniProcessor.
		 */
		if ((kslice) && (ncpus == 1)) {
			aston(t);
			cp->cpu_runrun = 1;
			cp->cpu_kprunrun = 1;
		}
#endif
		if (!exiting)
			mutex_exit(plockp);
	} while ((cp = cp->cpu_next) != cpu_list);

	/*
	 * bump time in ticks
	 *
	 * We rely on there being only one clock thread and hence
	 * don't need a lock to protect lbolt.
	 */
	lbolt++;
	atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	/*
	 * Schedule timeout() requests if any are due at this time.
	 */
	callout_schedule();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm.  On a 64-bit machine, you shouldn't need to ask.
		 */
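		/*
		 * Illustrative note (not in the original source): the
		 * statement below widens the error bound by the oscillator
		 * tolerance once per second; e.g. a tolerance of 512 ppm
		 * grows time_maxerror by 512 us each second until the next
		 * ntp_adjtime() update resets it.
		 */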
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
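		/*
		 * Worked example (illustrative, not in the original
		 * source): with hz == 100 and a combined frequency
		 * offset (time_freq + pps_freq) equivalent to +100 ppm,
		 * 100 us of correction must be spread over each second,
		 * i.e. 1 us per tick, which is exactly what the scaled
		 * division above adds to time_adj.
		 */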
		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 * - if dosynctodr is 1, then compute the drift between
		 *   the tod chip and software time and adjust one or
		 *   the other depending on the circumstances
		 *
		 * - if dosynctodr is 0, then the tod chip is independent
		 *   of the software clock and should not be adjusted,
		 *   but allowed to free run.  This allows NTP to sync
		 *   hrestime without any interference from the tod chip.
		 */

		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock; record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = lbolt64;
					s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		sysinfo.updates++;
		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv
			    + k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API.  We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large.  If it is, we put the largest value
			 * that we can use into avenrun[i].
			 * This is kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[].
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t hdlr;
	cyc_time_t when;

	hdlr.cyh_func = (cyc_func_t)clock;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	mutex_enter(&cpu_lock);
	clock_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
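		/*
		 * Illustrative note (not in the original source):
		 * lg_loads[] is a ring buffer, so the "previous" slot
		 * computed below is just a modular decrement; the ternary
		 * is equivalent to
		 *	prev = (lavg->lg_cur - 1 + S_LOADAVG_SZ) %
		 *	    S_LOADAVG_SZ;
		 */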
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP).  If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
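	/*
	 * Illustrative summary (not in the original source) of the update
	 * below, with mtemp the seconds elapsed since the last update:
	 * in FLL mode the raw offset is averaged over the interval
	 * (time_offset / mtemp) before being folded into time_freq,
	 * while in PLL mode the offset is weighted by the interval
	 * (time_offset * mtemp) and damped by the square of the time
	 * constant; either result is then clamped to +-time_tolerance.
	 */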
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	 usec_per_tick.  This is called from the serial driver cdintr(),
 *	 or equivalent, at a high PIL.  Because the kernel keeps a
 *	 high-resolution time, the following code can accept either
 *	 the traditional argument pair, or the current highres timestamp
 *	 in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
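	/*
	 * Illustrative note (not in the original source): with hz == 100
	 * (usec_per_tick == 10000), an offset sample more than 5000 us
	 * away from the current estimate is treated as a glitch and the
	 * previous offset is substituted for it; only if such samples
	 * persist beyond MAXGLITCH seconds is the filter reseeded with
	 * the new value.
	 */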
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;
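	/*
	 * Worked example of the three-stage median filter above
	 * (illustrative, not in the original source): with samples
	 * pps_tf[] = {5, 3, 9} (newest first), the "2 0 1" branch fires,
	 * the median 5 becomes pps_offset, and the spread of the other
	 * two samples, 9 - 3 = 6 us, feeds the jitter estimate.  The
	 * pps_ff[] filter below works the same way for frequency.
	 */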
	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t utime;
	clock_t stime;
	int poke = 0;		/* notify another CPU */
	int user_mode;
	size_t rss;

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	CL_TICK(t);	/* Class specific tick processing */
	DTRACE_SCHED1(tick, kthread_t *, t);

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	/*
	 * Update process times.  Should use high res clock and state
	 * changes instead of statistical sampling method.  XXX
	 */
	if (user_mode) {
		pp->p_utime++;
		pp->p_task->tk_cpu_time++;
	} else {
		pp->p_stime++;
		pp->p_task->tk_cpu_time++;
	}
	as = pp->p_as;

	/*
	 * Update user profiling statistics.  Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, 1);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	utime = pp->p_utime;
	stime = pp->p_stime;

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer.
	 */
	if (user_mode &&
	    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGVTALRM);
	}

	if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGPROF);
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 */
	(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
	    (utime + stime)/hz, RCA_UNSAFE_SIGINFO);

	/*
	 *   (b) task.max-cpu-time resource control
	 */
	(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, pp, 1,
	    RCA_UNSAFE_SIGINFO);
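	/*
	 * Illustrative note (not in the original source): utime and
	 * stime are tick counts, so the division by hz above converts
	 * them to the seconds expected by the resource control; e.g.
	 * at hz == 100, 360000 accumulated ticks test as 3600 s of
	 * consumed CPU time.
	 */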
	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline = lbolt + ticks;
	clock_t timeleft;
	timeout_id_t id;

	if (panicstr && ticks > 0) {
		/*
		 * Timeouts aren't running, so all we can do is spin.
		 */
		drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	while ((timeleft = deadline - lbolt) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout(id);
	}
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	clock_t deadline = lbolt + ticks;
	clock_t rc;

	mutex_enter(&curthread->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&curthread->t_delay_cv,
		    &curthread->t_delay_lock, deadline);
	} while (rc > 0);
	mutex_exit(&curthread->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}
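/*
 * Usage sketch (illustrative, not in the original source): a driver
 * that wants to pause for 50 ms, but abort early on a signal, might do
 *
 *	if (delay_sig(drv_usectohz(50000)) == EINTR)
 *		return (EINTR);
 *
 * while delay() performs the same wait but cannot be interrupted.
 */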
#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past; check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, then set it to
		 * the time that was passed in as a rough estimate.  If we
		 * don't have an estimate, then set the clock back to a time
		 * when Oliver North, ALF and Dire Straits were all on the
		 * collective brain: 1987.
		 */
		timestruc_t tmp;
		if (approx == -1)
			ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		else
			ts.tv_sec = approx;
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
			    " dead batteries?");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int	timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();
	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timedelta = 0;
	timechanged++;
	hr_clock_unlock(spl);
}

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts.  Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to
		 * increase lbolt in case the dump device driver relies on
		 * this for timeouts.  Note that we rely on deadman() being
		 * invoked once per second, and credit lbolt and lbolt64
		 * with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems
		 * and the corresponding timer is set, decrement it and
		 * re-enter the panic code to abort it and advance to the
		 * next state.  The panic states and triggers are explained
		 * in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (lbolt != CPU->cpu_deadman_lbolt) {
		CPU->cpu_deadman_lbolt = lbolt;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (CPU->cpu_deadman_countdown-- > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_lbolt = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  The simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 *
	 * The interval must be one second, in accordance with the
	 * code in deadman() above that credits lbolt during panic.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
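/*
 * Illustrative note, not part of the original file: deadman_init()
 * arms the panic path only when "snooping" is set, so the deadman is
 * typically enabled from /etc/system before boot, e.g.:
 *
 *	set snooping = 1
 *
 * deadman_seconds then defaults to snoop_interval / MICROSEC unless
 * it has been patched explicitly.
 */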
/*
 * tod_fault() updates the tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal';
 *     currently used for debugging only.
 * (2) The following four cases, detected by the tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			if (&plat_tod_fault)
				plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				if (&plat_tod_fault)
					plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    "Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				if (&plat_tod_fault)
					plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    "Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

void
tod_fault_reset(void)
{
	tod_fault_reset_flag = 1;
}
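/*
 * Illustrative sketch, not part of the original file: the
 * "if (&plat_tod_fault)" tests in tod_fault() above are weak-symbol
 * presence checks, so a platform opts into fault notifications just
 * by supplying a strong definition.  This hypothetical handler only
 * logs the transition; a real platform handler might record the
 * state for later diagnosis.
 */
void
plat_tod_fault(enum tod_fault_type reason)
{
	/* called from tod_fault() above, with tod_lock held */
	cmn_err(CE_NOTE, "plat_tod_fault: fault type %d", (int)reason);
}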
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT)) {
		return (tod);
	}

	/*
	 * Update prev_tod and prev_tick values on the first run.
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * For either of these conditions, we need to reset ourselves
	 * and start validation from zero, since each condition
	 * indicates that the TOD will be updated with a new value.
	 * Also, note that tod_needsync will be reset in clock().
	 */
	if (tod_needsync || tod_fault_reset_flag) {
		firsttime = 1;
		prev_tod = 0;
		prev_tick = 0;
		dtick_avg = TOD_REF_FREQ;

		if (tod_fault_reset_flag)
			tod_fault_reset_flag = 0;

		return (tod);
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick, so that
			 * diff_tick is calculated since the first
			 * diff_tod == 0.
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as the variation from the
		 * reference frequency in quartiles.
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/* ERROR - tod jumped */
				tod_bad = TOD_JUMPED;
				off = (int)diff_tod;
			} else if (dtick_delta) {
				/* ERROR - change in clock rate */
				tod_bad = TOD_RATECHANGED;
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr, since we are going to fault
		 * the TOD chip anyway here.
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime.
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
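/*
 * Illustrative sketch, not part of the original file: a compact
 * model of the rate test in tod_validate() above.  dtick_avg is
 * low-pass filtered (new = old + (sample - old) / TOD_FILTER_N),
 * and TOD_RATECHANGED is raised only once the average strays at
 * least a full quartile (25% of TOD_REF_FREQ) from nominal.  The
 * function name is hypothetical.
 */
static int
example_ratechanged(long dtick_avg)
{
	/* nonzero iff |dtick_avg - TOD_REF_FREQ| >= TOD_REF_FREQ / 4 */
	return ((dtick_avg - TOD_REF_FREQ) / (TOD_REF_FREQ >> 2) != 0);
}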
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
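/*
 * Illustrative sketch, not part of the original file: the
 * floating-point recurrence that the fixed-point loop above
 * approximates.  Shown as a comment because the kernel cannot use
 * floating point; as user-land C it would be:
 *
 *	#include <math.h>
 *
 *	void
 *	example_loadavg(int nrun, double ave[3])
 *	{
 *		static const double tau[3] = { 60.0, 300.0, 900.0 };
 *		int i;
 *
 *		for (i = 0; i < 3; i++)
 *			ave[i] += (nrun - ave[i]) *
 *			    (1.0 - exp(-1.0 / tau[i]));
 *	}
 *
 * Called once per second with the current run-queue length, this
 * decays each average toward nrun with 1-, 5- and 15-minute time
 * constants; scaling (1 - exp(-1/tau)) by 2^13 yields the 135, 27
 * and 9 entries in f[3].
 */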