/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>

#ifdef __sparc
#include <sys/wdt.h>
#endif

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.
 * It is used as a watchdog timer to disable the PPS discipline should
 * the PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in Hz since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;		/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate"		/* TOD_RATECHANGED */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void clock_tick(kthread_t *);
static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

static void
clock(void)
{
	kthread_t	*t;
	kmutex_t	*plockp;	/* pointer to thread's process lock */
	int	pinned_intr = 0;
	uint_t	nrunnable, nrunning;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	int	exiting;
	extern void	set_anoninfo();
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int i;

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();


	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}
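
	/*
	 * Illustrative walk-through (added commentary, not from the
	 * original source): time_phase accumulates in scaled-microsecond
	 * units, with SCALE_PHASE fractional steps per microsecond.
	 * Suppose time_adj works out to a few scaled units per tick;
	 * once time_phase crosses +/-FINEUSEC, ltemp becomes the number
	 * of whole microseconds accumulated, those microseconds are
	 * converted to nanoseconds (ltemp * (NANOSEC/MICROSEC)) and
	 * folded into timedelta under hr_clock_lock(), and only the
	 * sub-microsecond residue stays behind in time_phase.  The
	 * exact magnitudes of FINEUSEC and SCALE_PHASE live in timex.h.
	 */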
	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 *
	 * Did we pin another interrupt thread?  Need to check this before
	 * grabbing any adaptive locks, since if we block on a lock the
	 * pinned thread could escape.  Note that this is just a heuristic;
	 * if we take multiple laps through clock() without returning from
	 * the interrupt because we have another clock tick pending, then
	 * the pinned interrupt could be released by one of the previous
	 * laps.  The only consequence is that the CPU will be counted as
	 * in idle (or wait) state once the pinned interrupt is released.
	 * Since this accounting is inaccurate by nature, this isn't a big
	 * deal --- but we should try to get it right in the common case
	 * where we only call clock() once per interrupt.
	 */
	if (curthread->t_intr != NULL)
		pinned_intr = (curthread->t_intr->t_flag & T_INTR_THREAD);

	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec)
		loadavg_update();

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}
		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);
	/*
	 * Do tick processing for all the active threads running in
	 * the system.  We're trying to be more fair by walking the
	 * list of CPUs starting from a different CPU each time.
	 */
	cp = clock_cpu_list;
	nrunning = 0;
	do {
		klwp_id_t lwp;
		int intr;
		int thread_away;

		/*
		 * Don't do any tick processing on CPUs that
		 * aren't even in the system or aren't up yet.
		 */
		if ((cp->cpu_flags & CPU_EXISTS) == 0) {
			continue;
		}

		/*
		 * The locking here is rather tricky.  We use
		 * thread_free_lock to keep the currently running
		 * thread from being freed or recycled while we're
		 * looking at it.  We can then check if the thread
		 * is exiting and get the appropriate p_lock if it
		 * is not.  We have to be careful, though, because
		 * the _process_ can still be freed while we're
		 * holding thread_free_lock.  To avoid touching the
		 * proc structure we put a pointer to the p_lock in the
		 * thread structure.  The p_lock is persistent so we
		 * can acquire it even if the process is gone.  At that
		 * point we can check (again) if the thread is exiting
		 * and either drop the lock or do the tick processing.
		 */
		mutex_enter(&thread_free_lock);
		/*
		 * We cannot hold the cpu_lock to prevent the
		 * cpu_list from changing in the clock interrupt.
		 * As long as we don't block (or don't get pre-empted)
		 * the cpu_list will not change (all threads are paused
		 * before list modification).  If the list does change
		 * any deleted cpu structures will remain with cpu_next
		 * set to NULL, hence the following test.
		 */
		if (cp->cpu_next == NULL) {
			mutex_exit(&thread_free_lock);
			break;
		}
		t = cp->cpu_thread;	/* Current running thread */
		if (CPU == cp) {
			/*
			 * 't' will be the clock interrupt thread on this
			 * CPU.  Use the pinned thread (if any) on this CPU
			 * as the target of the clock tick.  If we pinned
			 * an interrupt, though, just keep using the clock
			 * interrupt thread since the formerly pinned one
			 * may have gone away.  One interrupt thread is as
			 * good as another, and this means we don't have
			 * to continue to check pinned_intr in subsequent
			 * code.
			 */
			ASSERT(t == curthread);
			if (t->t_intr != NULL && !pinned_intr)
				t = t->t_intr;
		}

		intr = t->t_flag & T_INTR_THREAD;
		lwp = ttolwp(t);
		if (lwp == NULL || (t->t_proc_flag & TP_LWPEXIT) || intr) {
			/*
			 * Thread is exiting (or uninteresting) so don't
			 * do tick processing or grab p_lock.  Once we
			 * drop thread_free_lock we can't look inside the
			 * thread or lwp structure, since the thread may
			 * have gone away.
			 */
			exiting = 1;
		} else {
			/*
			 * OK, try to grab the process lock.  See
			 * comments above for why we're not using
			 * ttoproc(t)->p_lockp here.
			 */
			plockp = t->t_plockp;
			mutex_enter(plockp);
			/* See above comment. */
			if (cp->cpu_next == NULL) {
				mutex_exit(plockp);
				mutex_exit(&thread_free_lock);
				break;
			}
			/*
			 * The thread may have exited between when we
			 * checked above, and when we got the p_lock.
			 */
			if (t->t_proc_flag & TP_LWPEXIT) {
				mutex_exit(plockp);
				exiting = 1;
			} else {
				exiting = 0;
			}
		}
		/*
		 * Either we have the p_lock for the thread's process,
		 * or we don't care about the thread structure any more.
		 * Either way we can drop thread_free_lock.
		 */
		mutex_exit(&thread_free_lock);

		/*
		 * Update user, system, and idle cpu times.
		 */
		if (one_sec) {
			nrunning++;
			cp->cpu_part->cp_nrunning++;
		}
		/*
		 * If we haven't done tick processing for this
		 * lwp, then do it now.  Since we don't hold the
		 * lwp down on a CPU it can migrate and show up
		 * more than once, hence the lbolt check.
		 *
		 * Also, make sure that it's okay to perform the
		 * tick processing before calling clock_tick.
		 * Setting thread_away to a TRUE value (i.e. not 0)
		 * results in tick processing not being performed for
		 * that thread.  Or, in other words, keeps the thread
		 * away from clock_tick processing.
		 */
		thread_away = ((cp->cpu_flags & CPU_QUIESCED) ||
		    CPU_ON_INTR(cp) || intr ||
		    (cp->cpu_dispthread == cp->cpu_idle_thread) || exiting);

		if ((!thread_away) && (lbolt - t->t_lbolt != 0)) {
			t->t_lbolt = lbolt;
			clock_tick(t);
		}

		if (!exiting)
			mutex_exit(plockp);
	} while ((cp = cp->cpu_next) != clock_cpu_list);

	clock_cpu_list = clock_cpu_list->cpu_next;

	/*
	 * bump time in ticks
	 *
	 * We rely on there being only one clock thread and hence
	 * don't need a lock to protect lbolt.
	 */
	lbolt++;
	atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	/*
	 * Schedule timeout() requests if any are due at this time.
	 */
	callout_schedule();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm.  On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;
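
		/*
		 * Added commentary (illustrative, not from the original
		 * source): time_tolerance is kept in scaled ppm, so the
		 * division by SCALE_USEC yields plain ppm, which is
		 * microseconds of possible error per elapsed second.
		 * With the default tolerance of MAXFREQ, the error bound
		 * therefore grows by a few hundred microseconds each
		 * second until the next ntp_adjtime() call resets it.
		 */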
		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 *	- if dosynctodr is 1, then compute the drift between
		 *	  the tod chip and software time and adjust one or
		 *	  the other depending on the circumstances
		 *
		 *	- if dosynctodr is 0, then the tod chip is independent
		 *	  of the software clock and should not be adjusted,
		 *	  but allowed to free run.  This allows NTP to sync
		 *	  hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					hrestime_isvalid = 1;
					tod_needsync = 0;
					hr_clock_unlock(s);
				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = lbolt64;
					s = hr_clock_lock();
					timedelta = (int64_t)drift*NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		sysinfo.updates++;
		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv
			    + k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API.  We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large.  If it is, we put the largest value
			 * that we can use into avenrun[i].  This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[]
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);
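
		/*
		 * Added commentary (illustrative, not from the original
		 * source): hp_avenrun[] carries 16 fractional bits while
		 * avenrun[] carries FSHIFT, hence the >> (16 - FSHIFT)
		 * above.  For example, with FSHIFT's traditional value
		 * of 8, a steady load of 1.0 is hp_avenrun[i] == 65536
		 * and becomes avenrun[i] == 256; consumers divide by
		 * FSCALE (1 << FSHIFT) to recover 1.0.
		 */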
		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t hdlr;
	cyc_time_t when;

	hdlr.cyh_func = (cyc_func_t)clock;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	mutex_enter(&cpu_lock);
	clock_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
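
/*
 * Added commentary (illustrative, not from the original source):
 * genloadavg() walks backwards from the most recent slot of the
 * S_LOADAVG_SZ-entry circular buffer and averages up to S_MOVAVG_SZ
 * samples, i.e. roughly the last ten one-second readings.  The same
 * index arithmetic on a hypothetical 4-slot ring: lg_cur == 1 means
 * slot 0 holds the newest sample, so the walk visits 0, 3, 2, ...,
 * which is equivalent to wrapping via (spos - i + size) % size.
 */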
/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP).  If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}
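
/*
 * Added commentary (illustrative, not from the original source): in
 * FLL mode the frequency correction above is simply the measured
 * offset averaged over the mtemp seconds since the last update, while
 * in PLL mode the offset is multiplied by the elapsed time and
 * attenuated by 4^time_constant (the 1 << (time_constant * 2) term),
 * so a larger time constant produces a "stiffer", slower-responding
 * loop.  For example, with time_constant == 2 the PLL gain term is
 * divided by 1 << 4 == 16.
 */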
/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	usec_per_tick.  This is called from the serial driver cdintr(),
 *	or equivalent, at a high PIL.  Because the kernel keeps a
 *	high-resolution time, the following code can accept either
 *	the traditional argument pair, or the current highres timestamp
 *	in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;
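
	/*
	 * Added commentary (illustrative, not from the original source):
	 * the median-of-three selection above can be traced with the
	 * sample values pps_tf[] = {40, 10, 30} (newest first): 40 > 10
	 * holds, 10 > 30 and 30 > 40 both fail, so the "0 2 1" arm
	 * fires, pps_offset becomes the median 30, and the jitter
	 * estimate v_usec becomes 40 - 10 = 30, the spread of the two
	 * non-median samples.
	 */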
	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t	utime;
	clock_t	stime;
	int	poke = 0;		/* notify another CPU */
	int	user_mode;
	size_t	rss;

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	CL_TICK(t);	/* Class specific tick processing */
	DTRACE_SCHED1(tick, kthread_t *, t);

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	/*
	 * Update process times.  Should use high res clock and state
	 * changes instead of statistical sampling method.  XXX
	 */
	if (user_mode) {
		pp->p_utime++;
		pp->p_task->tk_cpu_time++;
	} else {
		pp->p_stime++;
		pp->p_task->tk_cpu_time++;
	}
	as = pp->p_as;

	/*
	 * Update user profiling statistics.  Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, 1);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	utime = pp->p_utime;
	stime = pp->p_stime;

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer.
	 */
	if (user_mode &&
	    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGVTALRM);
	}

	if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGPROF);
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 */
	(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
	    (utime + stime)/hz, RCA_UNSAFE_SIGINFO);

	/*
	 *   (b) task.max-cpu-time resource control
	 */
	(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, pp, 1,
	    RCA_UNSAFE_SIGINFO);

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}
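
/*
 * Added commentary (illustrative, not from the original source): the
 * old-style profil(2) mapping used by profil_tick() below treats
 * pr_scale as a 16.16 fixed-point fraction.  For a hypothetical
 * pr_scale of 0x8000 (one counter per two text bytes) and a pc 0x100
 * bytes past pr_off, byteoff works out to (0 * 0x8000) +
 * ((0x100 * 0x8000) >> 16) == 0x80, which selects the uint16_t
 * counter at slot 0x80 / sizeof (uint16_t) == 64.
 */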
void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline = lbolt + ticks;
	clock_t timeleft;
	timeout_id_t id;

	if (panicstr && ticks > 0) {
		/*
		 * Timeouts aren't running, so all we can do is spin.
		 */
		drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	while ((timeleft = deadline - lbolt) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout(id);
	}
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	clock_t deadline = lbolt + ticks;
	clock_t rc;

	mutex_enter(&curthread->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&curthread->t_delay_cv,
		    &curthread->t_delay_lock, deadline);
	} while (rc > 0);
	mutex_exit(&curthread->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}
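
/*
 * Added usage sketch (illustrative, not from the original source): a
 * driver that wants to pause for roughly 100ms while still honoring
 * signals might do the following, where drv_usectohz() converts
 * microseconds to ticks:
 *
 *	if (delay_sig(drv_usectohz(100000)) == EINTR)
 *		return (EINTR);		hypothetical early-out on signal
 *
 * delay() is the uninterruptible variant of the same wait.
 */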
#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, then set it to
		 * the time that was passed in as a rough estimate.  If we
		 * don't have an estimate, then set the clock back to a time
		 * when Oliver North, ALF and Dire Straits were all on the
		 * collective brain: 1987.
		 */
		timestruc_t tmp;
		if (approx == -1)
			ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		else
			ts.tv_sec = approx;
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl
		 * high to avoid getting preempted between the tod_set and
		 * tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
			    " dead batteries?");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int	hrestime_isvalid = 0;

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();
	hrestime = *ts;
	/*
	 * hrestime must be visible before hrestime_isvalid
	 * is set to 1
	 */
	membar_enter();
	timedelta = 0;
	hrestime_isvalid = 1;
	hr_clock_unlock(spl);
}

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts.  Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

#ifdef __sparc
		watchdog_pat();
#endif

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (lbolt != CPU->cpu_deadman_lbolt) {
		CPU->cpu_deadman_lbolt = lbolt;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (CPU->cpu_deadman_countdown-- > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_lbolt = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 *
	 * The interval must be one second in accordance with the
	 * code in deadman() above to increase lbolt during panic.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}


void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
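
/*
 * Added commentary (illustrative, not from the original source): the
 * stagger in deadman_online() spreads the per-CPU firing times evenly
 * across one second.  On a hypothetical system with NCPU == 64, cpu 0
 * fires at offset 0, cpu 1 at about 15.6 ms, cpu 2 at about 31.2 ms,
 * and so on, while every CPU still fires once per NANOSEC (one
 * second) as deadman() requires.
 */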
/*
 * tod_fault() is for updating the TOD validation mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal';
 *     currently used for debugging only.
 * (2) The following four cases, detected by the TOD validation mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x] -- "
				    "stopped tracking Time of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s] -- "
				    "stopped tracking Time of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

void
tod_fault_reset(void)
{
	tod_fault_reset_flag = 1;
}
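/*
 * Illustrative sketch of how a platform TOD driver typically feeds
 * the validation code below (chip_read_seconds() is a hypothetical
 * chip-access function; details vary by platform): the raw seconds
 * reading is funneled through tod_validate() so the checks can latch
 * a fault via tod_fault().
 *
 *	timestruc_t
 *	tod_get(void)
 *	{
 *		timestruc_t ts;
 *
 *		ASSERT(MUTEX_HELD(&tod_lock));
 *		ts.tv_sec = tod_validate(chip_read_seconds());
 *		ts.tv_nsec = 0;
 *		return (ts);
 *	}
 */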
/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * Update prev_tod and prev_tick values for the first run.
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * For either of these conditions, we need to reset ourselves
	 * and start validation from zero, since each condition
	 * indicates that the TOD will be updated with a new value.
	 * Also note that tod_needsync will be reset in clock().
	 */
	if (tod_needsync || tod_fault_reset_flag) {
		firsttime = 1;
		prev_tod = 0;
		prev_tick = 0;
		dtick_avg = TOD_REF_FREQ;

		if (tod_fault_reset_flag)
			tod_fault_reset_flag = 0;

		return (tod);
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Don't update prev_tick, so that diff_tick
			 * is measured from the first diff_tod == 0.
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as the variation from the
		 * reference frequency in quartiles.
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/* ERROR - tod jumped */
				tod_bad = TOD_JUMPED;
				off = (int)diff_tod;
			} else if (dtick_delta) {
				/* ERROR - change in clock rate */
				tod_bad = TOD_RATECHANGED;
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr, since we are going to fault
		 * the TOD chip anyway here.
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime.
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
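/*
 * Illustrative exercise of the tod_unit_test hook above from a live
 * system (a sketch; the exact mdb invocations may vary, and
 * tod_test_injector selects the jump size or stuck bit):
 *
 *	# echo 'tod_test_injector/W 5' | mdb -kw	(arm the injector)
 *	# echo 'tod_unit_test/W 1' | mdb -kw		(simulate a jumped TOD)
 *	# echo 'tod_unit_test/W 4' | mdb -kw		(clear the fault state)
 */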
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * A little hoop-jumping to avoid integer overflow: q and r split
	 * hp_ave[i] into its upper and lower 16 bits, pre-scaled by 128,
	 * so that the two subtracted terms together equal
	 * (hp_ave[i] * f[i]) >> 13 without ever forming the full product
	 * hp_ave[i] * f[i].
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
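/*
 * For reference, the floating-point recurrence that the fixed-point
 * loop above approximates (a sketch, assuming calcloadavg() is driven
 * roughly once per second from clock()):
 *
 *	avg = avg * exp(-1/T) + nrun * (1 - exp(-1/T))
 *
 * with T = 60, 300, and 900 seconds for the three entries of hp_ave[],
 * matching the three constants in f[] shifted by 13 bits.
 */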