/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

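/*
 * System-wide sample buffer for the 10-second moving load average;
 * loadavg_update() fills it once per second and genloadavg() reads it
 * back when the one-second averages are recomputed.
 */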
static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * second's rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in Hz since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void clock_tick(kthread_t *);
static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

static void
clock(void)
{
	kthread_t	*t;
	kmutex_t	*plockp;	/* pointer to thread's process lock */
	int	pinned_intr = 0;
	uint_t	nrunnable, nrunning;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	int	exiting;
	extern void	set_anoninfo();
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int	s;
	int	do_lgrp_load;
	int	i;

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();


	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with : if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 *
	 * Did we pin another interrupt thread?  Need to check this before
	 * grabbing any adaptive locks, since if we block on a lock the
	 * pinned thread could escape.  Note that this is just a heuristic;
	 * if we take multiple laps through clock() without returning from
	 * the interrupt because we have another clock tick pending, then
	 * the pinned interrupt could be released by one of the previous
	 * laps.  The only consequence is that the CPU will be counted as
	 * in idle (or wait) state once the pinned interrupt is released.
	 * Since this accounting is inaccurate by nature, this isn't a big
	 * deal --- but we should try to get it right in the common case
	 * where we only call clock() once per interrupt.
	 */
	if (curthread->t_intr != NULL)
		pinned_intr = (curthread->t_intr->t_flag & T_INTR_THREAD);

	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec)
		loadavg_update();

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	/*
	 * Do tick processing for all the active threads running in
	 * the system.  We're trying to be more fair by walking the
	 * list of CPUs starting from a different CPU each time.
	 */
	cp = clock_cpu_list;
	nrunning = 0;
	do {
		klwp_id_t lwp;
		int intr;
		int thread_away;

		/*
		 * Don't do any tick processing on CPUs that
		 * aren't even in the system or aren't up yet.
		 */
		if ((cp->cpu_flags & CPU_EXISTS) == 0) {
			continue;
		}

		/*
		 * The locking here is rather tricky.  We use
		 * thread_free_lock to keep the currently running
		 * thread from being freed or recycled while we're
		 * looking at it.  We can then check if the thread
		 * is exiting and get the appropriate p_lock if it
		 * is not.  We have to be careful, though, because
		 * the _process_ can still be freed while we're
		 * holding thread_free_lock.  To avoid touching the
		 * proc structure we put a pointer to the p_lock in the
		 * thread structure.  The p_lock is persistent so we
		 * can acquire it even if the process is gone.  At that
		 * point we can check (again) if the thread is exiting
		 * and either drop the lock or do the tick processing.
		 */
		mutex_enter(&thread_free_lock);
		/*
		 * We cannot hold the cpu_lock to prevent the
		 * cpu_list from changing in the clock interrupt.
		 * As long as we don't block (or don't get pre-empted)
		 * the cpu_list will not change (all threads are paused
		 * before list modification).  If the list does change
		 * any deleted cpu structures will remain with cpu_next
		 * set to NULL, hence the following test.
		 */
		if (cp->cpu_next == NULL) {
			mutex_exit(&thread_free_lock);
			break;
		}
		t = cp->cpu_thread;	/* Current running thread */
		if (CPU == cp) {
			/*
			 * 't' will be the clock interrupt thread on this
			 * CPU.  Use the pinned thread (if any) on this CPU
			 * as the target of the clock tick.  If we pinned
			 * an interrupt, though, just keep using the clock
			 * interrupt thread since the formerly pinned one
			 * may have gone away.
			 * One interrupt thread is as
			 * good as another, and this means we don't have
			 * to continue to check pinned_intr in subsequent
			 * code.
			 */
			ASSERT(t == curthread);
			if (t->t_intr != NULL && !pinned_intr)
				t = t->t_intr;
		}

		intr = t->t_flag & T_INTR_THREAD;
		lwp = ttolwp(t);
		if (lwp == NULL || (t->t_proc_flag & TP_LWPEXIT) || intr) {
			/*
			 * Thread is exiting (or uninteresting) so don't
			 * do tick processing or grab p_lock.  Once we
			 * drop thread_free_lock we can't look inside the
			 * thread or lwp structure, since the thread may
			 * have gone away.
			 */
			exiting = 1;
		} else {
			/*
			 * OK, try to grab the process lock.  See
			 * comments above for why we're not using
			 * ttoproc(t)->p_lockp here.
			 */
			plockp = t->t_plockp;
			mutex_enter(plockp);
			/* See above comment. */
			if (cp->cpu_next == NULL) {
				mutex_exit(plockp);
				mutex_exit(&thread_free_lock);
				break;
			}
			/*
			 * The thread may have exited between when we
			 * checked above, and when we got the p_lock.
			 */
			if (t->t_proc_flag & TP_LWPEXIT) {
				mutex_exit(plockp);
				exiting = 1;
			} else {
				exiting = 0;
			}
		}
		/*
		 * Either we have the p_lock for the thread's process,
		 * or we don't care about the thread structure any more.
		 * Either way we can drop thread_free_lock.
		 */
		mutex_exit(&thread_free_lock);

		/*
		 * Update user, system, and idle cpu times.
		 */
		if (one_sec) {
			nrunning++;
			cp->cpu_part->cp_nrunning++;
		}
		/*
		 * If we haven't done tick processing for this
		 * lwp, then do it now.  Since we don't hold the
		 * lwp down on a CPU it can migrate and show up
		 * more than once, hence the lbolt check.
		 *
		 * Also, make sure that it's okay to perform the
		 * tick processing before calling clock_tick.
		 * Setting thread_away to a TRUE value (i.e. not 0)
		 * results in tick processing not being performed for
		 * that thread.  Or, in other words, keeps the thread
		 * away from clock_tick processing.
		 */
		thread_away = ((cp->cpu_flags & CPU_QUIESCED) ||
		    CPU_ON_INTR(cp) || intr ||
		    (cp->cpu_dispthread == cp->cpu_idle_thread) || exiting);

		if ((!thread_away) && (lbolt - t->t_lbolt != 0)) {
			t->t_lbolt = lbolt;
			clock_tick(t);
		}

		if (!exiting)
			mutex_exit(plockp);
	} while ((cp = cp->cpu_next) != clock_cpu_list);

	clock_cpu_list = clock_cpu_list->cpu_next;

	/*
	 * bump time in ticks
	 *
	 * We rely on there being only one clock thread and hence
	 * don't need a lock to protect lbolt.
	 */
	lbolt++;
	atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	/*
	 * Schedule timeout() requests if any are due at this time.
	 */
	callout_schedule();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm.  On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
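		/*
		 * Concretely: setting STA_INS moves TIME_OK to TIME_INS;
		 * at the next day boundary the clock is stepped back one
		 * second and the state runs through TIME_OOP and TIME_WAIT,
		 * returning to TIME_OK once the daemon clears STA_INS.
		 * STA_DEL steps the clock forward instead and goes straight
		 * to TIME_WAIT.
		 */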
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
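		/*
		 * ddi_hardpps() zeroes pps_valid on each PPS pulse; if it
		 * counts all the way back up to PPS_VALID here, the signal
		 * has been lost, so widen the error estimates and clear
		 * the PPS status bits.
		 */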
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 *   - if dosynctodr is 1, then compute the drift between
		 *	the tod chip and software time and adjust one or
		 *	the other depending on the circumstances
		 *
		 *   - if dosynctodr is 0, then the tod chip is independent
		 *	of the software clock and should not be adjusted,
		 *	but allowed to free run.  This allows NTP to sync
		 *	hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = lbolt64;
					s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		sysinfo.updates++;
		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API.  We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large.  If it is, we put the largest value
			 * that we can use into avenrun[i].  This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64_t.
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;
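		/*
		 * hp_avenrun[] keeps 16 fractional bits and avenrun[]
		 * keeps FSHIFT; with the usual FSHIFT of 8, a steady
		 * load of 1.00 reads back from avenrun[0] as 256,
		 * i.e. (1 << FSHIFT).
		 */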

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t hdlr;
	cyc_time_t when;

	hdlr.cyh_func = (cyc_func_t)clock;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	mutex_enter(&cpu_lock);
	clock_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */
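/*
 * Each load-average buffer is circular: lg_cur is the next slot to
 * fill and lg_len saturates at S_LOADAVG_SZ once the buffer has
 * wrapped around.
 */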

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP).  If the caller's
 * time is far different from the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;
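	/*
	 * time_offset now holds the clamped phase error in SCALE_UPDATE
	 * fixed point; clock() amortizes it at no more than
	 * MAXPHASE / MINSEC microseconds per second (see the one_sec
	 * processing there).
	 */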

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	usec_per_tick.  This is called from the serial driver cdintr(),
 *	or equivalent, at a high PIL.  Because the kernel keeps a
 *	high-resolution time, the following code can accept either
 *	the traditional argument pair, or the current highres timestamp
 *	in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
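	/*
	 * A PPS pulse has arrived: freshen the signal status and clear
	 * the watchdog counter that clock() increments once per second.
	 */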
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.
	 * If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
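/*
 * The caller (clock()) holds the thread's p_lock across this call, so
 * the lwp cannot exit while we examine it; see the ASSERT below.
 */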
void
clock_tick(kthread_t *t)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t	utime;
	clock_t	stime;
	int	poke = 0;	/* notify another CPU */
	int	user_mode;
	size_t	rss;

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	CL_TICK(t);	/* Class specific tick processing */
	DTRACE_SCHED1(tick, kthread_t *, t);

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	/*
	 * Update process times.  Should use high res clock and state
	 * changes instead of statistical sampling method.  XXX
	 */
	if (user_mode) {
		pp->p_utime++;
		pp->p_task->tk_cpu_time++;
	} else {
		pp->p_stime++;
		pp->p_task->tk_cpu_time++;
	}
	as = pp->p_as;

	/*
	 * Update user profiling statistics.  Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, 1);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	utime = pp->p_utime;
	stime = pp->p_stime;

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer.
	 */
	if (user_mode &&
	    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGVTALRM);
	}

	if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGPROF);
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 */
	(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
	    (utime + stime)/hz, RCA_UNSAFE_SIGINFO);

	/*
	 *   (b) task.max-cpu-time resource control
	 */
	(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, pp, 1,
	    RCA_UNSAFE_SIGINFO);

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

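/*
 * profil_tick() consumes the profiling ticks that clock_tick() posts
 * to lwp_oweupc; the cas32() loop below claims them atomically, since
 * new ticks may be posted concurrently from the clock interrupt.
 */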
void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline = lbolt + ticks;
	clock_t timeleft;
	timeout_id_t id;

	if (panicstr && ticks > 0) {
		/*
		 * Timeouts aren't running, so all we can do is spin.
		 */
		drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	while ((timeleft = deadline - lbolt) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout(id);
	}
}

/*
 * Like delay, but interruptible by a signal.
 */
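/*
 * Returns EINTR if the wait was broken by a signal, otherwise 0.
 * cv_timedwait_sig() returns 0 on a signal, -1 once the deadline has
 * passed, and a positive value on any earlier wakeup, hence the loop.
 */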
int
delay_sig(clock_t ticks)
{
	clock_t deadline = lbolt + ticks;
	clock_t rc;

	mutex_enter(&curthread->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&curthread->t_delay_cv,
		    &curthread->t_delay_lock, deadline);
	} while (rc > 0);
	mutex_exit(&curthread->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}

#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, then set it to
		 * the time that was passed in as a rough estimate.  If we
		 * don't have an estimate, then set the clock back to a time
		 * when Oliver North, ALF and Dire Straits were all on the
		 * collective brain: 1987.
		 */
		timestruc_t tmp;
		if (approx == -1)
			ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		else
			ts.tv_sec = approx;
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl
		 * high to avoid getting preempted between the tod_set and
		 * tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
			    " dead batteries?");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int	timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();
	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timedelta = 0;
	timechanged++;
	hr_clock_unlock(spl);
}

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts.  Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

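	/*
	 * Normal (non-panic) operation: lbolt must have advanced since
	 * the last deadman() firing one second ago.  If it stays frozen
	 * for deadman_seconds consecutive firings, clock() has stopped
	 * and the system is declared hung.
	 */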
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (lbolt != CPU->cpu_deadman_lbolt) {
		CPU->cpu_deadman_lbolt = lbolt;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (CPU->cpu_deadman_countdown-- > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will typically
	 * be a multiple of the total number of CPUs in the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_lbolt = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  The simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 *
	 * The interval must be one second, in accordance with the
	 * code in deadman() above that credits lbolt with hz ticks
	 * per invocation during panic.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}

/*
 * tod_fault() is for updating the state of the TOD validate mechanism:
 * (1) TOD_NOFAULT: for resetting the state to 'normal';
 *     currently used for debugging only.
 * (2) The following four cases are detected by the TOD validate mechanism:
 *       TOD_REVERSED: current tod value is less than the previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from the previous
 *       value.
 *       TOD_RATECHANGED: the ratio between the average tod delta and the
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writable, e.g. because it is
 *     a virtual TOD provided by a hypervisor.
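 *
 * tod_fault() returns the resulting value of tod_faulted.  Note that once
 * the state leaves TOD_NOFAULT, further fault requests are ignored; the
 * state can only be restored by an explicit TOD_NOFAULT reset.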
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

void
tod_fault_reset()
{
	tod_fault_reset_flag = 1;
}

/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than the previous value.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from the previous value.
 *   TOD_RATECHANGED: the ratio between the average tod delta and the
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
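	 *
	 * For example, validation could be disabled with a single line
	 * in /etc/system (a tuning sketch; it takes effect at the next
	 * boot):
	 *
	 *	set tod_validate_enable = 0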
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * Update the prev_tod and prev_tick values for the first run.
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * For either of these conditions, we need to reset ourselves
	 * and start validation from zero, since each condition
	 * indicates that the TOD will be updated with a new value.
	 * Also note that tod_needsync will be reset in clock().
	 */
	if (tod_needsync || tod_fault_reset_flag) {
		firsttime = 1;
		prev_tod = 0;
		prev_tick = 0;
		dtick_avg = TOD_REF_FREQ;

		if (tod_fault_reset_flag)
			tod_fault_reset_flag = 0;

		return (tod);
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1:		/* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2:		/* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3:		/* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4:		/* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick, so that
			 * diff_tick is calculated since the first
			 * diff_tod == 0.
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update the dtick average */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as the variation from the
		 * reference frequency, in quartiles.
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device, the
		 * algorithm can calculate a rate that is beyond
		 * tolerance when the number of elapsed seconds is low
		 * (less than 5), causing a spurious error.
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/* ERROR - tod jumped */
				tod_bad = TOD_JUMPED;
				off = (int)diff_tod;
			} else if (dtick_delta) {
				/* ERROR - change in clock rate */
				tod_bad = TOD_RATECHANGED;
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr, since we are going to fault
		 * the TOD chip anyway here.
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime.
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}

static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute the load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).
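	 * As a point of reference, each average decays toward the
	 * instantaneous run-queue length roughly as
	 *
	 *	ave += f * (nrun - ave),  f = 1 - exp(-1/T)
	 *
	 * for T = 60, 300 and 900.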
	 * The constants in f[3] are for exponential decay:
	 *	(1 - exp(-1/60)) << 13 = 135,
	 *	(1 - exp(-1/300)) << 13 = 27,
	 *	(1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * A little hoop-jumping to avoid integer overflow.
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
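
/*
 * A worked sketch of the fixed-point arithmetic above (not new behavior):
 * splitting hp_ave[i] into its high and low 16-bit halves, each pre-scaled
 * by 2^7, gives q + r / 2^16 = hp_ave[i] / 2^9, so the increment computed
 * in the loop is, modulo truncation,
 *
 *	(f[i] * ((nrun << 9) - hp_ave[i])) >> 13
 *
 * i.e. the decay step ave += f * (nrun - ave), with hp_ave[i] carrying the
 * average scaled by 2^9 and f[i] scaled by 2^13, while keeping every
 * intermediate product comfortably inside 64 bits.
 */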