/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */


/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/chip.h>
#include <sys/sdt.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock is called straight from
 * the real time clock interrupt.
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t fsflush_cv;
extern sysinfo_t sysinfo;
extern vminfo_t vminfo;
extern int idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values. These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int avenrun[3];		/* FSCALED average run queue lengths */
time_t time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
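 * (A larger time_constant gives a slower, "stiffer" loop: as the code in
 * clock() shows, the applied offset is scaled down by roughly a factor of
 * 2^time_constant each second.)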
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;		/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;		/* time offset (us) */
int32_t time_constant = 0;		/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;		/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in Hz since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * rechoose_interval_history is used to detect when rechoose_interval's
 * value has changed (via hotpatching for example), so that the
 * cached values in the cpu structures may be updated.
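 * (The refresh itself happens inside clock()'s walk of cpu_list; see the
 * rechoose_update handling there.)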
 */
static int rechoose_interval_history = RECHOOSE_INTERVAL;

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
    "Reversed",			/* TOD_REVERSED */
    "Stalled",			/* TOD_STALLED */
    "Jumped",			/* TOD_JUMPED */
    "Changed in Clock Rate"	/* TOD_RATECHANGED */
    /*
     * no strings needed for TOD_NOFAULT
     */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void clock_tick(kthread_t *);
static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;

#ifdef KSLICE
int kslice = KSLICE;
#endif

static void
clock(void)
{
    kthread_t *t;
    kmutex_t *plockp;	/* pointer to thread's process lock */
    int pinned_intr = 0;
    uint_t nrunnable, nrunning;
    uint_t w_io;
    cpu_t *cp;
    cpupart_t *cpupart;
    int exiting;
    extern void set_anoninfo();
    extern void set_freemem();
    void (*funcp)();
    int32_t ltemp;
    int64_t lltemp;
    int s;
    int do_lgrp_load;
    int rechoose_update = 0;
    int rechoose;
    int i;

    if (panicstr)
        return;

    set_anoninfo();
    /*
     * Make sure that 'freemem' does not drift too far from the truth
     */
    set_freemem();


    /*
     * Before the section which is repeated is executed, we do
     * the time delta processing which occurs every clock tick
     *
     * There is additional processing which happens every time
     * the nanosecond counter rolls over which is described
     * below - see the section which begins with : if (one_sec)
     *
     * This section marks the beginning of the precision-kernel
     * code fragment.
     *
     * First, compute the phase adjustment. If the low-order bits
     * (time_phase) of the update overflow, bump the higher order
     * bits (time_update).
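     * For example, once time_phase accumulates a whole scaled microsecond
     * (FINEUSEC), the code below folds the whole microseconds into
     * timedelta as nanoseconds and keeps only the fractional remainder
     * in time_phase.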
     */
    time_phase += time_adj;
    if (time_phase <= -FINEUSEC) {
        ltemp = -time_phase / SCALE_PHASE;
        time_phase += ltemp * SCALE_PHASE;
        s = hr_clock_lock();
        timedelta -= ltemp * (NANOSEC/MICROSEC);
        hr_clock_unlock(s);
    } else if (time_phase >= FINEUSEC) {
        ltemp = time_phase / SCALE_PHASE;
        time_phase -= ltemp * SCALE_PHASE;
        s = hr_clock_lock();
        timedelta += ltemp * (NANOSEC/MICROSEC);
        hr_clock_unlock(s);
    }

    /*
     * End of precision-kernel code fragment which is processed
     * every timer interrupt.
     *
     * Continue with the interrupt processing as scheduled.
     *
     * Did we pin another interrupt thread? Need to check this before
     * grabbing any adaptive locks, since if we block on a lock the
     * pinned thread could escape. Note that this is just a heuristic;
     * if we take multiple laps through clock() without returning from
     * the interrupt because we have another clock tick pending, then
     * the pinned interrupt could be released by one of the previous
     * laps. The only consequence is that the CPU will be counted as
     * in idle (or wait) state once the pinned interrupt is released.
     * Since this accounting is inaccurate by nature, this isn't a big
     * deal --- but we should try to get it right in the common case
     * where we only call clock() once per interrupt.
     */
    if (curthread->t_intr != NULL)
        pinned_intr = (curthread->t_intr->t_flag & T_INTR_THREAD);

    /*
     * Count the number of runnable threads and the number waiting
     * for some form of I/O to complete -- gets added to
     * sysinfo.waiting. To know the state of the system, must add
     * wait counts from all CPUs. Also add up the per-partition
     * statistics.
     */
    w_io = 0;
    nrunnable = 0;

    /*
     * keep track of when to update lgrp/part loads
     */

    do_lgrp_load = 0;
    if (lgrp_ticks++ >= hz / 10) {
        lgrp_ticks = 0;
        do_lgrp_load = 1;
    }

    /*
     * The dispatcher tunable rechoose_interval may be hot-patched.
     * Note if it has a new value. If so, the effective rechoose_interval
     * cached in the cpu structures needs to be updated.
     * If needed we'll do this during the walk of the cpu_list below.
     */
    if (rechoose_interval != rechoose_interval_history) {
        rechoose_interval_history = rechoose_interval;
        rechoose_update = 1;
    }

    if (one_sec)
        loadavg_update();


    /*
     * First count the threads waiting on kpreempt queues in each
     * CPU partition.
     */

    cpupart = cp_list_head;
    do {
        uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

        cpupart->cp_updates++;
        nrunnable += cpupart_nrunnable;
        cpupart->cp_nrunnable_cum += cpupart_nrunnable;
        if (one_sec) {
            cpupart->cp_nrunning = 0;
            cpupart->cp_nrunnable = cpupart_nrunnable;
        }
    } while ((cpupart = cpupart->cp_next) != cp_list_head);


    /* Now count the per-CPU statistics. */
    cp = cpu_list;
    do {
        uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

        nrunnable += cpu_nrunnable;
        cpupart = cp->cpu_part;
        cpupart->cp_nrunnable_cum += cpu_nrunnable;
        if (one_sec)
            cpupart->cp_nrunnable += cpu_nrunnable;
        if (do_lgrp_load &&
            (cp->cpu_flags & CPU_EXISTS)) {
            /*
             * When updating the lgroup's load average,
             * account for the thread running on the CPU.
             * If the CPU is the current one, then we need
             * to account for the underlying thread which
             * got the clock interrupt, not the thread that is
             * handling the interrupt and calculating the load
             * average
             */
            t = cp->cpu_thread;
            if (CPU == cp)
                t = t->t_intr;

            /*
             * Account for the load average for this thread if
             * it isn't the idle thread or it is on the interrupt
             * stack and not the current CPU handling the clock
             * interrupt
             */
            if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
                CPU_ON_INTR(cp))) {
                if (t->t_lpl == cp->cpu_lpl) {
                    /* local thread */
                    cpu_nrunnable++;
                } else {
                    /*
                     * This is a remote thread, charge it
                     * against its home lgroup. Note that
                     * we notice that a thread is remote
                     * only if it's currently executing.
                     * This is a reasonable approximation,
                     * since queued remote threads are rare.
                     * Note also that if we didn't charge
                     * it to its home lgroup, remote
                     * execution would often make a system
                     * appear balanced even though it was
                     * not, and thread placement/migration
                     * would often not be done correctly.
                     */
                    lgrp_loadavg(t->t_lpl,
                        LGRP_LOADAVG_IN_THREAD_MAX, 0);
                }
            }
            lgrp_loadavg(cp->cpu_lpl,
                cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
        }
        /*
         * The platform may define a per physical processor
         * adjustment of rechoose_interval. The effective
         * (base + adjustment) rechoose_interval is cached
         * in the cpu structures for efficiency. Above we detect
         * if the cached values need updating, and here is where
         * the update happens.
         */
        if (rechoose_update) {
            rechoose = rechoose_interval +
                cp->cpu_chip->chip_rechoose_adj;
            cp->cpu_rechoose = (rechoose < 0) ? 0 : rechoose;
        }
    } while ((cp = cp->cpu_next) != cpu_list);

    /*
     * Do tick processing for all the active threads running in
     * the system.
     */
    cp = cpu_list;
    nrunning = 0;
    do {
        klwp_id_t lwp;
        int intr;
        int thread_away;

        /*
         * Don't do any tick processing on CPUs that
         * aren't even in the system or aren't up yet.
         */
        if ((cp->cpu_flags & CPU_EXISTS) == 0) {
            continue;
        }

        /*
         * The locking here is rather tricky. We use
         * thread_free_lock to keep the currently running
         * thread from being freed or recycled while we're
         * looking at it. We can then check if the thread
         * is exiting and get the appropriate p_lock if it
         * is not. We have to be careful, though, because
         * the _process_ can still be freed while we're
         * holding thread_free_lock. To avoid touching the
         * proc structure we put a pointer to the p_lock in the
         * thread structure. The p_lock is persistent so we
         * can acquire it even if the process is gone. At that
         * point we can check (again) if the thread is exiting
         * and either drop the lock or do the tick processing.
         */
        mutex_enter(&thread_free_lock);
        /*
         * We cannot hold the cpu_lock to prevent the
         * cpu_list from changing in the clock interrupt.
         * As long as we don't block (or don't get pre-empted)
         * the cpu_list will not change (all threads are paused
         * before list modification). If the list does change
         * any deleted cpu structures will remain with cpu_next
         * set to NULL, hence the following test.
         */
        if (cp->cpu_next == NULL) {
            mutex_exit(&thread_free_lock);
            break;
        }
        t = cp->cpu_thread;	/* Current running thread */
        if (CPU == cp) {
            /*
             * 't' will be the clock interrupt thread on this
             * CPU. Use the pinned thread (if any) on this CPU
             * as the target of the clock tick. If we pinned
             * an interrupt, though, just keep using the clock
             * interrupt thread since the formerly pinned one
             * may have gone away. One interrupt thread is as
             * good as another, and this means we don't have
             * to continue to check pinned_intr in subsequent
             * code.
             */
            ASSERT(t == curthread);
            if (t->t_intr != NULL && !pinned_intr)
                t = t->t_intr;
        }

        intr = t->t_flag & T_INTR_THREAD;
        lwp = ttolwp(t);
        if (lwp == NULL || (t->t_proc_flag & TP_LWPEXIT) || intr) {
            /*
             * Thread is exiting (or uninteresting) so don't
             * do tick processing or grab p_lock. Once we
             * drop thread_free_lock we can't look inside the
             * thread or lwp structure, since the thread may
             * have gone away.
             */
            exiting = 1;
        } else {
            /*
             * OK, try to grab the process lock. See
             * comments above for why we're not using
             * ttoproc(t)->p_lockp here.
             */
            plockp = t->t_plockp;
            mutex_enter(plockp);
            /* See above comment. */
            if (cp->cpu_next == NULL) {
                mutex_exit(plockp);
                mutex_exit(&thread_free_lock);
                break;
            }
            /*
             * The thread may have exited between when we
             * checked above, and when we got the p_lock.
             */
            if (t->t_proc_flag & TP_LWPEXIT) {
                mutex_exit(plockp);
                exiting = 1;
            } else {
                exiting = 0;
            }
        }
        /*
         * Either we have the p_lock for the thread's process,
         * or we don't care about the thread structure any more.
         * Either way we can drop thread_free_lock.
         */
        mutex_exit(&thread_free_lock);

        /*
         * Update user, system, and idle cpu times.
         */
        if (one_sec) {
            nrunning++;
            cp->cpu_part->cp_nrunning++;
        }
        /*
         * If we haven't done tick processing for this
         * lwp, then do it now. Since we don't hold the
         * lwp down on a CPU it can migrate and show up
         * more than once, hence the lbolt check.
         *
         * Also, make sure that it's okay to perform the
         * tick processing before calling clock_tick.
         * Setting thread_away to a TRUE value (i.e. not 0)
         * results in tick processing not being performed for
         * that thread. Or, in other words, keeps the thread
         * away from clock_tick processing.
         */
        thread_away = ((cp->cpu_flags & CPU_QUIESCED) ||
            CPU_ON_INTR(cp) || intr ||
            (cp->cpu_dispthread == cp->cpu_idle_thread) || exiting);

        if ((!thread_away) && (lbolt - t->t_lbolt != 0)) {
            t->t_lbolt = lbolt;
            clock_tick(t);
        }

#ifdef KSLICE
        /*
         * Ah what the heck, give this kid a taste of the real
         * world and yank the rug out from under it.
         * But, only if we are running UniProcessor.
         */
        if ((kslice) && (ncpus == 1)) {
            aston(t);
            cp->cpu_runrun = 1;
            cp->cpu_kprunrun = 1;
        }
#endif
        if (!exiting)
            mutex_exit(plockp);
    } while ((cp = cp->cpu_next) != cpu_list);

    /*
     * bump time in ticks
     *
     * We rely on there being only one clock thread and hence
     * don't need a lock to protect lbolt.
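     * (lbolt64 is nevertheless bumped with atomic_add_64 so that readers
     * on 32-bit platforms never observe a torn 64-bit value.)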
     */
    lbolt++;
    atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

    /*
     * Check for a callout that needs to be called from the clock
     * thread to support the membership protocol in a clustered
     * system. Copy the function pointer so that we can reset
     * this to NULL if needed.
     */
    if ((funcp = cmm_clock_callout) != NULL)
        (*funcp)();

    /*
     * Wake up the cageout thread waiters once per second.
     */
    if (one_sec)
        kcage_tick();

    /*
     * Schedule timeout() requests if any are due at this time.
     */
    callout_schedule();

    if (one_sec) {

        int drift, absdrift;
        timestruc_t tod;
        int s;

        /*
         * Beginning of precision-kernel code fragment executed
         * every second.
         *
         * On rollover of the second the phase adjustment to be
         * used for the next second is calculated. Also, the
         * maximum error is increased by the tolerance. If the
         * PPS frequency discipline code is present, the phase is
         * increased to compensate for the CPU clock oscillator
         * frequency error.
         *
         * On a 32-bit machine and given parameters in the timex.h
         * header file, the maximum phase adjustment is +-512 ms
         * and maximum frequency offset is (a tad less than)
         * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
         */
        time_maxerror += time_tolerance / SCALE_USEC;

        /*
         * Leap second processing. If in leap-insert state at
         * the end of the day, the system clock is set back one
         * second; if in leap-delete state, the system clock is
         * set ahead one second. The microtime() routine or
         * external clock driver will ensure that reported time
         * is always monotonic. The ugly divides should be
         * replaced.
         */
        switch (time_state) {

        case TIME_OK:
            if (time_status & STA_INS)
                time_state = TIME_INS;
            else if (time_status & STA_DEL)
                time_state = TIME_DEL;
            break;

        case TIME_INS:
            if (hrestime.tv_sec % 86400 == 0) {
                s = hr_clock_lock();
                hrestime.tv_sec--;
                hr_clock_unlock(s);
                time_state = TIME_OOP;
            }
            break;

        case TIME_DEL:
            if ((hrestime.tv_sec + 1) % 86400 == 0) {
                s = hr_clock_lock();
                hrestime.tv_sec++;
                hr_clock_unlock(s);
                time_state = TIME_WAIT;
            }
            break;

        case TIME_OOP:
            time_state = TIME_WAIT;
            break;

        case TIME_WAIT:
            if (!(time_status & (STA_INS | STA_DEL)))
                time_state = TIME_OK;
        default:
            break;
        }

        /*
         * Compute the phase adjustment for the next second. In
         * PLL mode, the offset is reduced by a fixed factor
         * times the time constant. In FLL mode the offset is
         * used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread
         * the adjustment over not more than the number of
         * seconds between updates.
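         * For example, assuming the customary timex.h values of
         * MAXPHASE = 512000 us and MINSEC = 16 s, the slew applied in
         * any one second is capped at 32 ms.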
         */
        if (time_offset == 0)
            time_adj = 0;
        else if (time_offset < 0) {
            lltemp = -time_offset;
            if (!(time_status & STA_FLL)) {
                if ((1 << time_constant) >= SCALE_KG)
                    lltemp *= (1 << time_constant) /
                        SCALE_KG;
                else
                    lltemp = (lltemp / SCALE_KG) >>
                        time_constant;
            }
            if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
                lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
            time_offset += lltemp;
            time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
        } else {
            lltemp = time_offset;
            if (!(time_status & STA_FLL)) {
                if ((1 << time_constant) >= SCALE_KG)
                    lltemp *= (1 << time_constant) /
                        SCALE_KG;
                else
                    lltemp = (lltemp / SCALE_KG) >>
                        time_constant;
            }
            if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
                lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
            time_offset -= lltemp;
            time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
        }

        /*
         * Compute the frequency estimate and additional phase
         * adjustment due to frequency error for the next
         * second. When the PPS signal is engaged, gnaw on the
         * watchdog counter and update the frequency computed by
         * the pll and the PPS signal.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {
            pps_jitter = MAXTIME;
            pps_stabil = MAXFREQ;
            time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                STA_PPSWANDER | STA_PPSERROR);
        }
        lltemp = time_freq + pps_freq;

        if (lltemp)
            time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

        /*
         * End of precision kernel-code fragment
         *
         * The section below should be modified if we are planning
         * to use NTP for synchronization.
         *
         * Note: the clock synchronization code now assumes
         * the following:
         * - if dosynctodr is 1, then compute the drift between
         *   the tod chip and software time and adjust one or
         *   the other depending on the circumstances
         *
         * - if dosynctodr is 0, then the tod chip is independent
         *   of the software clock and should not be adjusted,
         *   but allowed to free run. This allows NTP to sync
         *   hrestime without any interference from the tod chip.
         */

        tod_validate_deferred = B_FALSE;
        mutex_enter(&tod_lock);
        tod = tod_get();
        drift = tod.tv_sec - hrestime.tv_sec;
        absdrift = (drift >= 0) ? drift : -drift;
        if (tod_needsync || absdrift > 1) {
            int s;
            if (absdrift > 2) {
                if (!tod_broken && tod_faulted == TOD_NOFAULT) {
                    s = hr_clock_lock();
                    hrestime = tod;
                    membar_enter();	/* hrestime visible */
                    timedelta = 0;
                    timechanged++;
                    tod_needsync = 0;
                    hr_clock_unlock(s);
                }
            } else {
                if (tod_needsync || !dosynctodr) {
                    gethrestime(&tod);
                    tod_set(tod);
                    s = hr_clock_lock();
                    if (timedelta == 0)
                        tod_needsync = 0;
                    hr_clock_unlock(s);
                } else {
                    /*
                     * If the drift is 2 seconds on the
                     * money, then the TOD is adjusting
                     * the clock; record that.
                     */
                    clock_adj_hist[adj_hist_entry++ %
                        CLOCK_ADJ_HIST_SIZE] = lbolt64;
                    s = hr_clock_lock();
                    timedelta = (int64_t)drift * NANOSEC;
                    hr_clock_unlock(s);
                }
            }
        }
        one_sec = 0;
        time = gethrestime_sec();  /* for crusty old kmem readers */
        mutex_exit(&tod_lock);

        /*
         * Some drivers still depend on this... XXX
         */
        cv_broadcast(&lbolt_cv);

        sysinfo.updates++;
        vminfo.freemem += freemem;
        {
            pgcnt_t maxswap, resv, free;
            pgcnt_t avail =
                MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

            maxswap = k_anoninfo.ani_mem_resv
                + k_anoninfo.ani_max + avail;
            free = k_anoninfo.ani_free + avail;
            resv = k_anoninfo.ani_phys_resv +
                k_anoninfo.ani_mem_resv;

            vminfo.swap_resv += resv;
            /* number of reserved and allocated pages */
#ifdef DEBUG
            if (maxswap < free)
                cmn_err(CE_WARN, "clock: maxswap < free");
            if (maxswap < resv)
                cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
            vminfo.swap_alloc += maxswap - free;
            vminfo.swap_avail += maxswap - resv;
            vminfo.swap_free += free;
        }
        if (nrunnable) {
            sysinfo.runque += nrunnable;
            sysinfo.runocc++;
        }
        if (nswapped) {
            sysinfo.swpque += nswapped;
            sysinfo.swpocc++;
        }
        sysinfo.waiting += w_io;

        /*
         * Wake up fsflush to write out DELWRI
         * buffers, dirty pages and other cached
         * administrative data, e.g. inodes.
         */
        if (--fsflushcnt <= 0) {
            fsflushcnt = tune.t_fsflushr;
            cv_signal(&fsflush_cv);
        }

        vmmeter();
        calcloadavg(genloadavg(&loadavg), hp_avenrun);
        for (i = 0; i < 3; i++)
            /*
             * At the moment avenrun[] can only hold 31
             * bits of load average as it is a signed
             * int in the API. We need to ensure that
             * hp_avenrun[i] >> (16 - FSHIFT) will not be
             * too large. If it is, we put the largest value
             * that we can use into avenrun[i]. This is
             * kludgey, but about all we can do until
             * avenrun[] is declared as an array of uint64[].
             */
            if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
                avenrun[i] = (int32_t)(hp_avenrun[i] >>
                    (16 - FSHIFT));
            else
                avenrun[i] = 0x7fffffff;

        cpupart = cp_list_head;
        do {
            calcloadavg(genloadavg(&cpupart->cp_loadavg),
                cpupart->cp_hp_avenrun);
        } while ((cpupart = cpupart->cp_next) != cp_list_head);

        /*
         * Wake up the swapper thread if necessary.
         */
        if (runin ||
            (runout && (avefree < desfree || wake_sched_sec))) {
            t = &t0;
            thread_lock(t);
            if (t->t_state == TS_STOPPED) {
                runin = runout = 0;
                wake_sched_sec = 0;
                t->t_whystop = 0;
                t->t_whatstop = 0;
                t->t_schedflag &= ~TS_ALLSTART;
                THREAD_TRANSITION(t);
                setfrontdq(t);
            }
            thread_unlock(t);
        }
    }

    /*
     * Wake up the swapper if any high priority swapped-out threads
     * became runnable during the last tick.
     */
    if (wake_sched) {
        t = &t0;
        thread_lock(t);
        if (t->t_state == TS_STOPPED) {
            runin = runout = 0;
            wake_sched = 0;
            t->t_whystop = 0;
            t->t_whatstop = 0;
            t->t_schedflag &= ~TS_ALLSTART;
            THREAD_TRANSITION(t);
            setfrontdq(t);
        }
        thread_unlock(t);
    }
}

void
clock_init(void)
{
    cyc_handler_t hdlr;
    cyc_time_t when;

    hdlr.cyh_func = (cyc_func_t)clock;
    hdlr.cyh_level = CY_LOCK_LEVEL;
    hdlr.cyh_arg = NULL;

    when.cyt_when = 0;
    when.cyt_interval = nsec_per_tick;

    mutex_enter(&cpu_lock);
    clock_cyclic = cyclic_add(&hdlr, &when);
    mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
    int avg;
    int spos;	/* starting position */
    int cpos;	/* moving current position */
    int i;
    int slen;
    hrtime_t hr_avg;

    /* 10-second snapshot, calculate first position */
    if (avgs->lg_len == 0) {
        return (0);
    }
    slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

    spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
        S_LOADAVG_SZ + (avgs->lg_cur - 1);
    for (i = hr_avg = 0; i < slen; i++) {
        cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
        hr_avg += avgs->lg_loads[cpos];
    }

    hr_avg = hr_avg / slen;
    avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

    return (avg);
}

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
    cpu_t *cp;
    cpupart_t *cpupart;
    hrtime_t cpu_total;
    int prev;

    cp = cpu_list;
    loadavg.lg_total = 0;

    /*
     * first pass totals up per-cpu statistics for system and cpu
     * partitions
     */

    do {
        struct loadavg_s *lavg;

        lavg = &cp->cpu_loadavg;

        cpu_total = cp->cpu_acct[CMS_USER] +
            cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
        /* compute delta against last total */
        scalehrtime(&cpu_total);
        prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
            S_LOADAVG_SZ + (lavg->lg_cur - 1);
        if (lavg->lg_loads[prev] <= 0) {
            lavg->lg_loads[lavg->lg_cur] = cpu_total;
            cpu_total = 0;
        } else {
            lavg->lg_loads[lavg->lg_cur] = cpu_total;
            cpu_total = cpu_total - lavg->lg_loads[prev];
            if (cpu_total < 0)
                cpu_total = 0;
        }

        lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
        lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
            lavg->lg_len + 1 : S_LOADAVG_SZ;

        loadavg.lg_total += cpu_total;
        cp->cpu_part->cp_loadavg.lg_total += cpu_total;

    } while ((cp = cp->cpu_next) != cpu_list);

    loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
    loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
    loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
        loadavg.lg_len + 1 : S_LOADAVG_SZ;
    /*
     * Second pass updates counts
     */
    cpupart = cp_list_head;

    do {
        struct loadavg_s *lavg;

        lavg = &cpupart->cp_loadavg;
        lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
        lavg->lg_total = 0;
        lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
        lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
            lavg->lg_len + 1 : S_LOADAVG_SZ;

    } while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
 * routine computes new time and frequency offset estimates for each
 * call. The PPS signal itself determines the new time offset,
 * instead of the calling argument. Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP). If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
    int ltemp, mtemp, s;

    ASSERT(MUTEX_HELD(&tod_lock));

    if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
        return;
    ltemp = offset;
    if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
        ltemp = pps_offset;

    /*
     * Scale the phase adjustment and clamp to the operating range.
     */
    if (ltemp > MAXPHASE)
        time_offset = MAXPHASE * SCALE_UPDATE;
    else if (ltemp < -MAXPHASE)
        time_offset = -(MAXPHASE * SCALE_UPDATE);
    else
        time_offset = ltemp * SCALE_UPDATE;

    /*
     * Select whether the frequency is to be controlled and in which
     * mode (PLL or FLL). Clamp to the operating range. Ugly
     * multiply/divide should be replaced someday.
     */
    if (time_status & STA_FREQHOLD || time_reftime == 0)
        time_reftime = hrestime.tv_sec;

    mtemp = hrestime.tv_sec - time_reftime;
    time_reftime = hrestime.tv_sec;

    if (time_status & STA_FLL) {
        if (mtemp >= MINSEC) {
            ltemp = ((time_offset / mtemp) * (SCALE_USEC /
                SCALE_UPDATE));
            if (ltemp)
                time_freq += ltemp / SCALE_KH;
        }
    } else {
        if (mtemp < MAXSEC) {
            ltemp *= mtemp;
            if (ltemp)
                time_freq += (int)(((int64_t)ltemp *
                    SCALE_USEC) / SCALE_KF)
                    / (1 << (time_constant * 2));
        }
    }
    if (time_freq > time_tolerance)
        time_freq = time_tolerance;
    else if (time_freq < -time_tolerance)
        time_freq = -time_tolerance;

    s = hr_clock_lock();
    tod_needsync = 1;
    hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *       usec_per_tick. This is called from the serial driver cdintr(),
 *       or equivalent, at a high PIL. Because the kernel keeps a
 *       high-resolution time, the following code can accept either
 *       the traditional argument pair, or the current highres timestamp
 *       in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
    int u_usec, v_usec, bigtick;
    time_t cal_sec;
    int cal_usec;

    /*
     * An occasional glitch can be produced when the PPS interrupt
     * occurs in the clock() routine before the time variable is
     * updated. Here the offset is discarded when the difference
     * between it and the last one is greater than tick/2, but not
     * if the interval since the first discard exceeds 30 s.
     */
    time_status |= STA_PPSSIGNAL;
    time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
    pps_valid = 0;
    u_usec = -tvp->tv_usec;
    if (u_usec < -(MICROSEC/2))
        u_usec += MICROSEC;
    v_usec = pps_offset - u_usec;
    if (v_usec < 0)
        v_usec = -v_usec;
    if (v_usec > (usec_per_tick >> 1)) {
        if (pps_glitch > MAXGLITCH) {
            pps_glitch = 0;
            pps_tf[2] = u_usec;
            pps_tf[1] = u_usec;
        } else {
            pps_glitch++;
            u_usec = pps_offset;
        }
    } else
        pps_glitch = 0;

    /*
     * A three-stage median filter is used to help deglitch the pps
     * time. The median sample becomes the time offset estimate; the
     * difference between the other two samples becomes the time
     * dispersion (jitter) estimate.
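     * For example, samples {4, -7, 2} (newest first) yield an offset of
     * 2 (the median) and a jitter sample of 11 (the spread between the
     * two remaining samples).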
     */
    pps_tf[2] = pps_tf[1];
    pps_tf[1] = pps_tf[0];
    pps_tf[0] = u_usec;
    if (pps_tf[0] > pps_tf[1]) {
        if (pps_tf[1] > pps_tf[2]) {
            pps_offset = pps_tf[1];		/* 0 1 2 */
            v_usec = pps_tf[0] - pps_tf[2];
        } else if (pps_tf[2] > pps_tf[0]) {
            pps_offset = pps_tf[0];		/* 2 0 1 */
            v_usec = pps_tf[2] - pps_tf[1];
        } else {
            pps_offset = pps_tf[2];		/* 0 2 1 */
            v_usec = pps_tf[0] - pps_tf[1];
        }
    } else {
        if (pps_tf[1] < pps_tf[2]) {
            pps_offset = pps_tf[1];		/* 2 1 0 */
            v_usec = pps_tf[2] - pps_tf[0];
        } else if (pps_tf[2] < pps_tf[0]) {
            pps_offset = pps_tf[0];		/* 1 0 2 */
            v_usec = pps_tf[1] - pps_tf[2];
        } else {
            pps_offset = pps_tf[2];		/* 1 2 0 */
            v_usec = pps_tf[1] - pps_tf[0];
        }
    }
    if (v_usec > MAXTIME)
        pps_jitcnt++;
    v_usec = (v_usec << PPS_AVG) - pps_jitter;
    pps_jitter += v_usec / (1 << PPS_AVG);
    if (pps_jitter > (MAXTIME >> 1))
        time_status |= STA_PPSJITTER;

    /*
     * During the calibration interval adjust the starting time when
     * the tick overflows. At the end of the interval compute the
     * duration of the interval and the difference of the hardware
     * counters at the beginning and end of the interval. This code
     * is deliciously complicated by the fact that valid differences
     * may exceed the value of tick when using long calibration
     * intervals and small ticks. Note that the counter can be
     * greater than tick if caught at just the wrong instant, but
     * the values returned and used here are correct.
     */
    bigtick = (int)usec_per_tick * SCALE_USEC;
    pps_usec -= pps_freq;
    if (pps_usec >= bigtick)
        pps_usec -= bigtick;
    if (pps_usec < 0)
        pps_usec += bigtick;
    pps_time.tv_sec++;
    pps_count++;
    if (pps_count < (1 << pps_shift))
        return;
    pps_count = 0;
    pps_calcnt++;
    u_usec = usec * SCALE_USEC;
    v_usec = pps_usec - u_usec;
    if (v_usec >= bigtick >> 1)
        v_usec -= bigtick;
    if (v_usec < -(bigtick >> 1))
        v_usec += bigtick;
    if (v_usec < 0)
        v_usec = -(-v_usec >> pps_shift);
    else
        v_usec = v_usec >> pps_shift;
    pps_usec = u_usec;
    cal_sec = tvp->tv_sec;
    cal_usec = tvp->tv_usec;
    cal_sec -= pps_time.tv_sec;
    cal_usec -= pps_time.tv_usec;
    if (cal_usec < 0) {
        cal_usec += MICROSEC;
        cal_sec--;
    }
    pps_time = *tvp;

    /*
     * Check for lost interrupts, noise, excessive jitter and
     * excessive frequency error. The number of timer ticks during
     * the interval may vary +-1 tick. Add to this a margin of one
     * tick for the PPS signal jitter and maximum frequency
     * deviation. If the limits are exceeded, the calibration
     * interval is reset to the minimum and we start over.
     */
    u_usec = (int)usec_per_tick << 1;
    if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
        (cal_sec == 0 && cal_usec < u_usec)) ||
        v_usec > time_tolerance || v_usec < -time_tolerance) {
        pps_errcnt++;
        pps_shift = PPS_SHIFT;
        pps_intcnt = 0;
        time_status |= STA_PPSERROR;
        return;
    }

    /*
     * A three-stage median filter is used to help deglitch the pps
     * frequency. The median sample becomes the frequency offset
     * estimate; the difference between the other two samples
     * becomes the frequency dispersion (stability) estimate.
     */
    pps_ff[2] = pps_ff[1];
    pps_ff[1] = pps_ff[0];
    pps_ff[0] = v_usec;
    if (pps_ff[0] > pps_ff[1]) {
        if (pps_ff[1] > pps_ff[2]) {
            u_usec = pps_ff[1];		/* 0 1 2 */
            v_usec = pps_ff[0] - pps_ff[2];
        } else if (pps_ff[2] > pps_ff[0]) {
            u_usec = pps_ff[0];		/* 2 0 1 */
            v_usec = pps_ff[2] - pps_ff[1];
        } else {
            u_usec = pps_ff[2];		/* 0 2 1 */
            v_usec = pps_ff[0] - pps_ff[1];
        }
    } else {
        if (pps_ff[1] < pps_ff[2]) {
            u_usec = pps_ff[1];		/* 2 1 0 */
            v_usec = pps_ff[2] - pps_ff[0];
        } else if (pps_ff[2] < pps_ff[0]) {
            u_usec = pps_ff[0];		/* 1 0 2 */
            v_usec = pps_ff[1] - pps_ff[2];
        } else {
            u_usec = pps_ff[2];		/* 1 2 0 */
            v_usec = pps_ff[1] - pps_ff[0];
        }
    }

    /*
     * Here the frequency dispersion (stability) is updated. If it
     * is less than one-fourth the maximum (MAXFREQ), the frequency
     * offset is updated as well, but clamped to the tolerance. It
     * will be processed later by the clock() routine.
     */
    v_usec = (v_usec >> 1) - pps_stabil;
    if (v_usec < 0)
        pps_stabil -= -v_usec >> PPS_AVG;
    else
        pps_stabil += v_usec >> PPS_AVG;
    if (pps_stabil > MAXFREQ >> 2) {
        pps_stbcnt++;
        time_status |= STA_PPSWANDER;
        return;
    }
    if (time_status & STA_PPSFREQ) {
        if (u_usec < 0) {
            pps_freq -= -u_usec >> PPS_AVG;
            if (pps_freq < -time_tolerance)
                pps_freq = -time_tolerance;
            u_usec = -u_usec;
        } else {
            pps_freq += u_usec >> PPS_AVG;
            if (pps_freq > time_tolerance)
                pps_freq = time_tolerance;
        }
    }

    /*
     * Here the calibration interval is adjusted. If the maximum
     * time difference is greater than tick / 4, reduce the interval
     * by half. If this is not the case for four consecutive
     * intervals, double the interval.
     */
    if (u_usec << pps_shift > bigtick >> 2) {
        pps_intcnt = 0;
        if (pps_shift > PPS_SHIFT)
            pps_shift--;
    } else if (pps_intcnt >= 4) {
        pps_intcnt = 0;
        if (pps_shift < PPS_SHIFTMAX)
            pps_shift++;
    } else
        pps_intcnt++;

    /*
     * If recovering from kmdb, then make sure the tod chip gets resynced.
     * If we took an early exit above, then we don't yet have a stable
     * calibration signal to lock onto, so don't mark the tod for sync
     * until we get all the way here.
     */
    {
        int s = hr_clock_lock();

        tod_needsync = 1;
        hr_clock_unlock(s);
    }
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t)
{
    struct proc *pp;
    klwp_id_t lwp;
    struct as *as;
    clock_t utime;
    clock_t stime;
    int poke = 0;	/* notify another CPU */
    int user_mode;
    size_t rss;

    /* Must be operating on an lwp/thread */
    if ((lwp = ttolwp(t)) == NULL) {
        panic("clock_tick: no lwp");
        /*NOTREACHED*/
    }

    CL_TICK(t);	/* Class specific tick processing */
    DTRACE_SCHED1(tick, kthread_t *, t);

    pp = ttoproc(t);

    /* pp->p_lock makes sure that the thread does not exit */
    ASSERT(MUTEX_HELD(&pp->p_lock));

    user_mode = (lwp->lwp_state == LWP_USER);

    /*
     * Update process times. Should use high res clock and state
     * changes instead of statistical sampling method. XXX
     */
    if (user_mode) {
        pp->p_utime++;
        pp->p_task->tk_cpu_time++;
    } else {
        pp->p_stime++;
        pp->p_task->tk_cpu_time++;
    }
    as = pp->p_as;

    /*
     * Update user profiling statistics. Get the pc from the
     * lwp when the AST happens.
     */
    if (pp->p_prof.pr_scale) {
        atomic_add_32(&lwp->lwp_oweupc, 1);
        if (user_mode) {
            poke = 1;
            aston(t);
        }
    }

    utime = pp->p_utime;
    stime = pp->p_stime;

    /*
     * If CPU was in user state, process lwp-virtual time
     * interval timer.
     */
    if (user_mode &&
        timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
        itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec_per_tick) == 0) {
        poke = 1;
        sigtoproc(pp, t, SIGVTALRM);
    }

    if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
        itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec_per_tick) == 0) {
        poke = 1;
        sigtoproc(pp, t, SIGPROF);
    }

    /*
     * Enforce CPU resource controls:
     *   (a) process.max-cpu-time resource control
     */
    (void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
        (utime + stime)/hz, RCA_UNSAFE_SIGINFO);

    /*
     *   (b) task.max-cpu-time resource control
     */
    (void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, pp, 1,
        RCA_UNSAFE_SIGINFO);

    /*
     * Update memory usage for the currently running process.
     */
    rss = rm_asrss(as);
    PTOU(pp)->u_mem += rss;
    if (rss > PTOU(pp)->u_mem_max)
        PTOU(pp)->u_mem_max = rss;

    /*
     * Notify the CPU the thread is running on.
     */
    if (poke && t->t_cpu != CPU)
        poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
    int ticks;
    proc_t *p = ttoproc(curthread);
    klwp_t *lwp = ttolwp(curthread);
    struct prof *pr = &p->p_prof;

    do {
        ticks = lwp->lwp_oweupc;
    } while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

    mutex_enter(&p->p_pflock);
    if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
        /*
         * Old-style profiling
         */
        uint16_t *slot = pr->pr_base;
        uint16_t old, new;
        if (pr->pr_scale != 2) {
            uintptr_t delta = upc - pr->pr_off;
            uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
                (((delta & 0xffff) * pr->pr_scale) >> 16);
            if (byteoff >= (uintptr_t)pr->pr_size) {
                mutex_exit(&p->p_pflock);
                return;
            }
            slot += byteoff / sizeof (uint16_t);
        }
        if (fuword16(slot, &old) < 0 ||
            (new = old + ticks) > SHRT_MAX ||
            suword16(slot, new) < 0) {
            pr->pr_scale = 0;
        }
    } else if (pr->pr_scale == 1) {
        /*
         * PC Sampling
         */
        model_t model = lwp_getdatamodel(lwp);
        int result;
#ifdef __lint
        model = model;
#endif
        while (ticks-- > 0) {
            if (pr->pr_samples == pr->pr_size) {
                /* buffer full, turn off sampling */
                pr->pr_scale = 0;
                break;
            }
            switch (SIZEOF_PTR(model)) {
            case sizeof (uint32_t):
                result = suword32(pr->pr_base, (uint32_t)upc);
                break;
#ifdef _LP64
            case sizeof (uint64_t):
                result = suword64(pr->pr_base, (uint64_t)upc);
                break;
#endif
            default:
                cmn_err(CE_WARN, "profil_tick: unexpected "
                    "data model");
                result = -1;
                break;
            }
            if (result != 0) {
                pr->pr_scale = 0;
                break;
            }
            pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
            pr->pr_samples++;
        }
    }
    mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
    kthread_t *t = arg;

    mutex_enter(&t->t_delay_lock);
    cv_signal(&t->t_delay_cv);
    mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
    kthread_t *t = curthread;
    clock_t deadline = lbolt + ticks;
    clock_t timeleft;
    timeout_id_t id;

    if (panicstr && ticks > 0) {
        /*
         * Timeouts aren't running, so all we can do is spin.
         */
        drv_usecwait(TICK_TO_USEC(ticks));
        return;
    }

    while ((timeleft = deadline - lbolt) > 0) {
        mutex_enter(&t->t_delay_lock);
        id = timeout(delay_wakeup, t, timeleft);
        cv_wait(&t->t_delay_cv, &t->t_delay_lock);
        mutex_exit(&t->t_delay_lock);
        (void) untimeout(id);
    }
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
    clock_t deadline = lbolt + ticks;
    clock_t rc;

    mutex_enter(&curthread->t_delay_lock);
    do {
        rc = cv_timedwait_sig(&curthread->t_delay_cv,
            &curthread->t_delay_lock, deadline);
    } while (rc > 0);
    mutex_exit(&curthread->t_delay_lock);
    if (rc == 0)
        return (EINTR);
    return (0);
}

#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip. approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive. An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
    timestruc_t ts;
    int spl;
    int set_clock = 0;

    mutex_enter(&tod_lock);
    ts = tod_get();

    if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
        /*
         * If the TOD chip is reporting some time after 1971,
         * then it probably didn't lose power or become otherwise
         * cleared in the recent past; check to assure that
         * the time coming from the filesystem isn't in the future
         * according to the TOD chip.
         */
        if (approx != -1 && approx > ts.tv_sec) {
            cmn_err(CE_WARN, "Last shutdown is later "
                "than time on time-of-day chip; check date.");
        }
    } else {
        /*
         * If the TOD chip isn't giving correct time, then set it to
         * the time that was passed in as a rough estimate. If we
         * don't have an estimate, then set the clock back to a time
         * when Oliver North, ALF and Dire Straits were all on the
         * collective brain: 1987.
         */
        timestruc_t tmp;
        if (approx == -1)
            ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
        else
            ts.tv_sec = approx;
        ts.tv_nsec = 0;

        /*
         * Attempt to write the new time to the TOD chip. Set spl high
         * to avoid getting preempted between the tod_set and tod_get.
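         * The value is read back immediately below so that an
         * unresponsive chip can be detected and further syncing
         * disabled.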
         */
        spl = splhi();
        tod_set(ts);
        tmp = tod_get();
        splx(spl);

        if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
            tod_broken = 1;
            dosynctodr = 0;
            cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
                " dead batteries?");
        } else {
            cmn_err(CE_WARN, "Time-of-day chip had "
                "incorrect date; check and reset.");
        }
        set_clock = 1;
    }

    if (!boot_time) {
        boot_time = ts.tv_sec;
        set_clock = 1;
    }

    if (set_clock)
        set_hrestime(&ts);

    mutex_exit(&tod_lock);
}

int timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
    int spl = hr_clock_lock();
    hrestime = *ts;
    membar_enter();	/* hrestime must be visible before timechanged++ */
    timedelta = 0;
    timechanged++;
    hr_clock_unlock(spl);
}

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
    if (panicstr) {
        /*
         * During panic, other CPUs besides the panic
         * master continue to handle cyclics and some other
         * interrupts. The code below is intended to be
         * single threaded, so any CPU other than the master
         * must keep out.
         */
        if (CPU->cpu_id != panic_cpu.cpu_id)
            return;

        /*
         * If we're panicking, the deadman cyclic continues to increase
         * lbolt in case the dump device driver relies on this for
         * timeouts. Note that we rely on deadman() being invoked once
         * per second, and credit lbolt and lbolt64 with hz ticks each.
         */
        lbolt += hz;
        lbolt64 += hz;

        if (!deadman_panic_timers)
            return;	/* allow all timers to be manually disabled */

        /*
         * If we are generating a crash dump or syncing filesystems and
         * the corresponding timer is set, decrement it and re-enter
         * the panic code to abort it and advance to the next state.
         * The panic states and triggers are explained in panic.c.
         */
        if (panic_dump) {
            if (dump_timeleft && (--dump_timeleft == 0)) {
                panic("panic dump timeout");
                /*NOTREACHED*/
            }
        } else if (panic_sync) {
            if (sync_timeleft && (--sync_timeleft == 0)) {
                panic("panic sync timeout");
                /*NOTREACHED*/
            }
        }

        return;
    }

    if (lbolt != CPU->cpu_deadman_lbolt) {
        CPU->cpu_deadman_lbolt = lbolt;
        CPU->cpu_deadman_countdown = deadman_seconds;
        return;
    }

    if (CPU->cpu_deadman_countdown-- > 0)
        return;

    /*
     * Regardless of whether or not we actually bring the system down,
     * bump the deadman_panics variable.
     *
     * N.B. deadman_panics is incremented once for each CPU that
     * passes through here. It's expected that all the CPUs will
     * detect this condition within one second of each other, so
     * when deadman_enabled is off, deadman_panics will
     * typically be a multiple of the total number of CPUs in
     * the system.
     */
    atomic_add_32(&deadman_panics, 1);

    if (!deadman_enabled) {
        CPU->cpu_deadman_countdown = deadman_seconds;
        return;
    }

    /*
     * If we're here, we want to bring the system down.
static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts.  Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (lbolt != CPU->cpu_deadman_lbolt) {
		CPU->cpu_deadman_lbolt = lbolt;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (CPU->cpu_deadman_countdown-- > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_lbolt = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 *
	 * The interval must be one second in accordance with the
	 * code in deadman() above to increase lbolt during panic.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
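/*
 * For example (NCPU == 4 is purely hypothetical), the staggering in
 * deadman_online() spreads the one-second deadman cyclics evenly
 * across each second:
 *
 *	cpu 0: cyt_when = 0 * (NANOSEC / 4) = 0.00 s into the second
 *	cpu 1: cyt_when = 1 * (NANOSEC / 4) = 0.25 s
 *	cpu 2: cyt_when = 2 * (NANOSEC / 4) = 0.50 s
 *	cpu 3: cyt_when = 3 * (NANOSEC / 4) = 0.75 s
 */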
/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

void
tod_fault_reset(void)
{
	tod_fault_reset_flag = 1;
}

/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * Update prev_tod and prev_tick values on the first run.
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * For either of these conditions, we need to reset ourselves
	 * and start validation from zero, since each condition
	 * indicates that the TOD will be updated with a new value.
	 * Also note that tod_needsync will be reset in clock().
	 */
	if (tod_needsync || tod_fault_reset_flag) {
		firsttime = 1;
		prev_tod = 0;
		prev_tick = 0;
		dtick_avg = TOD_REF_FREQ;

		if (tod_fault_reset_flag)
			tod_fault_reset_flag = 0;

		return (tod);
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1: /* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2: /* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3: /* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4: /* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick,
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0.
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error.  The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/* ERROR - tod jumped */
				tod_bad = TOD_JUMPED;
				off = (int)diff_tod;
			} else if (dtick_delta) {
				/* ERROR - change in clock rate */
				tod_bad = TOD_RATECHANGED;
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr, since we are going to fault
		 * the TOD chip anyway here.
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime.
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
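/*
 * Worked example of the rate check in tod_validate() (numbers are
 * illustrative): TOD_REF_FREQ is the hrtime delta expected per TOD
 * second, and dtick_delta measures the filtered deviation in quartiles
 * of that reference.  If dtick_avg drifts to 1.3 * TOD_REF_FREQ, then
 *
 *	dtick_delta = (dtick_avg - TOD_REF_FREQ) / (TOD_REF_FREQ >> 2)
 *		    = (0.3 * TOD_REF_FREQ) / (0.25 * TOD_REF_FREQ)
 *		    = 1 (integer division)
 *
 * so a sustained rate error of at least a quarter of the reference
 * frequency yields a nonzero dtick_delta and a TOD_RATECHANGED fault.
 */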
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 *	(1 - exp(-1/60)) << 13 = 135,
	 *	(1 - exp(-1/300)) << 13 = 27,
	 *	(1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
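/*
 * The filter constants in f[3] can be verified with a standalone
 * userland program (illustrative sketch, not part of the kernel build):
 *
 *	#include <math.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		double tau[3] = { 60.0, 300.0, 900.0 };
 *		int i;
 *
 *		for (i = 0; i < 3; i++)
 *			printf("%.0f\n",
 *			    (1.0 - exp(-1.0 / tau[i])) * 8192.0);
 *		return (0);
 *	}
 *
 * which prints 135, 27, and 9 (to the nearest integer), matching the
 * decay constants used above.
 */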