/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_timer.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t		vminfo;
extern int		idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
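
/*
 * Illustrative note (not part of the original source): avenrun[] holds
 * fixed-point load averages scaled by FSCALE (1 << FSHIFT).  A consumer
 * such as getloadavg(3C) recovers the familiar floating-point values by
 * dividing the scale back out, roughly:
 *
 *	double load[3];
 *	int i;
 *	for (i = 0; i < 3; i++)
 *		load[i] = (double)avenrun[i] / FSCALE;
 *
 * hp_avenrun[] keeps extra fractional bits so that this rounding does
 * not accumulate error from one second to the next.
 */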
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().  pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in ticks since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */
cyclic_id_t ddi_timer_cyclic;	/* cyclic_timer()'s cyclic_id */

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */
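
/*
 * Illustrative note (not part of the original source): lbolt advances by
 * one per clock tick, i.e. hz times per second, so elapsed wall time can
 * be approximated from tick deltas.  A sketch of the common conversions,
 * using the standard hz variable and the drv_usectohz(9F) DDI helper:
 *
 *	clock_t start = lbolt;
 *	...
 *	clock_t elapsed_ticks = lbolt - start;
 *	time_t elapsed_secs = elapsed_ticks / hz;
 *	clock_t five_sec_in_ticks = drv_usectohz(5 * MICROSEC);
 *
 * Because lbolt is a signed clock_t, long-lived code should compare tick
 * deltas (as above) rather than absolute lbolt values.
 */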

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void clock_tick(kthread_t *);
static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

static void
clock(void)
{
	kthread_t	*t;
	kmutex_t	*plockp;	/* pointer to thread's process lock */
	int	pinned_intr = 0;
	uint_t	nrunnable, nrunning;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	int	exiting;
	extern	void	set_anoninfo();
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int i;

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth
	 */
	set_freemem();


	/*
	 * Before the section which is repeated is executed, we do
	 * the time delta processing which occurs every clock tick
	 *
	 * There is additional processing which happens every time
	 * the nanosecond counter rolls over which is described
	 * below - see the section which begins with: if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment.  If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 *
	 * Did we pin another interrupt thread?  Need to check this before
	 * grabbing any adaptive locks, since if we block on a lock the
	 * pinned thread could escape.  Note that this is just a heuristic;
	 * if we take multiple laps through clock() without returning from
	 * the interrupt because we have another clock tick pending, then
	 * the pinned interrupt could be released by one of the previous
	 * laps.  The only consequence is that the CPU will be counted as
	 * in idle (or wait) state once the pinned interrupt is released.
	 * Since this accounting is inaccurate by nature, this isn't a big
	 * deal --- but we should try to get it right in the common case
	 * where we only call clock() once per interrupt.
	 */
	if (curthread->t_intr != NULL)
		pinned_intr = (curthread->t_intr->t_flag & T_INTR_THREAD);

	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting.  To know the state of the system, must add
	 * wait counts from all CPUs.  Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec)
		loadavg_update();

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}
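
		/*
		 * Illustrative note (not part of the original source): the
		 * decay above follows new maxima immediately but backs off
		 * old ones by roughly a quarter of the gap each second.
		 * E.g. with cpu_intrload at 80 and a measured load of 0,
		 * successive seconds yield 60, 45, 33, ... (each step
		 * subtracting (change + 3) / 4), so an interrupt spike
		 * fades over a few seconds instead of vanishing at once.
		 */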

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt not the thread that is
			 * handling the interrupt and calculating the load
			 * average
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	/*
	 * Do tick processing for all the active threads running in
	 * the system.  We're trying to be more fair by walking the
	 * list of CPUs starting from a different CPU each time.
	 */
	cp = clock_cpu_list;
	nrunning = 0;
	do {
		klwp_id_t lwp;
		int intr;
		int thread_away;

		/*
		 * Don't do any tick processing on CPUs that
		 * aren't even in the system or aren't up yet.
		 */
		if ((cp->cpu_flags & CPU_EXISTS) == 0) {
			continue;
		}

		/*
		 * The locking here is rather tricky.  We use
		 * thread_free_lock to keep the currently running
		 * thread from being freed or recycled while we're
		 * looking at it.  We can then check if the thread
		 * is exiting and get the appropriate p_lock if it
		 * is not.  We have to be careful, though, because
		 * the _process_ can still be freed while we're
		 * holding thread_free_lock.  To avoid touching the
		 * proc structure we put a pointer to the p_lock in the
		 * thread structure.  The p_lock is persistent so we
		 * can acquire it even if the process is gone.  At that
		 * point we can check (again) if the thread is exiting
		 * and either drop the lock or do the tick processing.
		 */
		mutex_enter(&thread_free_lock);
		/*
		 * We cannot hold the cpu_lock to prevent the
		 * cpu_list from changing in the clock interrupt.
		 * As long as we don't block (or don't get pre-empted)
		 * the cpu_list will not change (all threads are paused
		 * before list modification).  If the list does change
		 * any deleted cpu structures will remain with cpu_next
		 * set to NULL, hence the following test.
		 */
		if (cp->cpu_next == NULL) {
			mutex_exit(&thread_free_lock);
			break;
		}
		t = cp->cpu_thread;	/* Current running thread */
		if (CPU == cp) {
			/*
			 * 't' will be the clock interrupt thread on this
			 * CPU.  Use the pinned thread (if any) on this CPU
			 * as the target of the clock tick.  If we pinned
			 * an interrupt, though, just keep using the clock
			 * interrupt thread since the formerly pinned one
			 * may have gone away.  One interrupt thread is as
			 * good as another, and this means we don't have
			 * to continue to check pinned_intr in subsequent
			 * code.
			 */
			ASSERT(t == curthread);
			if (t->t_intr != NULL && !pinned_intr)
				t = t->t_intr;
		}

		intr = t->t_flag & T_INTR_THREAD;
		lwp = ttolwp(t);
		if (lwp == NULL || (t->t_proc_flag & TP_LWPEXIT) || intr) {
			/*
			 * Thread is exiting (or uninteresting) so don't
			 * do tick processing or grab p_lock.  Once we
			 * drop thread_free_lock we can't look inside the
			 * thread or lwp structure, since the thread may
			 * have gone away.
			 */
			exiting = 1;
		} else {
			/*
			 * OK, try to grab the process lock.  See
			 * comments above for why we're not using
			 * ttoproc(t)->p_lockp here.
			 */
			plockp = t->t_plockp;
			mutex_enter(plockp);
			/* See above comment. */
			if (cp->cpu_next == NULL) {
				mutex_exit(plockp);
				mutex_exit(&thread_free_lock);
				break;
			}
			/*
			 * The thread may have exited between when we
			 * checked above, and when we got the p_lock.
			 */
			if (t->t_proc_flag & TP_LWPEXIT) {
				mutex_exit(plockp);
				exiting = 1;
			} else {
				exiting = 0;
			}
		}
		/*
		 * Either we have the p_lock for the thread's process,
		 * or we don't care about the thread structure any more.
		 * Either way we can drop thread_free_lock.
		 */
		mutex_exit(&thread_free_lock);

		/*
		 * Update user, system, and idle cpu times.
		 */
		if (one_sec) {
			nrunning++;
			cp->cpu_part->cp_nrunning++;
		}
		/*
		 * If we haven't done tick processing for this
		 * lwp, then do it now.  Since we don't hold the
		 * lwp down on a CPU it can migrate and show up
		 * more than once, hence the lbolt check.
		 *
		 * Also, make sure that it's okay to perform the
		 * tick processing before calling clock_tick.
		 * Setting thread_away to a TRUE value (ie. not 0)
		 * results in tick processing not being performed for
		 * that thread.  Or, in other words, keeps the thread
		 * away from clock_tick processing.
		 */
		thread_away = ((cp->cpu_flags & CPU_QUIESCED) ||
		    CPU_ON_INTR(cp) || intr ||
		    (cp->cpu_dispthread == cp->cpu_idle_thread) || exiting);

		if ((!thread_away) && (lbolt - t->t_lbolt != 0)) {
			t->t_lbolt = lbolt;
			clock_tick(t);
		}

		if (!exiting)
			mutex_exit(plockp);
	} while ((cp = cp->cpu_next) != clock_cpu_list);

	clock_cpu_list = clock_cpu_list->cpu_next;

	/*
	 * bump time in ticks
	 *
	 * We rely on there being only one clock thread and hence
	 * don't need a lock to protect lbolt.
	 */
	lbolt++;
	atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	/*
	 * Schedule timeout() requests if any are due at this time.
	 */
	callout_schedule();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm.  On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second.  The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic.  The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second.  In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant.  In FLL mode the offset is
		 * used directly.  In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}
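
		/*
		 * Illustrative note (not part of the original source): in
		 * PLL mode the branch above consumes a fraction of
		 * time_offset each second and converts it into a per-tick
		 * increment.  Ignoring the fixed-point scale factors
		 * (SCALE_KG, SCALE_UPDATE, SCALE_PHASE), the shape of the
		 * computation is roughly:
		 *
		 *	step = time_offset >> time_constant;
		 *	time_offset -= step;
		 *	time_adj = step / hz;	(applied once per tick)
		 *
		 * so a larger time_constant means a "stiffer" loop that
		 * slews toward the NTP-supplied offset more gradually.
		 */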

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second.  When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 * - if dosynctodr is 1, then compute the drift between
		 *   the tod chip and software time and adjust one or
		 *   the other depending on the circumstances
		 *
		 * - if dosynctodr is 0, then the tod chip is independent
		 *   of the software clock and should not be adjusted,
		 *   but allowed to free run.  this allows NTP to sync.
		 *   hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = lbolt64;
					s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		sysinfo.updates++;
		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API.  We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large.  If it is, we put the largest value
			 * that we can use into avenrun[i].  This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[]
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t hdlr;
	cyc_time_t when;

	hdlr.cyh_func = (cyc_func_t)clock;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	mutex_enter(&cpu_lock);
	clock_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	/*
	 * cyclic_timer is dedicated to the ddi interface, which
	 * uses the same clock resolution as the system one.
	 */
	hdlr.cyh_func = (cyc_func_t)cyclic_timer;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	mutex_enter(&cpu_lock);
	ddi_timer_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}
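
/*
 * Illustrative note (not part of the original source): lg_loads[] is a
 * circular buffer of S_LOADAVG_SZ one-second samples, with lg_cur the
 * next slot to be written, so genloadavg() walks backward from the most
 * recent sample and wraps around zero.  For example, assuming (for
 * illustration) S_LOADAVG_SZ of 11 and lg_cur of 2, the most recent
 * sample is slot 1 and a 10-sample walk visits slots
 * 1, 0, 10, 9, ... 3, which is exactly what the (spos - i) < 0
 * adjustment above computes.
 */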

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  The PPS signal itself determines the new time offset,
 * instead of the calling argument.  Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP).  If the caller's
 * time is far different than the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL).  Clamp to the operating range.  Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine.  It
 * integrates successive PPS phase differences and calculates the
 * frequency offset.  This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically.  The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled.  The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	 usec_per_tick.  This is called from the serial driver cdintr(),
 *	 or equivalent, at a high PIL.  Because the kernel keeps a
 *	 high-resolution time, the following code can accept either
 *	 the traditional argument pair, or the current highres timestamp
 *	 in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated.  Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time.  The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
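
	/*
	 * Illustrative note (not part of the original source): the
	 * comments such as "2 0 1" above give the rank order of
	 * pps_tf[0], pps_tf[1], pps_tf[2] for that branch.  For example,
	 * with samples of 40, 10 and 30 us in pps_tf[0..2], the "0 2 1"
	 * branch applies (tf[0] highest, tf[1] lowest): the median 30
	 * becomes pps_offset, and the jitter estimate is
	 * tf[0] - tf[1] = 30 us, the spread between the two outliers.
	 */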
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency.  The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated.  If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance.  It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t	utime;
	clock_t	stime;
	int	poke = 0;		/* notify another CPU */
	int	user_mode;
	size_t	rss;

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	CL_TICK(t);	/* Class specific tick processing */
	DTRACE_SCHED1(tick, kthread_t *, t);

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	/*
	 * Update process times.  Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode) {
		pp->p_utime++;
		pp->p_task->tk_cpu_time++;
	} else {
		pp->p_stime++;
		pp->p_task->tk_cpu_time++;
	}
	as = pp->p_as;

	/*
	 * Update user profiling statistics.  Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, 1);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	utime = pp->p_utime;
	stime = pp->p_stime;

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer.
	 */
	if (user_mode &&
	    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGVTALRM);
	}

	if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
	    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec_per_tick) == 0) {
		poke = 1;
		sigtoproc(pp, t, SIGPROF);
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 */
	(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
	    (utime + stime)/hz, RCA_UNSAFE_SIGINFO);

	/*
	 *   (b) task.max-cpu-time resource control
	 */
	(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, pp, 1,
	    RCA_UNSAFE_SIGINFO);

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}
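
/*
 * Illustrative note (not part of the original source): for old-style
 * profil(2) buffers, pr_scale is a 16-bit binary fraction mapping pc
 * offsets to buffer offsets, and profil_tick() below evaluates
 * (delta * pr_scale) >> 16 in two halves to avoid 32-bit overflow:
 *
 *	byteoff = ((delta >> 16) * pr_scale) +
 *	    (((delta & 0xffff) * pr_scale) >> 16);
 *
 * e.g. a scale of 0x8000 maps each pair of text bytes to one byte of
 * the profiling buffer, i.e. one uint16_t counter per four text bytes.
 */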

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline = lbolt + ticks;
	clock_t timeleft;
	timeout_id_t id;

	if (panicstr && ticks > 0) {
		/*
		 * Timeouts aren't running, so all we can do is spin.
		 */
		drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	while ((timeleft = deadline - lbolt) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout(id);
	}
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	clock_t deadline = lbolt + ticks;
	clock_t rc;

	mutex_enter(&curthread->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&curthread->t_delay_cv,
		    &curthread->t_delay_lock, deadline);
	} while (rc > 0);
	mutex_exit(&curthread->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}
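
/*
 * Illustrative note (not part of the original source): delay() and
 * delay_sig() block the calling kernel thread for at least the given
 * number of ticks, so a driver that wants to pause for roughly one
 * second might do:
 *
 *	delay(drv_usectohz(MICROSEC));		(cannot be interrupted)
 *
 *	if (delay_sig(drv_usectohz(MICROSEC)) == EINTR)
 *		return (EINTR);			(a signal cut it short)
 *
 * Neither may be called from interrupt context, since both can sleep.
 */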

#define	SECONDS_PER_DAY 86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past;  check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, then set it to
		 * the time that was passed in as a rough estimate.  If we
		 * don't have an estimate, then set the clock back to a time
		 * when Oliver North, ALF and Dire Straits were all on the
		 * collective brain:  1987.
		 */
		timestruc_t tmp;
		if (approx == -1)
			ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		else
			ts.tv_sec = approx;
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl high
		 * to avoid getting preempted between the tod_set and tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
			    " dead batteries?");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int	timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();
	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timedelta = 0;
	timechanged++;
	hr_clock_unlock(spl);
}

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts.  Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.

static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts. The code below is intended to be
		 * single-threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		/*
		 * If we're panicking, the deadman cyclic continues to increase
		 * lbolt in case the dump device driver relies on this for
		 * timeouts. Note that we rely on deadman() being invoked once
		 * per second, and credit lbolt and lbolt64 with hz ticks each.
		 */
		lbolt += hz;
		lbolt64 += hz;

		if (!deadman_panic_timers)
			return;	/* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems and
		 * the corresponding timer is set, decrement it and re-enter
		 * the panic code to abort it and advance to the next state.
		 * The panic states and triggers are explained in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (lbolt != CPU->cpu_deadman_lbolt) {
		CPU->cpu_deadman_lbolt = lbolt;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (CPU->cpu_deadman_countdown-- > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here. It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_lbolt = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time. The simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout. This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 *
	 * The interval must be one second to match the code in deadman()
	 * above, which credits lbolt with hz ticks once per second
	 * during panic.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
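
/*
 * Illustrative sketch (editorial addition; all example_* names are
 * hypothetical): registering another per-CPU omni-cyclic in the same
 * style as deadman_init(), with firing times staggered across CPUs so
 * that no two CPUs run the handler at the same instant.
 */
static void
example_percpu_tick(void)
{
	/* Runs once per second on every online CPU. */
}

static void
example_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	hdlr->cyh_func = (cyc_func_t)example_percpu_tick;
	hdlr->cyh_level = CY_LOW_LEVEL;
	hdlr->cyh_arg = NULL;

	/* Offset each CPU by cpu_id * (1s / NCPU), as deadman_online() does. */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

static void
example_cyclic_init(void)
{
	cyc_omni_handler_t hdlr;

	hdlr.cyo_online = example_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	/* cyclic_add_omni() must be called with cpu_lock held. */
	mutex_enter(&cpu_lock);
	(void) cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}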

/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable, e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x] -- "
				    "stopped tracking Time of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s] -- "
				    "stopped tracking Time of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "read-only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

void
tod_fault_reset(void)
{
	tod_fault_reset_flag = 1;
}
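
/*
 * Illustrative sketch (editorial addition; example_tod_fault_clear() is
 * hypothetical): clearing the TOD fault state from debugging code.
 * tod_fault() asserts that tod_lock is held, so a caller must take the
 * lock around the call.
 */
static void
example_tod_fault_clear(void)
{
	mutex_enter(&tod_lock);
	(void) tod_fault(TOD_NOFAULT, 0);
	mutex_exit(&tod_lock);
}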

/*
 * tod_validate() is used for checking values returned by tod_get().
 * Four error cases can be detected by this routine:
 *   TOD_REVERSED: current tod value is less than previous.
 *   TOD_STALLED: current tod value hasn't advanced.
 *   TOD_JUMPED: current tod value advanced too far from previous value.
 *   TOD_RATECHANGED: the ratio between average tod delta and
 *   average tick delta has changed.
 */
time_t
tod_validate(time_t tod)
{
	time_t diff_tod;
	hrtime_t diff_tick;

	long dtick;
	int dtick_delta;

	int off = 0;
	enum tod_fault_type tod_bad = TOD_NOFAULT;

	static int firsttime = 1;

	static time_t prev_tod = 0;
	static hrtime_t prev_tick = 0;
	static long dtick_avg = TOD_REF_FREQ;

	hrtime_t tick = gethrtime();

	ASSERT(MUTEX_HELD(&tod_lock));

	/*
	 * tod_validate_enable is patchable via /etc/system.
	 * If TOD is already faulted, or if TOD validation is deferred,
	 * there is nothing to do.
	 */
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return (tod);
	}

	/*
	 * Update prev_tod and prev_tick values for first run
	 */
	if (firsttime) {
		firsttime = 0;
		prev_tod = tod;
		prev_tick = tick;
		return (tod);
	}

	/*
	 * For either of these conditions, we need to reset ourselves
	 * and start validation from zero, since each condition
	 * indicates that the TOD will be updated with a new value.
	 * Also, note that tod_needsync will be reset in clock().
	 */
	if (tod_needsync || tod_fault_reset_flag) {
		firsttime = 1;
		prev_tod = 0;
		prev_tick = 0;
		dtick_avg = TOD_REF_FREQ;

		if (tod_fault_reset_flag)
			tod_fault_reset_flag = 0;

		return (tod);
	}

	/* test hook */
	switch (tod_unit_test) {
	case 1:		/* for testing jumping tod */
		tod += tod_test_injector;
		tod_unit_test = 0;
		break;
	case 2:		/* for testing stuck tod bit */
		tod |= 1 << tod_test_injector;
		tod_unit_test = 0;
		break;
	case 3:		/* for testing stalled tod */
		tod = prev_tod;
		tod_unit_test = 0;
		break;
	case 4:		/* reset tod fault status */
		(void) tod_fault(TOD_NOFAULT, 0);
		tod_unit_test = 0;
		break;
	default:
		break;
	}

	diff_tod = tod - prev_tod;
	diff_tick = tick - prev_tick;

	ASSERT(diff_tick >= 0);

	if (diff_tod < 0) {
		/* ERROR - tod reversed */
		tod_bad = TOD_REVERSED;
		off = (int)(prev_tod - tod);
	} else if (diff_tod == 0) {
		/* tod did not advance */
		if (diff_tick > TOD_STALL_THRESHOLD) {
			/* ERROR - tod stalled */
			tod_bad = TOD_STALLED;
		} else {
			/*
			 * Make sure we don't update prev_tick
			 * so that diff_tick is calculated since
			 * the first diff_tod == 0
			 */
			return (tod);
		}
	} else {
		/* calculate dtick */
		dtick = diff_tick / diff_tod;

		/* update dtick averages */
		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);

		/*
		 * Calculate dtick_delta as
		 * variation from reference freq in quartiles
		 */
		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
		    (TOD_REF_FREQ >> 2);

		/*
		 * Even with a perfectly functioning TOD device,
		 * when the number of elapsed seconds is low the
		 * algorithm can calculate a rate that is beyond
		 * tolerance, causing an error. The algorithm is
		 * inaccurate when elapsed time is low (less than
		 * 5 seconds).
		 */
		if (diff_tod > 4) {
			if (dtick < TOD_JUMP_THRESHOLD) {
				/* ERROR - tod jumped */
				tod_bad = TOD_JUMPED;
				off = (int)diff_tod;
			} else if (dtick_delta) {
				/* ERROR - change in clock rate */
				tod_bad = TOD_RATECHANGED;
			}
		}
	}

	if (tod_bad != TOD_NOFAULT) {
		(void) tod_fault(tod_bad, off);

		/*
		 * Disable dosynctodr since we are going to fault
		 * the TOD chip anyway here
		 */
		dosynctodr = 0;

		/*
		 * Set tod to the correct value from hrestime
		 */
		tod = hrestime.tv_sec;
	}

	prev_tod = tod;
	prev_tick = tick;
	return (tod);
}
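
/*
 * Illustrative sketch (editorial addition; example_tod_get() and its
 * chip_seconds parameter are hypothetical): a platform tod_get()
 * implementation would typically pass the raw chip reading through
 * tod_validate(), so that a reversed, stalled, jumped, or rate-changed
 * value is replaced with the seconds portion of hrestime.
 */
static timestruc_t
example_tod_get(time_t chip_seconds)	/* raw seconds read from the chip */
{
	timestruc_t ts;

	ASSERT(MUTEX_HELD(&tod_lock));
	ts.tv_sec = tod_validate(chip_seconds);
	ts.tv_nsec = 0;
	return (ts);
}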

static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds). The constants in f[3] are for
	 * exponential decay:
	 *	(1 - exp(-1/60)) << 13 = 135,
	 *	(1 - exp(-1/300)) << 13 = 27,
	 *	(1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
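
/*
 * Editorial note: the decay constants in f[3] can be reproduced with
 * ordinary floating point. A quick userland check program (not kernel
 * code, included here only as an illustration) is:
 *
 *	#include <math.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int t[3] = { 60, 300, 900 }, i;
 *
 *		for (i = 0; i < 3; i++)
 *			(void) printf("%d\n",
 *			    (int)((1.0 - exp(-1.0 / t[i])) * 8192.0));
 *		return (0);
 *	}
 *
 * which prints 135, 27, and 9, matching f[3] above (8192 == 1 << 13).
 */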