/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_timer.h>
#include <sys/random.h>
#include <sys/modctl.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

#include <sys/sunddi.h>
#include <sys/clock_impl.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t		vminfo;
extern int		idleswtch;	/* flag set while idle in pswtch() */
extern hrtime_t volatile devinfo_freeze;

/*
 * high-precision avenrun values. These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;

/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;	/* time offset (us) */
int32_t time_constant = 0;	/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;	/* time at last adjustment (s) */
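/*
 * Illustrative example (not part of the original file): the state and
 * status words above combine with simple bit tests, with the STA_* and
 * TIME_* values coming from <sys/timex.h>. For instance, a clock that
 * selected the PPS time discipline but has since lost its PPS signal
 * could be detected as sketched below.
 */
#if 0	/* example only, never compiled */
	if ((time_status & STA_PPSTIME) && !(time_status & STA_PPSSIGNAL))
		cmn_err(CE_NOTE, "PPS time discipline selected "
		    "but no PPS signal present");
#endif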
/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

kcondvar_t lbolt_cv;

/*
 * Hybrid lbolt implementation:
 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and
 * the original symbols removed from the system. The once clock-driven
 * variables are now implemented in an event-driven fashion, backed by
 * gethrtime() coarsened to the appropriate clock resolution. The default
 * event-driven implementation is complemented by a cyclic-driven one, active
 * only during periods of intense activity around the DDI lbolt routines,
 * when an lbolt-specific cyclic is reprogrammed to fire at a clock tick
 * interval to serve consumers of lbolt who rely on the original low cost
 * of consulting a memory position.
 *
 * The implementation uses the number of calls to these routines and the
 * frequency of these to determine when to transition from event to cyclic
 * driven and vice-versa. These values are kept on a per CPU basis for
 * scalability reasons and to prevent CPUs from constantly invalidating a
 * single cache line when modifying a global variable. The transition from
 * event to cyclic mode happens once the thresholds are crossed, and activity
 * on any CPU can cause such a transition.
 *
 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 * lbolt_cyclic_driven() according to the current mode. When the thresholds
 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 * fire at a nsec_per_tick interval and increment an internal variable at
 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of such variable. lbolt_cyclic() will attempt
 * to shut itself off at each threshold interval (sampling period for calls
 * to the DDI lbolt routines), and return to the event driven mode, but will
 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 * for the cyclic subsystem to be initialized.
 */
int64_t lbolt_bootstrap(void);
int64_t lbolt_event_driven(void);
int64_t lbolt_cyclic_driven(void);
int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);

/*
 * lbolt's cyclic, installed by clock_init().
 */
static void lbolt_cyclic(void);

/*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven, once it reaches cyclic mode.
 */
static boolean_t lbolt_cyc_only = B_FALSE;

/*
 * Cache aligned, per CPU structure with lbolt usage statistics.
 */
static lbolt_cpu_t *lb_cpu;

/*
 * Single, cache aligned, structure with all the information required by
 * the lbolt implementation.
 */
lbolt_info_t *lb_info;


int	one_sec = 1;		/* turned on once every second */
static int fsflushcnt;		/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */
cyclic_id_t ddi_timer_cyclic;	/* cyclic_timer()'s cyclic_id */

extern void	clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;

static int tod_status_flag = 0;		/* used by tod_validate() */

static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */

/* patchable via /etc/system */
int tod_validate_enable = 1;

/* Diagnose/Limit messages about delay(9F) called from interrupt context */
int delay_from_interrupt_diagnose = 0;
volatile uint32_t delay_from_interrupt_msg = 20;
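/*
 * Illustrative sketch (not part of the original file): the hybrid switch
 * described above amounts to decrementing a per-CPU call counter and, once
 * it reaches zero within the sampling interval, posting the softint
 * installed by lbolt_softint_add() so that lbolt_ev_to_cyclic() can flip
 * lbolt_hybrid over to lbolt_cyclic_driven(). The helper below is a
 * simplified, hypothetical rendition of the event driven path; the real
 * lbolt_event_driven() lives elsewhere in the kernel.
 */
#if 0	/* example only, never compiled */
static int64_t
lbolt_event_driven_sketch(void)
{
	lbolt_cpu_t *lbc = &lb_cpu[CPU->cpu_seqid];
	int64_t lb = gethrtime() / nsec_per_tick - lb_info->lbi_debug_time;

	if (--lbc->lbc_counter == 0) {
		/* threshold crossed: arrange the switch to cyclic mode */
		lbc->lbc_counter = lb_info->lbi_thresh_calls;
		/* post the lbolt softint here (hypothetical hook) */
	}
	return (lb);
}
#endif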
/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static int64_t deadman_counter = 0;

static void
clock(void)
{
	kthread_t	*t;
	uint_t	nrunnable;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	extern	void	set_anoninfo();
	extern	void	set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int	s;
	int	do_lgrp_load;
	int	i;
	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth.
	 */
	set_freemem();


	/*
	 * Before the repeated section is executed, we do the time delta
	 * processing which occurs on every clock tick.
	 *
	 * There is additional processing which happens every time the
	 * nanosecond counter rolls over, described below in the section
	 * which begins with:	if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */
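	/*
	 * Worked example (illustrative, not from the original source):
	 * time_phase accumulates in scaled-microsecond units. Assuming
	 * FINEUSEC represents one microsecond in those units (as in the
	 * reference NTP kernel model), a time_phase that has climbed to
	 * three times that value yields ltemp = 3, and 3 whole
	 * microseconds are carried into timedelta as
	 * 3 * (NANOSEC/MICROSEC) = 3000 ns, under hr_clock_lock() so
	 * readers never observe a torn update.
	 */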
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting. To know the state of the system, must add
	 * wait counts from all CPUs. Also add up the per-partition
	 * statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * keep track of when to update lgrp/part loads
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec) {
		loadavg_update();
		deadman_counter++;
	}

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below. Only gather w_io
			 * information when we walk the list of cpus if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}
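		/*
		 * Illustrative note (not in the original): the filter
		 * above jumps immediately to a higher load but decays
		 * toward a lower one at roughly a quarter of the
		 * difference per second. E.g. with cpu_intrload = 40
		 * and a new sample of 0, the value steps down
		 * 40 -> 30 -> 22 -> 16 -> ... instead of dropping to 0
		 * at once, smoothing out bursty interrupt activity.
		 */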
		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt, not the thread that
			 * is handling the interrupt and calculating the
			 * load average.
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for the load average for this thread if
			 * it isn't the idle thread or it is on the interrupt
			 * stack and not the current CPU handling the clock
			 * interrupt.
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup. Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system. Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wakeup the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated. Also, the
		 * maximum error is increased by the tolerance. If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}
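		/*
		 * Worked example (illustrative): with STA_INS set, the
		 * state machine moves TIME_OK -> TIME_INS. When the UTC
		 * day rolls over (hrestime.tv_sec % 86400 == 0), tv_sec
		 * is stepped back one second, effectively replaying
		 * 23:59:59 to insert the leap second, and the state
		 * proceeds TIME_OOP -> TIME_WAIT -> TIME_OK once the
		 * daemon clears STA_INS.
		 */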
		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision kernel-code fragment
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 * - if dosynctodr is 1, then compute the drift between
		 *   the tod chip and software time and adjust one or
		 *   the other depending on the circumstances
		 *
		 * - if dosynctodr is 0, then the tod chip is independent
		 *   of the software clock and should not be adjusted,
		 *   but allowed to free run. This allows NTP to sync
		 *   hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
					callout_hrestime();

				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock; record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = now;
					s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);
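		/*
		 * Summary of the policy above (an illustrative
		 * restatement, not from the original): a drift of more
		 * than 2 seconds steps hrestime to the chip's value
		 * outright (unless the chip is broken or faulted); at
		 * 2 seconds or less the code either writes software
		 * time back to the chip (when tod_needsync is set or
		 * dosynctodr is clear) or, with dosynctodr, amortizes
		 * the discrepancy by loading drift * NANOSEC into
		 * timedelta so it is slewed out a little on every tick.
		 */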
		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		vminfo.updates++;
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;
		sysinfo.updates++;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API. We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large. If it is, we put the largest value
			 * that we can use into avenrun[i]. This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64[].
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t clk_hdlr, timer_hdlr, lbolt_hdlr;
	cyc_time_t clk_when, lbolt_when;
	int i, sz;
	intptr_t buf;

	/*
	 * Setup handler and timer for the clock cyclic.
	 */
	clk_hdlr.cyh_func = (cyc_func_t)clock;
	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
	clk_hdlr.cyh_arg = NULL;

	clk_when.cyt_when = 0;
	clk_when.cyt_interval = nsec_per_tick;
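	/*
	 * Illustrative note (not from the original): nsec_per_tick is
	 * NANOSEC / hz, so with the common hz = 100 the clock cyclic
	 * fires every 10,000,000 ns (10 ms), and with hz = 1000 every
	 * millisecond. A cyt_when of 0 lies in the past, so the cyclic
	 * begins firing as soon as it is added.
	 */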
	/*
	 * cyclic_timer is dedicated to the ddi interface, which
	 * uses the same clock resolution as the system one.
	 */
	timer_hdlr.cyh_func = (cyc_func_t)cyclic_timer;
	timer_hdlr.cyh_level = CY_LOCK_LEVEL;
	timer_hdlr.cyh_arg = NULL;

	/*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
	 * interval to satisfy performance needs of the DDI lbolt consumers.
	 * It is off by default.
	 */
	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
	lbolt_hdlr.cyh_arg = NULL;

	lbolt_when.cyt_interval = nsec_per_tick;

	/*
	 * Allocate cache line aligned space for the per CPU lbolt data and
	 * lbolt info structures, and initialize them with their default
	 * values. Note that these structures are also cache line sized.
	 */
	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    hz / HZ_DEFAULT;
	else
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;

	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;

	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);

	for (i = 0; i < max_ncpus; i++)
		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;

	/*
	 * Install the softint used to switch between event and cyclic driven
	 * lbolt. We use a soft interrupt to make sure the context of the
	 * cyclic reprogram call is safe.
	 */
	lbolt_softint_add();

	/*
	 * Since the hybrid lbolt implementation is based on a hardware
	 * counter that is reset at every hardware reboot, and since we'd
	 * like the lbolt value to start at zero after both a hardware and a
	 * fast reboot, we calculate the number of clock ticks the system's
	 * been up and store it in the lbi_debug_time field of the lbolt info
	 * structure. The value of this field will be subtracted from lbolt
	 * before returning it.
	 */
	lb_info->lbi_internal = lb_info->lbi_debug_time =
	    (gethrtime() / nsec_per_tick);

	/*
	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
	 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
	 * to either lbolt_{cyclic,event}_driven here signals those code paths
	 * that the lbolt related structures can be used.
	 */
	if (lbolt_cyc_only) {
		lbolt_when.cyt_when = 0;
		lbolt_hybrid = lbolt_cyclic_driven;
	} else {
		lbolt_when.cyt_when = CY_INFINITY;
		lbolt_hybrid = lbolt_event_driven;
	}
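	/*
	 * Illustrative note (not from the original): CY_INFINITY
	 * effectively parks the lbolt cyclic; it is installed below but
	 * never fires until lbolt_ev_to_cyclic() reprograms it to a
	 * nsec_per_tick period. With lbolt_cyc_only set, the cyclic
	 * instead starts firing immediately and the system never leaves
	 * cyclic driven mode.
	 */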
	/*
	 * Grab cpu_lock and install all three cyclics.
	 */
	mutex_enter(&cpu_lock);

	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
	ddi_timer_cyclic = cyclic_add(&timer_hdlr, &clk_when);
	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);

	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg to get 10-sec moving loadavg together
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}

/*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the previous usr, sys, wait time elapsed,
 * computing a delta, and adding that delta to the elapsed usr, sys,
 * wait increase.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}
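/*
 * Illustrative note (not from the original): lg_loads[] is a circular
 * buffer of per-second samples with lg_cur as the next write position,
 * so the most recent sample sits at (lg_cur - 1 + S_LOADAVG_SZ) %
 * S_LOADAVG_SZ, which is what the spos/prev conditionals above compute
 * without using the modulo operator. genloadavg() then walks backward
 * over up to S_MOVAVG_SZ samples (the ten-second window mentioned in
 * its comment) to form the moving average.
 */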
/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
 * routine computes new time and frequency offset estimates for each
 * call. The PPS signal itself determines the new time offset,
 * instead of the calling argument. Presumably, calls to
 * ntp_adjtime() occur only when the caller believes the local clock
 * is valid within some bound (+-128 ms with NTP). If the caller's
 * time is far different from the PPS time, an argument will ensue,
 * and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase. For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 * usec_per_tick. This is called from the serial driver cdintr(),
 * or equivalent, at a high PIL. Because the kernel keeps a
 * high-resolution time, the following code can accept either
 * the traditional argument pair, or the current highres timestamp
 * in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;
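	/*
	 * Worked example (illustrative): with samples pps_tf[] =
	 * {5, 2, -3}, newest first, pps_tf[0] > pps_tf[1] and
	 * pps_tf[1] > pps_tf[2], so the median pps_tf[1] = 2 becomes
	 * pps_offset and the spread pps_tf[0] - pps_tf[2] = 8 us feeds
	 * the jitter estimate; a single wild sample can perturb the
	 * jitter but never becomes the offset.
	 */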
	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}
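	/*
	 * Worked example (illustrative): the same median-of-three
	 * scheme used for time above now deglitches frequency. With
	 * pps_ff[] = {3, -1, 2}, newest first, the median pps_ff[2] = 2
	 * becomes the frequency sample u_usec and the spread
	 * pps_ff[0] - pps_ff[1] = 4 becomes the stability sample
	 * v_usec processed below.
	 */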
	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t, int pending)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t	ticks;
	int	poke = 0;	/* notify another CPU */
	int	user_mode;
	size_t	rss;
	int	i, total_usec, usec;
	rctl_qty_t secs;

	ASSERT(pending > 0);

	/* Must be operating on an lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	for (i = 0; i < pending; i++) {
		CL_TICK(t);	/* Class specific tick processing */
		DTRACE_SCHED1(tick, kthread_t *, t);
	}

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	ticks = (pp->p_utime + pp->p_stime) % hz;
	/*
	 * Update process times. Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode) {
		pp->p_utime += pending;
	} else {
		pp->p_stime += pending;
	}

	pp->p_ttime += pending;
	as = pp->p_as;

	/*
	 * Update user profiling statistics. Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer. The value passed to itimerdecr() has to be
	 * in microseconds and has to be less than one second. Hence
	 * this loop.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (user_mode &&
		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGVTALRM);
		}
		total_usec -= usec;
	}

	/*
	 * If CPU was in user state, process lwp-profile
	 * interval timer.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGPROF);
		}
		total_usec -= usec;
	}
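	/*
	 * Worked example (illustrative): itimerdecr() takes strictly
	 * less than one second, hence the chunking above. With
	 * usec_per_tick = 10000 and pending = 150 (1.5 s of backlog),
	 * total_usec = 1500000 is consumed as 999999 us followed by
	 * 500001 us, i.e. two itimerdecr() calls per timer.
	 */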
	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 *
	 * Perform the check only if we have accumulated more than a second.
	 */
	if ((ticks + pending) >= hz) {
		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
	}

	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit. This minimizes the
	 * number of calls to rctl_test(). The task CPU time mutex
	 * is highly contentious as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

/*
 * The delay(9F) man page indicates that it can only be called from user or
 * kernel context - detect and diagnose bad calls. The following macro will
 * produce a limited number of messages identifying bad callers. This is done
 * in a macro so that caller() is meaningful. When a bad caller is identified,
 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
 */
#define	DELAY_CONTEXT_CHECK()	{					\
	uint32_t	m;						\
	char		*f;						\
	ulong_t		off;						\
									\
	m = delay_from_interrupt_msg;					\
	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
	    !panicstr && !devinfo_freeze &&				\
	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
		f = modgetsymname((uintptr_t)caller(), &off);		\
		cmn_err(CE_WARN, "delay(9F) called from "		\
		    "interrupt context: %s`%s",				\
		    mod_containing_pc(caller()), f ? f : "...");	\
	}								\
}
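/*
 * Illustrative note (not from the original): the atomic_cas_32() above
 * doubles as rate limiter and decrement. Starting from
 * delay_from_interrupt_msg = 20, each offending call swaps m for m - 1
 * and warns; once the counter reaches 0 the compare value becomes 1
 * (m ? m : 1), the CAS fails against the stored 0, and the message is
 * suppressed from then on without ever wrapping below zero.
 */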
/*
 * delay_common: common delay code.
 */
static void
delay_common(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		timeleft;
	callout_id_t	id;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		/* Convert delay(9F) call into drv_usecwait(9F) call. */
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

	deadline = ddi_get_lbolt() + ticks;
	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout_default(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout_default(id, 0);
	}
}

/*
 * Delay specified number of clock ticks.
 */
void
delay(clock_t ticks)
{
	DELAY_CONTEXT_CHECK();

	delay_common(ticks);
}

/*
 * Delay a random number of clock ticks between 1 and ticks.
 */
void
delay_random(clock_t ticks)
{
	int	r;

	DELAY_CONTEXT_CHECK();

	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
	if (ticks == 0)
		ticks = 1;
	ticks = (r % ticks) + 1;
	delay_common(ticks);
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	kthread_t	*t = curthread;
	clock_t		deadline;
	clock_t		rc;

	/* If timeouts aren't running all we can do is spin. */
	if (panicstr || devinfo_freeze) {
		if (ticks > 0)
			drv_usecwait(TICK_TO_USEC(ticks));
		return (0);
	}

	deadline = ddi_get_lbolt() + ticks;
	mutex_enter(&t->t_delay_lock);
	do {
		rc = cv_timedwait_sig(&t->t_delay_cv,
		    &t->t_delay_lock, deadline);
		/* loop until past deadline or signaled */
	} while (rc > 0);
	mutex_exit(&t->t_delay_lock);
	if (rc == 0)
		return (EINTR);
	return (0);
}


#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip. approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive. An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past; check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987. That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		timestruc_t tmp;
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
		ts.tv_nsec = 0;
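		/*
		 * Illustrative arithmetic (not from the original):
		 * (1987 - 1970) * 365 * 86400 = 536112000 seconds,
		 * which ignores leap days and so actually lands in
		 * late December 1986; close enough for a value that
		 * only serves as an obviously-stale default.
		 */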
#define	SECONDS_PER_DAY	86400

/*
 * Initialize the system time based on the TOD chip.  approx is used as
 * an approximation of time (e.g. from the filesystem) in the event that
 * the TOD chip has been cleared or is unresponsive.  An approx of -1
 * means the filesystem doesn't keep time.
 */
void
clkset(time_t approx)
{
	timestruc_t ts;
	int spl;
	int set_clock = 0;

	mutex_enter(&tod_lock);
	ts = tod_get();

	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
		/*
		 * If the TOD chip is reporting some time after 1971,
		 * then it probably didn't lose power or become otherwise
		 * cleared in the recent past; check to assure that
		 * the time coming from the filesystem isn't in the future
		 * according to the TOD chip.
		 */
		if (approx != -1 && approx > ts.tv_sec) {
			cmn_err(CE_WARN, "Last shutdown is later "
			    "than time on time-of-day chip; check date.");
		}
	} else {
		/*
		 * If the TOD chip isn't giving correct time, set it to the
		 * greater of i) approx and ii) 1987.  That way if approx
		 * is negative or is earlier than 1987, we set the clock
		 * back to a time when Oliver North, ALF and Dire Straits
		 * were all on the collective brain: 1987.
		 */
		timestruc_t tmp;
		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
		ts.tv_nsec = 0;

		/*
		 * Attempt to write the new time to the TOD chip.  Set spl
		 * high to avoid getting preempted between the tod_set and
		 * tod_get.
		 */
		spl = splhi();
		tod_set(ts);
		tmp = tod_get();
		splx(spl);

		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
			tod_broken = 1;
			dosynctodr = 0;
			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
		} else {
			cmn_err(CE_WARN, "Time-of-day chip had "
			    "incorrect date; check and reset.");
		}
		set_clock = 1;
	}

	if (!boot_time) {
		boot_time = ts.tv_sec;
		set_clock = 1;
	}

	if (set_clock)
		set_hrestime(&ts);

	mutex_exit(&tod_lock);
}

int	timechanged;	/* for testing if the system time has been reset */

void
set_hrestime(timestruc_t *ts)
{
	int spl = hr_clock_lock();
	hrestime = *ts;
	membar_enter();	/* hrestime must be visible before timechanged++ */
	timedelta = 0;
	timechanged++;
	hr_clock_unlock(spl);
	callout_hrestime();
}
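
#if 0	/* illustrative only -- not compiled into the kernel */
/*
 * A conceptual sketch (hypothetical consumer, not part of this file) of
 * what the membar_enter() in set_hrestime() buys a reader of
 * 'timechanged': sample the counter, read hrestime, and retry if the
 * counter moved in between.  The writer's barrier guarantees the new
 * hrestime is visible no later than the timechanged bump.  This ignores
 * the read-side ordering a real consumer would also need; actual kernel
 * readers take the hr_clock lock instead.
 */
static timestruc_t
example_read_hrestime(void)
{
	timestruc_t ts;
	int tc;

	do {
		tc = timechanged;
		ts = hrestime;		/* racy read, validated below */
	} while (tc != timechanged);	/* retry if the clock was reset */
	return (ts);
}
#endif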
static uint_t deadman_seconds;
static uint32_t deadman_panics;
static int deadman_enabled = 0;
static int deadman_panic_timers = 1;

static void
deadman(void)
{
	if (panicstr) {
		/*
		 * During panic, other CPUs besides the panic
		 * master continue to handle cyclics and some other
		 * interrupts.  The code below is intended to be
		 * single threaded, so any CPU other than the master
		 * must keep out.
		 */
		if (CPU->cpu_id != panic_cpu.cpu_id)
			return;

		if (!deadman_panic_timers)
			return; /* allow all timers to be manually disabled */

		/*
		 * If we are generating a crash dump or syncing filesystems
		 * and the corresponding timer is set, decrement it and
		 * re-enter the panic code to abort it and advance to the
		 * next state.  The panic states and triggers are explained
		 * in panic.c.
		 */
		if (panic_dump) {
			if (dump_timeleft && (--dump_timeleft == 0)) {
				panic("panic dump timeout");
				/*NOTREACHED*/
			}
		} else if (panic_sync) {
			if (sync_timeleft && (--sync_timeleft == 0)) {
				panic("panic sync timeout");
				/*NOTREACHED*/
			}
		}

		return;
	}

	if (deadman_counter != CPU->cpu_deadman_counter) {
		CPU->cpu_deadman_counter = deadman_counter;
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	if (--CPU->cpu_deadman_countdown > 0)
		return;

	/*
	 * Regardless of whether or not we actually bring the system down,
	 * bump the deadman_panics variable.
	 *
	 * N.B. deadman_panics is incremented once for each CPU that
	 * passes through here.  It's expected that all the CPUs will
	 * detect this condition within one second of each other, so
	 * when deadman_enabled is off, deadman_panics will
	 * typically be a multiple of the total number of CPUs in
	 * the system.
	 */
	atomic_add_32(&deadman_panics, 1);

	if (!deadman_enabled) {
		CPU->cpu_deadman_countdown = deadman_seconds;
		return;
	}

	/*
	 * If we're here, we want to bring the system down.
	 */
	panic("deadman: timed out after %d seconds of clock "
	    "inactivity", deadman_seconds);
	/*NOTREACHED*/
}

/*ARGSUSED*/
static void
deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	cpu->cpu_deadman_counter = 0;
	cpu->cpu_deadman_countdown = deadman_seconds;

	hdlr->cyh_func = (cyc_func_t)deadman;
	hdlr->cyh_level = CY_HIGH_LEVEL;
	hdlr->cyh_arg = NULL;

	/*
	 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time.  Simplest reason to do this is to make it
	 * more likely that only one CPU will panic in case of a
	 * timeout.  This is (strictly speaking) an aesthetic, not a
	 * technical consideration.
	 */
	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = NANOSEC;
}

void
deadman_init(void)
{
	cyc_omni_handler_t hdlr;

	if (deadman_seconds == 0)
		deadman_seconds = snoop_interval / MICROSEC;

	if (snooping)
		deadman_enabled = 1;

	hdlr.cyo_online = deadman_online;
	hdlr.cyo_offline = NULL;
	hdlr.cyo_arg = NULL;

	mutex_enter(&cpu_lock);
	deadman_cyclic = cyclic_add_omni(&hdlr);
	mutex_exit(&cpu_lock);
}
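
#if 0	/* illustrative only -- not compiled into the kernel */
/*
 * A minimal userland sketch of the heartbeat scheme deadman() uses:
 * a producer (the clock, in the kernel) bumps a global counter once a
 * second, and the high-level watchdog checks that the counter has moved
 * since its last visit, rearming its countdown when it has and firing
 * after deadman_seconds consecutive visits with no progress.  The
 * constants and variable names here are made up.
 */
#include <stdio.h>

static unsigned long heartbeat;		/* stands in for deadman_counter */
static unsigned long snapshot;		/* per-CPU cpu_deadman_counter */
static unsigned int countdown = 5;	/* stands in for deadman_seconds */

static int
watchdog_check(void)
{
	if (heartbeat != snapshot) {	/* the clock made progress */
		snapshot = heartbeat;
		countdown = 5;
		return (0);
	}
	if (--countdown > 0)		/* stalled, but within tolerance */
		return (0);
	return (1);			/* would panic in the kernel */
}

int
main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (i < 3)		/* stop the heartbeat after 3 ticks */
			heartbeat++;
		if (watchdog_check())
			printf("stall detected at check %d\n", i);
	}
	return (0);
}
#endif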
/*
 * tod_fault() is for updating tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     Currently used for debugging only.
 * (2) The following four cases are detected by the tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable, e.g. because it is
 *     a virtual TOD provided by a hypervisor.
 */
enum tod_fault_type
tod_fault(enum tod_fault_type ftype, int off)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	if (tod_faulted != ftype) {
		switch (ftype) {
		case TOD_NOFAULT:
			plat_tod_fault(TOD_NOFAULT);
			cmn_err(CE_NOTE, "Restarted tracking "
			    "Time of Day clock.");
			tod_faulted = ftype;
			break;
		case TOD_REVERSED:
		case TOD_JUMPED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s by 0x%x]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype], off);
				tod_faulted = ftype;
			}
			break;
		case TOD_STALLED:
		case TOD_RATECHANGED:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_WARN, "Time of Day clock error: "
				    "reason [%s]. -- "
				    " Stopped tracking Time Of Day clock.",
				    tod_fault_table[ftype]);
				tod_faulted = ftype;
			}
			break;
		case TOD_RDONLY:
			if (tod_faulted == TOD_NOFAULT) {
				plat_tod_fault(ftype);
				cmn_err(CE_NOTE, "!Time of Day clock is "
				    "Read-Only; set of Date/Time will not "
				    "persist across reboot.");
				tod_faulted = ftype;
			}
			break;
		default:
			break;
		}
	}
	return (tod_faulted);
}

/*
 * Two functions that allow tod_status_flag to be manipulated by functions
 * external to this file.
 */

void
tod_status_set(int tod_flag)
{
	tod_status_flag |= tod_flag;
}

void
tod_status_clear(int tod_flag)
{
	tod_status_flag &= ~tod_flag;
}

/*
 * Record a timestamp and the value passed to tod_set().  The next call to
 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
 * when checking the timestruc_t returned by tod_get().  Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task but these
 * become obsolete, and will be re-assigned with the prev_set_* values,
 * in the case when the TOD is re-written.
 */
void
tod_set_prev(timestruc_t ts)
{
	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
	    tod_validate_deferred) {
		return;
	}
	prev_set_tick = gethrtime();
	/*
	 * A negative value will be set to zero in utc_to_tod() so we
	 * fake a zero here in such a case.  This would need to change
	 * if the behavior of utc_to_tod() changes.
	 */
	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
}
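
#if 0	/* illustrative only -- not compiled into the kernel */
/*
 * A hypothetical platform-level sequence (the hardware-write function is
 * made up) showing how a TOD writer cooperates with the validation code:
 * record the value being written via tod_set_prev(), write the hardware,
 * then raise TOD_SET_DONE so the next tod_validate() compares the value
 * it reads against what was just written rather than a stale sample.
 */
static void
example_tod_rewrite(timestruc_t ts)
{
	ASSERT(MUTEX_HELD(&tod_lock));

	tod_set_prev(ts);		/* remember what we are writing */
	plat_example_tod_write(ts);	/* hypothetical hardware write */
	tod_status_set(TOD_SET_DONE);	/* tell tod_validate() about it */
}
#endif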
2229 */ 2230 tod_status_flag &= ~TOD_GET_FAILED; 2231 return (tod); 2232 } else if (tod_status_flag & TOD_SET_DONE) { 2233 /* 2234 * TOD has been modified. Just before the TOD was written, 2235 * tod_set_prev() saved tod and hrtime; we can now use 2236 * those values, prev_set_tod and prev_set_tick, to validate 2237 * the incoming tod that's just been read. 2238 */ 2239 prev_tod = prev_set_tod; 2240 prev_tick = prev_set_tick; 2241 dtick_avg = TOD_REF_FREQ; 2242 tod_status_flag &= ~TOD_SET_DONE; 2243 /* 2244 * If a tod_set() preceded a cpr_suspend() without an 2245 * intervening tod_validate(), we need to ensure that a 2246 * TOD_JUMPED condition is ignored. 2247 * Note this isn't a concern in the case of DR as we've 2248 * just reassigned dtick_avg, above. 2249 */ 2250 if (tod_status_flag & TOD_CPR_RESUME_DONE) { 2251 cpr_resume_done = 1; 2252 tod_status_flag &= ~TOD_CPR_RESUME_DONE; 2253 } 2254 } else if (tod_status_flag & TOD_CPR_RESUME_DONE) { 2255 /* 2256 * The system's coming back from a checkpoint resume. 2257 */ 2258 cpr_resume_done = 1; 2259 tod_status_flag &= ~TOD_CPR_RESUME_DONE; 2260 /* 2261 * We need to handle the possibility of a CPR suspend 2262 * operation having been initiated whilst a DR event was 2263 * in-flight. 2264 */ 2265 if (tod_status_flag & TOD_DR_RESUME_DONE) { 2266 dr_resume_done = 1; 2267 tod_status_flag &= ~TOD_DR_RESUME_DONE; 2268 } 2269 } else if (tod_status_flag & TOD_DR_RESUME_DONE) { 2270 /* 2271 * A Dynamic Reconfiguration event has taken place. 2272 */ 2273 dr_resume_done = 1; 2274 tod_status_flag &= ~TOD_DR_RESUME_DONE; 2275 } 2276 2277 /* test hook */ 2278 switch (tod_unit_test) { 2279 case 1: /* for testing jumping tod */ 2280 tod += tod_test_injector; 2281 tod_unit_test = 0; 2282 break; 2283 case 2: /* for testing stuck tod bit */ 2284 tod |= 1 << tod_test_injector; 2285 tod_unit_test = 0; 2286 break; 2287 case 3: /* for testing stalled tod */ 2288 tod = prev_tod; 2289 tod_unit_test = 0; 2290 break; 2291 case 4: /* reset tod fault status */ 2292 (void) tod_fault(TOD_NOFAULT, 0); 2293 tod_unit_test = 0; 2294 break; 2295 default: 2296 break; 2297 } 2298 2299 diff_tod = tod - prev_tod; 2300 diff_tick = tick - prev_tick; 2301 2302 ASSERT(diff_tick >= 0); 2303 2304 if (diff_tod < 0) { 2305 /* ERROR - tod reversed */ 2306 tod_bad = TOD_REVERSED; 2307 off = (int)(prev_tod - tod); 2308 } else if (diff_tod == 0) { 2309 /* tod did not advance */ 2310 if (diff_tick > TOD_STALL_THRESHOLD) { 2311 /* ERROR - tod stalled */ 2312 tod_bad = TOD_STALLED; 2313 } else { 2314 /* 2315 * Make sure we don't update prev_tick 2316 * so that diff_tick is calculated since 2317 * the first diff_tod == 0 2318 */ 2319 return (tod); 2320 } 2321 } else { 2322 /* calculate dtick */ 2323 dtick = diff_tick / diff_tod; 2324 2325 /* update dtick averages */ 2326 dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N); 2327 2328 /* 2329 * Calculate dtick_delta as 2330 * variation from reference freq in quartiles 2331 */ 2332 dtick_delta = (dtick_avg - TOD_REF_FREQ) / 2333 (TOD_REF_FREQ >> 2); 2334 2335 /* 2336 * Even with a perfectly functioning TOD device, 2337 * when the number of elapsed seconds is low the 2338 * algorithm can calculate a rate that is beyond 2339 * tolerance, causing an error. The algorithm is 2340 * inaccurate when elapsed time is low (less than 2341 * 5 seconds). 
static void
calcloadavg(int nrun, uint64_t *hp_ave)
{
	static int64_t f[3] = { 135, 27, 9 };
	uint_t i;
	int64_t q, r;

	/*
	 * Compute load average over the last 1, 5, and 15 minutes
	 * (60, 300, and 900 seconds).  The constants in f[3] are for
	 * exponential decay:
	 * (1 - exp(-1/60)) << 13 = 135,
	 * (1 - exp(-1/300)) << 13 = 27,
	 * (1 - exp(-1/900)) << 13 = 9.
	 */

	/*
	 * a little hoop-jumping to avoid integer overflow
	 */
	for (i = 0; i < 3; i++) {
		q = (hp_ave[i] >> 16) << 7;
		r = (hp_ave[i] & 0xffff) << 7;
		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
	}
}
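
#if 0	/* illustrative only -- not compiled into the kernel */
/*
 * A quick userland check of the decay constants quoted above: each is
 * (1 - exp(-1/T)) scaled by 2^13 = 8192, for T = 60, 300 and 900
 * seconds.  Compile with -lm; the output is approximately 135.4, 27.3
 * and 9.1, which truncate to the table values 135, 27 and 9.
 */
#include <stdio.h>
#include <math.h>

int
main(void)
{
	double T[3] = { 60.0, 300.0, 900.0 };
	int i;

	for (i = 0; i < 3; i++) {
		printf("T = %3.0fs -> %.1f\n", T[i],
		    (1.0 - exp(-1.0 / T[i])) * 8192.0);
	}
	return (0);
}
#endif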
2459 */ 2460 exp = ts + nsec_per_tick - 1; 2461 exp = (exp/nsec_per_tick) * nsec_per_tick; 2462 2463 ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp); 2464 ASSERT(ret); 2465 2466 lbolt_hybrid = lbolt_cyclic_driven; 2467 lb_info->lbi_cyc_deactivate = B_FALSE; 2468 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal; 2469 2470 kpreempt_enable(); 2471 2472 ret = atomic_dec_32_nv(&lb_info->lbi_token); 2473 ASSERT(ret == 0); 2474 2475 return (1); 2476 } 2477 2478 int64_t 2479 lbolt_event_driven(void) 2480 { 2481 hrtime_t ts; 2482 int64_t lb; 2483 int ret, cpu = CPU->cpu_seqid; 2484 2485 ts = gethrtime(); 2486 ASSERT(ts > 0); 2487 2488 ASSERT(nsec_per_tick > 0); 2489 lb = (ts/nsec_per_tick); 2490 2491 /* 2492 * Switch to cyclic mode if the number of calls to this routine 2493 * has reached the threshold within the interval. 2494 */ 2495 if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) { 2496 2497 if (--lb_cpu[cpu].lbc_counter == 0) { 2498 /* 2499 * Reached the threshold within the interval, reset 2500 * the usage statistics. 2501 */ 2502 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; 2503 lb_cpu[cpu].lbc_cnt_start = lb; 2504 2505 /* 2506 * Make sure only one thread reprograms the 2507 * lbolt cyclic and changes the mode. 2508 */ 2509 if (panicstr == NULL && 2510 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) { 2511 2512 if (lbolt_hybrid == lbolt_cyclic_driven) { 2513 ret = atomic_dec_32_nv( 2514 &lb_info->lbi_token); 2515 ASSERT(ret == 0); 2516 } else { 2517 lbolt_softint_post(); 2518 } 2519 } 2520 } 2521 } else { 2522 /* 2523 * Exceeded the interval, reset the usage statistics. 2524 */ 2525 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; 2526 lb_cpu[cpu].lbc_cnt_start = lb; 2527 } 2528 2529 ASSERT(lb >= lb_info->lbi_debug_time); 2530 2531 return (lb - lb_info->lbi_debug_time); 2532 } 2533 2534 int64_t 2535 lbolt_cyclic_driven(void) 2536 { 2537 int64_t lb = lb_info->lbi_internal; 2538 int cpu = CPU->cpu_seqid; 2539 2540 if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) { 2541 2542 if (lb_cpu[cpu].lbc_counter == 0) 2543 /* 2544 * Reached the threshold within the interval, 2545 * prevent the lbolt cyclic from turning itself 2546 * off. 2547 */ 2548 lb_info->lbi_cyc_deactivate = B_FALSE; 2549 else 2550 lb_cpu[cpu].lbc_counter--; 2551 } else { 2552 /* 2553 * Only reset the usage statistics when the interval has 2554 * exceeded. 2555 */ 2556 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; 2557 lb_cpu[cpu].lbc_cnt_start = lb; 2558 } 2559 2560 ASSERT(lb >= lb_info->lbi_debug_time); 2561 2562 return (lb - lb_info->lbi_debug_time); 2563 } 2564 2565 /* 2566 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy 2567 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers. 2568 * It is inactive by default, and will be activated when switching from event 2569 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled 2570 * by lbolt_cyclic_driven(). 2571 */ 2572 static void 2573 lbolt_cyclic(void) 2574 { 2575 int ret; 2576 2577 lb_info->lbi_internal++; 2578 2579 if (!lbolt_cyc_only) { 2580 2581 if (lb_info->lbi_cyc_deactivate) { 2582 /* 2583 * Switching from cyclic to event driven mode. 
/*
 * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to
 * satisfy performance needs of the ddi_get_lbolt() and ddi_get_lbolt64()
 * consumers.  It is inactive by default, and will be activated when
 * switching from event to cyclic driven lbolt.  The cyclic will turn
 * itself off unless signaled by lbolt_cyclic_driven().
 */
static void
lbolt_cyclic(void)
{
	int ret;

	lb_info->lbi_internal++;

	if (!lbolt_cyc_only) {

		if (lb_info->lbi_cyc_deactivate) {
			/*
			 * Switching from cyclic to event driven mode.
			 */
			if (atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {

				if (lbolt_hybrid == lbolt_event_driven) {
					ret = atomic_dec_32_nv(
					    &lb_info->lbi_token);
					ASSERT(ret == 0);
					return;
				}

				kpreempt_disable();

				lbolt_hybrid = lbolt_event_driven;
				ret = cyclic_reprogram(
				    lb_info->id.lbi_cyclic_id,
				    CY_INFINITY);
				ASSERT(ret);

				kpreempt_enable();

				ret = atomic_dec_32_nv(&lb_info->lbi_token);
				ASSERT(ret == 0);
			}
		}

		/*
		 * The lbolt cyclic should not try to deactivate itself
		 * before the sampling period has elapsed.
		 */
		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
		    lb_info->lbi_thresh_interval) {
			lb_info->lbi_cyc_deactivate = B_TRUE;
			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
		}
	}
}

/*
 * Since the lbolt service was historically cyclic driven, it must be
 * 'stopped' when the system drops into the kernel debugger.
 * lbolt_debug_entry() is called by the KDI system claim callbacks to
 * record a hires timestamp at debug enter time.  lbolt_debug_return()
 * is called by the system release callbacks to account for the time
 * spent in the debugger.  The value is then accumulated in the lb_info
 * structure and used by lbolt_event_driven() and lbolt_cyclic_driven(),
 * as well as the mdb_get_lbolt() routine.
 */
void
lbolt_debug_entry(void)
{
	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		lb_info->lbi_debug_ts = gethrtime();
	}
}

/*
 * Calculate the time spent in the debugger and add it to the lbolt info
 * structure.  We also update the internal lbolt value in case we were in
 * cyclic driven mode going in.
 */
void
lbolt_debug_return(void)
{
	hrtime_t ts;

	if (lbolt_hybrid != lbolt_bootstrap) {
		ASSERT(lb_info != NULL);
		ASSERT(nsec_per_tick > 0);

		ts = gethrtime();
		lb_info->lbi_internal = (ts/nsec_per_tick);
		lb_info->lbi_debug_time +=
		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);

		lb_info->lbi_debug_ts = 0;
	}
}
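
#if 0	/* illustrative only -- not compiled into the kernel */
/*
 * A minimal sketch of the debugger-time accounting above, with made-up
 * numbers: the lbolt value handed to consumers is the raw tick count
 * minus lbi_debug_time, so ticks that elapse while the system is held
 * in the debugger are never reported.
 */
#include <stdio.h>

int
main(void)
{
	long long nsec_per_tick = 10000000LL;	/* hz = 100 */
	long long debug_ts = 5000000000LL;	/* hrtime at debug entry */
	long long debug_return = 7000000000LL;	/* hrtime at release */
	long long debug_time = 0;
	long long internal, reported;

	/* what lbolt_debug_return() does */
	internal = debug_return / nsec_per_tick;		 /* 700 */
	debug_time += (debug_return - debug_ts) / nsec_per_tick; /* 200 */

	/* what lbolt_cyclic_driven() would then report */
	reported = internal - debug_time;
	printf("reported lbolt: %lld\n", reported);		 /* 500 */
	return (0);
}
#endif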