Lines Matching +full:clock +full:- +full:frequency

84  * clock() is called straight from the clock cyclic; see clock_init().
87 * reprime clock
99 * high-precision avenrun values. These are needed to make the
104 time_t time; /* time in seconds since 1970 - for compatibility only */
108 * Phase/frequency-lock loop (PLL/FLL) definitions
113 * time_state shows the state of the system clock, with values defined
116 * time_status shows the status of the system clock, with bits defined
124 * time_tolerance determines maximum frequency error or tolerance of the
125 * CPU clock oscillator and is a property of the architecture; however,
130 * in cases where a precision clock counter or external clock is
132 * whether the external clock is working or not.
141 int32_t time_state = TIME_OK; /* clock state */
142 int32_t time_status = STA_UNSYNC; /* clock status bits */
145 int32_t time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
146 int32_t time_precision = 1; /* clock precision (us) */
152 * residual time and frequency offset of the local clock. The scale
155 * time_phase and time_freq are the phase increment and the frequency
173 int32_t time_freq = 0; /* frequency offset (scaled ppm) */
190 * pps_freq is the frequency offset produced by the frequency median
194 * pps_usec is latched from a high resolution counter or external clock
207 * pps_intcnt counts the calibration intervals for use in the interval-
214 int32_t pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
215 int32_t pps_freq = 0; /* frequency offset (scaled ppm) */
216 int32_t pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
231 * pps_calcnt counts the frequency calibration intervals, which are
239 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
253 * original symbols removed from the system. The once clock driven variables are
255 * the appropriate clock resolution. The default event driven implementation is
258 * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
262 * frequency of these to determine when to transition from event to cyclic
263 * driven and vice-versa. These values are kept on a per CPU basis for
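The switching heuristic described in the comment above can be illustrated with a minimal, hypothetical sketch (the real bookkeeping lives in the per-CPU lb_cpu[] entries and lb_info shown further down in this listing; the names below are illustrative only):

    #include <stdint.h>

    /*
     * Hedged sketch: count lbolt requests on this CPU over a short window.
     * If the threshold number of calls is reached before the window
     * expires, demand is high enough that a cyclic bumping a counter once
     * per tick is cheaper than recomputing lbolt from gethrtime().
     */
    static int
    should_go_cyclic(int64_t now_ticks, int64_t *window_start,
        uint32_t *calls_left, uint32_t thresh_calls, int64_t thresh_interval)
    {
        if ((now_ticks - *window_start) < thresh_interval) {
            if (--*calls_left == 0)
                return (1);     /* switch to cyclic-driven mode */
        } else {
            /* window expired: restart the window and the counter */
            *window_start = now_ticks;
            *calls_left = thresh_calls;
        }
        return (0);             /* stay event-driven */
    }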
317 static int tod_broken = 0; /* clock chip doesn't work */
319 cyclic_id_t clock_cyclic; /* clock()'s cyclic_id */
351 * On non-SPARC systems, TOD validation must be deferred until gethrtime
352 * returns non-zero values (after mach_clkinit's execution).
356 * tod_get() in clock(), the deferment is lifted there.
368 "Changed in Clock Rate", /* TOD_RATECHANGED */
369 "Is Read-Only" /* TOD_RDONLY */
399 clock(void) in clock()
425 * the time delta processing which occurs every clock tick in clock()
429 * below - see the section which begins with: if (one_sec) in clock()
431 * This section marks the beginning of the precision-kernel in clock()
434 * First, compute the phase adjustment. If the low-order bits in clock()
439 if (time_phase <= -FINEUSEC) { in clock()
440 ltemp = -time_phase / SCALE_PHASE; in clock()
443 timedelta -= ltemp * (NANOSEC/MICROSEC); in clock()
447 time_phase -= ltemp * SCALE_PHASE; in clock()
454 * End of precision-kernel code fragment which is processed in clock()
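The fragment above accumulates fractional microseconds in time_phase and carries whole microseconds out into the tick adjustment; the lines shown are the negative branch, and the positive branch is symmetric. A minimal sketch of that accumulate-and-carry pattern, assuming only that the scale factor counts fixed-point units per microsecond (the kernel's actual SCALE_PHASE/FINEUSEC definitions live in sys/timex.h and may differ in detail):

    #include <stdint.h>

    /*
     * Illustrative carry step: once at least one whole microsecond has
     * accumulated in the fixed-point phase accumulator, move the whole
     * microseconds out into the per-tick adjustment and keep the fraction.
     */
    static void
    carry_whole_usecs(int32_t *phase, int32_t *adj_usec, int32_t units_per_usec)
    {
        if (*phase >= units_per_usec) {
            int32_t whole = *phase / units_per_usec;

            *adj_usec += whole;
            *phase -= whole * units_per_usec;
        }
    }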
461 * for some form of I/O to complete -- gets added to in clock()
463 * wait counts from all CPUs. Also add up the per-partition in clock()
492 uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable; in clock()
494 cpupart->cp_updates++; in clock()
496 cpupart->cp_nrunnable_cum += cpupart_nrunnable; in clock()
498 cpupart->cp_nrunning = 0; in clock()
499 cpupart->cp_nrunnable = cpupart_nrunnable; in clock()
501 } while ((cpupart = cpupart->cp_next) != cp_list_head); in clock()
504 /* Now count the per-CPU statistics. */ in clock()
507 uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable; in clock()
510 cpupart = cp->cpu_part; in clock()
511 cpupart->cp_nrunnable_cum += cpu_nrunnable; in clock()
513 cpupart->cp_nrunnable += cpu_nrunnable; in clock()
517 cpupart->cp_nrunning++; in clock()
527 if (one_sec && (cp->cpu_flags & CPU_EXISTS)) { in clock()
535 * Computes cpu_intrload as %utilization (0-99). in clock()
540 intracct += cp->cpu_intracct[i]; in clock()
544 intrused = intracct - cp->cpu_intrlast; in clock()
545 cp->cpu_intrlast = intracct; in clock()
549 intrused = maxnsec - 1; in clock()
554 change = cp->cpu_intrload - load; in clock()
558 cp->cpu_intrload = load; in clock()
560 cp->cpu_intrload -= (change + 3) / 4; in clock()
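A sketch of the smoothing performed above, under the assumption that the interrupt accounting yields the nanoseconds spent in interrupt context during the last second (the exact scaling to a 0-99 percentage in the kernel may differ slightly):

    #include <stdint.h>

    /*
     * Hedged sketch: convert interrupt time over the last interval into a
     * 0-99 percentage, take an increase immediately, and decay toward a
     * lower value a quarter of the gap at a time (the "(change + 3) / 4"
     * step above).
     */
    static int
    smooth_intrload(int prev_load, int64_t intr_nsec, int64_t interval_nsec)
    {
        int load, change;

        if (intr_nsec >= interval_nsec)
            intr_nsec = interval_nsec - 1;               /* clamp below 100% */
        load = (int)((intr_nsec * 100) / interval_nsec); /* 0-99 */

        change = prev_load - load;
        if (change < 0)
            return (load);                               /* rising: jump */
        return (prev_load - (change + 3) / 4);           /* falling: decay */
    }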
569 (cp->cpu_flags & CPU_EXISTS)) { in clock()
575 * got the clock interrupt not the thread that is in clock()
579 t = cp->cpu_thread; in clock()
581 t = t->t_intr; in clock()
586 * stack and not the current CPU handling the clock in clock()
589 if ((t && t != cp->cpu_idle_thread) || (CPU != cp && in clock()
591 if (t->t_lpl == cp->cpu_lpl) { in clock()
609 lgrp_loadavg(t->t_lpl, in clock()
613 lgrp_loadavg(cp->cpu_lpl, in clock()
616 } while ((cp = cp->cpu_next) != cpu_list); in clock()
621 * Check for a callout that needs to be called from the clock in clock()
645 * Beginning of precision-kernel code fragment executed in clock()
651 * PPS frequency discipline code is present, the phase is in clock()
652 * increased to compensate for the CPU clock oscillator in clock()
653 * frequency error. in clock()
655 * On a 32-bit machine and given parameters in the timex.h in clock()
656 * header file, the maximum phase adjustment is +-512 ms in clock()
657 * and maximum frequency offset is (a tad less than) in clock()
658 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask. in clock()
663 * Leap second processing. If in leap-insert state at in clock()
664 * the end of the day, the system clock is set back one in clock()
665 * second; if in leap-delete state, the system clock is in clock()
667 * external clock driver will ensure that reported time in clock()
683 hrestime.tv_sec--; in clock()
721 lltemp = -time_offset; in clock()
733 time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE; in clock()
746 time_offset -= lltemp; in clock()
751 * Compute the frequency estimate and additional phase in clock()
752 * adjustment due to frequency error for the next in clock()
754 * watchdog counter and update the frequency computed by in clock()
770 * End of precision kernel-code fragment in clock()
775 * Note: the clock synchronization code now assumes in clock()
777 * - if dosynctodr is 1, then compute the drift between in clock()
781 * - if dosynctodr is 0, then the tod chip is independent in clock()
782 * of the software clock and should not be adjusted, in clock()
790 drift = tod.tv_sec - hrestime.tv_sec; in clock()
791 absdrift = (drift >= 0) ? drift : -drift; in clock()
818 * the clock; record that. in clock()
841 MAX((spgcnt_t)(availrmem - swapfs_minfree), 0); in clock()
855 cmn_err(CE_WARN, "clock: maxswap < free"); in clock()
857 cmn_err(CE_WARN, "clock: maxswap < resv"); in clock()
859 vminfo.swap_alloc += maxswap - free; in clock()
860 vminfo.swap_avail += maxswap - resv; in clock()
880 if (--fsflushcnt <= 0) { in clock()
892 * hp_avenrun[i] >> (16 - FSHIFT) will not be in clock()
898 if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT))) in clock()
900 (16 - FSHIFT)); in clock()
906 calcloadavg(genloadavg(&cpupart->cp_loadavg), in clock()
907 cpupart->cp_hp_avenrun); in clock()
908 } while ((cpupart = cpupart->cp_next) != cp_list_head); in clock()
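The shift by (16 - FSHIFT) above converts the 64-bit high-precision averages (16 fractional bits) into the traditional 32-bit avenrun format (FSHIFT fractional bits). A minimal sketch of that conversion, with the overflow guard visible in the fragment:

    #include <stdint.h>

    /*
     * Hedged sketch: drop fractional precision from 16 bits to fshift bits,
     * but only when the result is known to fit in a signed 32-bit value.
     */
    static void
    export_avenrun(const uint64_t hp[3], int32_t legacy[3], int fshift)
    {
        int i;

        for (i = 0; i < 3; i++) {
            if (hp[i] < ((uint64_t)1 << (31 + 16 - fshift)))
                legacy[i] = (int32_t)(hp[i] >> (16 - fshift));
        }
    }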
917 if (t->t_state == TS_STOPPED) { in clock()
920 t->t_whystop = 0; in clock()
921 t->t_whatstop = 0; in clock()
922 t->t_schedflag &= ~TS_ALLSTART; in clock()
931 * Wake up the swapper if any high priority swapped-out threads in clock()
937 if (t->t_state == TS_STOPPED) { in clock()
940 t->t_whystop = 0; in clock()
941 t->t_whatstop = 0; in clock()
942 t->t_schedflag &= ~TS_ALLSTART; in clock()
959 * Set up the handler and timer for the clock cyclic. in clock_init()
961 clk_hdlr.cyh_func = (cyc_func_t)clock; in clock_init()
989 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL * in clock_init()
992 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL; in clock_init()
994 lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS; in clock_init()
1001 lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls; in clock_init()
1014 * reboot, we calculate the number of clock ticks the system's been up in clock_init()
1019 lb_info->lbi_internal = lb_info->lbi_debug_time = in clock_init()
1043 lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when); in clock_init()
1049 * Called before calcloadavg to get 10-sec moving loadavg together
1062 /* 10-second snapshot, calculate first position */ in genloadavg()
1063 if (avgs->lg_len == 0) { in genloadavg()
1066 slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ; in genloadavg()
1068 spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 : in genloadavg()
1069 S_LOADAVG_SZ + (avgs->lg_cur - 1); in genloadavg()
1071 cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i); in genloadavg()
1072 hr_avg += avgs->lg_loads[cpos]; in genloadavg()
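The loop above walks a small circular buffer of one-second load samples backwards from the slot before lg_cur and averages the most recent ones. A self-contained sketch of the same moving-average walk (the buffer size and names are stand-ins for S_LOADAVG_SZ/S_MOVAVG_SZ, not the kernel's values):

    #include <stdint.h>

    #define SKETCH_BUF_SZ   11      /* stand-in for S_LOADAVG_SZ */

    /*
     * Hedged sketch: average the 'span' most recent samples (or fewer if
     * the buffer has not filled yet), wrapping around the circular buffer.
     */
    static int32_t
    moving_avg(const int32_t loads[SKETCH_BUF_SZ], int cur, int len, int span)
    {
        int64_t sum = 0;
        int i, n, pos;

        if (len == 0)
            return (0);
        n = (len < span) ? len : span;
        if (n > SKETCH_BUF_SZ)
            n = SKETCH_BUF_SZ;
        pos = (cur - 1 >= 0) ? cur - 1 : SKETCH_BUF_SZ + (cur - 1);
        for (i = 0; i < n; i++) {
            int c = (pos - i >= 0) ? pos - i : SKETCH_BUF_SZ + (pos - i);

            sum += loads[c];
        }
        return ((int32_t)(sum / n));
    }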
1082 * Run every second from clock() to update the loadavg count available to the
1083 * system and cpu-partitions.
1102 * first pass totals up per-cpu statistics for system and cpu in loadavg_update()
1109 lavg = &cp->cpu_loadavg; in loadavg_update()
1111 cpu_total = cp->cpu_acct[CMS_USER] + in loadavg_update()
1112 cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq; in loadavg_update()
1115 prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 : in loadavg_update()
1116 S_LOADAVG_SZ + (lavg->lg_cur - 1); in loadavg_update()
1117 if (lavg->lg_loads[prev] <= 0) { in loadavg_update()
1118 lavg->lg_loads[lavg->lg_cur] = cpu_total; in loadavg_update()
1121 lavg->lg_loads[lavg->lg_cur] = cpu_total; in loadavg_update()
1122 cpu_total = cpu_total - lavg->lg_loads[prev]; in loadavg_update()
1127 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ; in loadavg_update()
1128 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ? in loadavg_update()
1129 lavg->lg_len + 1 : S_LOADAVG_SZ; in loadavg_update()
1132 cp->cpu_part->cp_loadavg.lg_total += cpu_total; in loadavg_update()
1134 } while ((cp = cp->cpu_next) != cpu_list); in loadavg_update()
1148 lavg = &cpupart->cp_loadavg; in loadavg_update()
1149 lavg->lg_loads[lavg->lg_cur] = lavg->lg_total; in loadavg_update()
1150 lavg->lg_total = 0; in loadavg_update()
1151 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ; in loadavg_update()
1152 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ? in loadavg_update()
1153 lavg->lg_len + 1 : S_LOADAVG_SZ; in loadavg_update()
1155 } while ((cpupart = cpupart->cp_next) != cp_list_head); in loadavg_update()
1158 * Third pass totals up per-zone statistics. in loadavg_update()
1164 * clock_update() - local clock update
1166 * This routine is called by ntp_adjtime() to update the local clock
1167 * phase and frequency. The implementation is of an
1168 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1169 * routine computes new time and frequency offset estimates for each
1172 * ntp_adjtime() occur only when the caller believes the local clock
1173 * is valid within some bound (+-128 ms with NTP). If the caller's
1178 * intervals less than 1024 s, operation should be in phase-lock mode
1180 * intervals greater than this, operation should be in frequency-lock
1181 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
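Per the comment above, the choice between the two modes is driven by the caller's update interval. A tiny, purely illustrative example of how a time daemon might pick the mode it requests through the STA_FLL status bit (the 1024 s boundary is the one quoted above):

    #include <sys/timex.h>      /* STA_FLL */

    /*
     * Hedged sketch: request frequency-lock (FLL) discipline for long
     * update intervals, phase-lock (PLL) otherwise, by toggling STA_FLL
     * in the status word passed to ntp_adjtime().
     */
    static int
    choose_discipline_mode(long update_interval_sec, int *status)
    {
        if (update_interval_sec > 1024)
            *status |= STA_FLL;     /* frequency-lock mode */
        else
            *status &= ~STA_FLL;    /* phase-lock mode */
        return ((*status & STA_FLL) != 0);
    }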
1203 else if (ltemp < -MAXPHASE) in clock_update()
1204 time_offset = -(MAXPHASE * SCALE_UPDATE); in clock_update()
1209 * Select whether the frequency is to be controlled and in which in clock_update()
1216 mtemp = hrestime.tv_sec - time_reftime; in clock_update()
1237 else if (time_freq < -time_tolerance) in clock_update()
1238 time_freq = -time_tolerance; in clock_update()
1246 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1249 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1250 * and leaves it in a handy spot for the clock() routine. It
1252 * frequency offset. This is used in clock() to discipline the CPU
1253 * clock oscillator so that intrinsic frequency error is cancelled out.
1255 * value at the on-time PPS signal transition.
1258 * priority level higher than the timer interrupt routine clock().
1259 * Therefore, the variables used are distinct from the clock()
1260 * variables, with the following exceptions: the PPS frequency pps_freq
1265 * once per second by clock() and is atomically cleared in this
1287 * occurs in the clock() routine before the time variable is in ddi_hardpps()
1295 u_usec = -tvp->tv_usec; in ddi_hardpps()
1296 if (u_usec < -(MICROSEC/2)) in ddi_hardpps()
1298 v_usec = pps_offset - u_usec; in ddi_hardpps()
1300 v_usec = -v_usec; in ddi_hardpps()
1314 * A three-stage median filter is used to help deglitch the pps in ddi_hardpps()
1325 v_usec = pps_tf[0] - pps_tf[2]; in ddi_hardpps()
1328 v_usec = pps_tf[2] - pps_tf[1]; in ddi_hardpps()
1331 v_usec = pps_tf[0] - pps_tf[1]; in ddi_hardpps()
1336 v_usec = pps_tf[2] - pps_tf[0]; in ddi_hardpps()
1339 v_usec = pps_tf[1] - pps_tf[2]; in ddi_hardpps()
1342 v_usec = pps_tf[1] - pps_tf[0]; in ddi_hardpps()
1347 v_usec = (v_usec << PPS_AVG) - pps_jitter; in ddi_hardpps()
1364 pps_usec -= pps_freq; in ddi_hardpps()
1366 pps_usec -= bigtick; in ddi_hardpps()
1376 v_usec = pps_usec - u_usec; in ddi_hardpps()
1378 v_usec -= bigtick; in ddi_hardpps()
1379 if (v_usec < -(bigtick >> 1)) in ddi_hardpps()
1382 v_usec = -(-v_usec >> pps_shift); in ddi_hardpps()
1386 cal_sec = tvp->tv_sec; in ddi_hardpps()
1387 cal_usec = tvp->tv_usec; in ddi_hardpps()
1388 cal_sec -= pps_time.tv_sec; in ddi_hardpps()
1389 cal_usec -= pps_time.tv_usec; in ddi_hardpps()
1392 cal_sec--; in ddi_hardpps()
1398 * excessive frequency error. The number of timer ticks during in ddi_hardpps()
1399 * the interval may vary +-1 tick. Add to this a margin of one in ddi_hardpps()
1400 * tick for the PPS signal jitter and maximum frequency in ddi_hardpps()
1405 if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) || in ddi_hardpps()
1407 v_usec > time_tolerance || v_usec < -time_tolerance) { in ddi_hardpps()
1416 * A three-stage median filter is used to help deglitch the pps in ddi_hardpps()
1417 * frequency. The median sample becomes the frequency offset in ddi_hardpps()
1419 * becomes the frequency dispersion (stability) estimate. in ddi_hardpps()
1427 v_usec = pps_ff[0] - pps_ff[2]; in ddi_hardpps()
1430 v_usec = pps_ff[2] - pps_ff[1]; in ddi_hardpps()
1433 v_usec = pps_ff[0] - pps_ff[1]; in ddi_hardpps()
1438 v_usec = pps_ff[2] - pps_ff[0]; in ddi_hardpps()
1441 v_usec = pps_ff[1] - pps_ff[2]; in ddi_hardpps()
1444 v_usec = pps_ff[1] - pps_ff[0]; in ddi_hardpps()
1449 * Here the frequency dispersion (stability) is updated. If it in ddi_hardpps()
1450 * is less than one-fourth the maximum (MAXFREQ), the frequency in ddi_hardpps()
1452 * will be processed later by the clock() routine. in ddi_hardpps()
1454 v_usec = (v_usec >> 1) - pps_stabil; in ddi_hardpps()
1456 pps_stabil -= -v_usec >> PPS_AVG; in ddi_hardpps()
1466 pps_freq -= -u_usec >> PPS_AVG; in ddi_hardpps()
1467 if (pps_freq < -time_tolerance) in ddi_hardpps()
1468 pps_freq = -time_tolerance; in ddi_hardpps()
1469 u_usec = -u_usec; in ddi_hardpps()
1486 pps_shift--; in ddi_hardpps()
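Both median filters used above (pps_tf[] for phase, pps_ff[] for frequency) follow the same pattern: the median of the last three samples becomes the estimate, and the spread between the largest and smallest becomes the jitter/dispersion input. A compact, illustrative version of that three-sample filter:

    #include <stdint.h>

    /*
     * Hedged sketch: return the median of three samples; *spread receives
     * the difference between the largest and smallest, which the kernel
     * feeds into its jitter/stability estimates.
     */
    static int32_t
    median_of_three(int32_t a, int32_t b, int32_t c, int32_t *spread)
    {
        int32_t lo, mid, hi;

        if (a > b) {
            lo = b;
            hi = a;
        } else {
            lo = a;
            hi = b;
        }
        if (c < lo) {
            mid = lo;
            lo = c;
        } else if (c > hi) {
            mid = hi;
            hi = c;
        } else {
            mid = c;
        }
        *spread = hi - lo;
        return (mid);
    }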
1509 * Handle clock tick processing for a thread.
1540 /* pp->p_lock makes sure that the thread does not exit */ in clock_tick()
1541 ASSERT(MUTEX_HELD(&pp->p_lock)); in clock_tick()
1543 user_mode = (lwp->lwp_state == LWP_USER); in clock_tick()
1545 ticks = (pp->p_utime + pp->p_stime) % hz; in clock_tick()
1547 * Update process times. Should use high res clock and state in clock_tick()
1551 pp->p_utime += pending; in clock_tick()
1553 pp->p_stime += pending; in clock_tick()
1556 pp->p_ttime += pending; in clock_tick()
1557 as = pp->p_as; in clock_tick()
1563 if (pp->p_prof.pr_scale) { in clock_tick()
1564 atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending); in clock_tick()
1572 * If CPU was in user state, process lwp-virtual time in clock_tick()
1579 usec = MIN(total_usec, (MICROSEC - 1)); in clock_tick()
1581 timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) && in clock_tick()
1582 itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) { in clock_tick()
1586 total_usec -= usec; in clock_tick()
1590 * If CPU was in user state, process lwp-profile in clock_tick()
1595 usec = MIN(total_usec, (MICROSEC - 1)); in clock_tick()
1596 if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) && in clock_tick()
1597 itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) { in clock_tick()
1601 total_usec -= usec; in clock_tick()
1606 * (a) process.max-cpu-time resource control in clock_tick()
1611 (void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp, in clock_tick()
1612 (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO); in clock_tick()
1616 * (b) task.max-cpu-time resource control in clock_tick()
1623 if (pp->p_ttime >= clock_tick_proc_max) { in clock_tick()
1624 secs = task_cpu_time_incr(pp->p_task, pp->p_ttime); in clock_tick()
1625 pp->p_ttime = 0; in clock_tick()
1627 (void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls, in clock_tick()
1636 PTOU(pp)->u_mem += rss; in clock_tick()
1637 if (rss > PTOU(pp)->u_mem_max) in clock_tick()
1638 PTOU(pp)->u_mem_max = rss; in clock_tick()
1643 if (poke && t->t_cpu != CPU) in clock_tick()
1644 poke_cpu(t->t_cpu->cpu_id); in clock_tick()
1653 struct prof *pr = &p->p_prof; in profil_tick()
1656 ticks = lwp->lwp_oweupc; in profil_tick()
1657 } while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks); in profil_tick()
1659 mutex_enter(&p->p_pflock); in profil_tick()
1660 if (pr->pr_scale >= 2 && upc >= pr->pr_off) { in profil_tick()
1662 * Old-style profiling in profil_tick()
1664 uint16_t *slot = pr->pr_base; in profil_tick()
1666 if (pr->pr_scale != 2) { in profil_tick()
1667 uintptr_t delta = upc - pr->pr_off; in profil_tick()
1668 uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) + in profil_tick()
1669 (((delta & 0xffff) * pr->pr_scale) >> 16); in profil_tick()
1670 if (byteoff >= (uintptr_t)pr->pr_size) { in profil_tick()
1671 mutex_exit(&p->p_pflock); in profil_tick()
1679 pr->pr_scale = 0; in profil_tick()
1681 } else if (pr->pr_scale == 1) { in profil_tick()
1690 while (ticks-- > 0) { in profil_tick()
1691 if (pr->pr_samples == pr->pr_size) { in profil_tick()
1693 pr->pr_scale = 0; in profil_tick()
1698 result = suword32(pr->pr_base, (uint32_t)upc); in profil_tick()
1702 result = suword64(pr->pr_base, (uint64_t)upc); in profil_tick()
1708 result = -1; in profil_tick()
1712 pr->pr_scale = 0; in profil_tick()
1715 pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model); in profil_tick()
1716 pr->pr_samples++; in profil_tick()
1719 mutex_exit(&p->p_pflock); in profil_tick()
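The byte-offset computation in the old-style profiling path above treats pr_scale as a 16.16 fixed-point multiplier: the offset into the profile buffer is (delta * scale) >> 16, evaluated in two halves so the intermediate product cannot overflow. A standalone sketch of that computation:

    #include <stdint.h>

    /*
     * Hedged sketch of the profil(2)-style bucket offset: scale the
     * distance of the sampled pc from pr_off by a 16.16 fixed-point
     * factor, splitting the multiply into high and low halves.
     */
    static uintptr_t
    prof_byteoff(uintptr_t pc, uintptr_t off, uint32_t scale)
    {
        uintptr_t delta = pc - off;

        return (((delta >> 16) * scale) +
            (((delta & 0xffff) * scale) >> 16));
    }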
1727 mutex_enter(&t->t_delay_lock); in delay_wakeup()
1728 cv_signal(&t->t_delay_cv); in delay_wakeup()
1729 mutex_exit(&t->t_delay_lock); in delay_wakeup()
1734 * kernel context - detect and diagnose bad calls. The following macro will
1747 atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) { \
1775 while ((timeleft = deadline - ddi_get_lbolt()) > 0) { in delay_common()
1776 mutex_enter(&t->t_delay_lock); in delay_common()
1778 cv_wait(&t->t_delay_cv, &t->t_delay_lock); in delay_common()
1779 mutex_exit(&t->t_delay_lock); in delay_common()
1785 * Delay the specified number of clock ticks.
1796 * Delay a random number of clock ticks between 1 and ticks.
1830 mutex_enter(&t->t_delay_lock); in delay_sig()
1832 rc = cv_timedwait_sig(&t->t_delay_cv, in delay_sig()
1833 &t->t_delay_lock, deadline); in delay_sig()
1836 mutex_exit(&t->t_delay_lock); in delay_sig()
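For reference, a typical blocking-context use of the delay interfaces above (delay(9F) suspends the caller for a number of clock ticks; drv_usectohz(9F) converts microseconds to ticks). As the interrupt-context check above implies, this must not be called from interrupt context:

    #include <sys/types.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    /*
     * Hedged usage sketch: block the calling thread for roughly one
     * second's worth of clock ticks.
     */
    static void
    pause_one_second(void)
    {
        delay(drv_usectohz(1000000));
    }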
1848 * the TOD chip has been cleared or is unresponsive. An approx of -1
1869 if (approx != -1 && approx > ts.tv_sec) { in clkset()
1871 "than time on time-of-day chip; check date."); in clkset()
1877 * is negative or is earlier than 1987, we set the clock in clkset()
1882 time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY; in clkset()
1898 cmn_err(CE_WARN, "Time-of-day chip unresponsive."); in clkset()
1900 cmn_err(CE_WARN, "Time-of-day chip had " in clkset()
1908 global_zone->zone_boot_time = ts.tv_sec; in clkset()
1948 if (CPU->cpu_id != panic_cpu.cpu_id) in deadman()
1956 * the corresponding timer is set, decrement it and re-enter in deadman()
1961 if (dump_timeleft && (--dump_timeleft == 0)) { in deadman()
1969 if (deadman_counter != CPU->cpu_deadman_counter) { in deadman()
1970 CPU->cpu_deadman_counter = deadman_counter; in deadman()
1971 CPU->cpu_deadman_countdown = deadman_seconds; in deadman()
1975 if (--CPU->cpu_deadman_countdown > 0) in deadman()
1992 CPU->cpu_deadman_countdown = deadman_seconds; in deadman()
1999 panic("deadman: timed out after %d seconds of clock " in deadman()
2008 cpu->cpu_deadman_counter = 0; in deadman_online()
2009 cpu->cpu_deadman_countdown = deadman_seconds; in deadman_online()
2011 hdlr->cyh_func = (cyc_func_t)deadman; in deadman_online()
2012 hdlr->cyh_level = CY_HIGH_LEVEL; in deadman_online()
2013 hdlr->cyh_arg = NULL; in deadman_online()
2022 when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU); in deadman_online()
2023 when->cyt_interval = NANOSEC; in deadman_online()
2057 * (3) TOD_RDONLY: when the TOD clock is not writable, e.g. because it is
2070 "Time of Day clock."); in tod_fault()
2077 cmn_err(CE_WARN, "Time of Day clock error: " in tod_fault()
2078 "reason [%s by 0x%x]. -- " in tod_fault()
2079 " Stopped tracking Time Of Day clock.", in tod_fault()
2088 cmn_err(CE_WARN, "Time of Day clock error: " in tod_fault()
2089 "reason [%s]. -- " in tod_fault()
2090 " Stopped tracking Time Of Day clock.", in tod_fault()
2098 cmn_err(CE_NOTE, "!Time of Day clock is " in tod_fault()
2099 "Read-Only; set of Date/Time will not " in tod_fault()
2133 * become obsolete, and will be re-assigned the prev_set_* values
2134 * when the TOD is re-written.
2257 * in-flight. in tod_validate()
2293 diff_tod = tod - prev_tod; in tod_validate()
2294 diff_tick = tick - prev_tick; in tod_validate()
2299 /* ERROR - tod reversed */ in tod_validate()
2301 off = (int)(prev_tod - tod); in tod_validate()
2305 /* ERROR - tod stalled */ in tod_validate()
2320 dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N); in tod_validate()
2326 dtick_delta = (dtick_avg - TOD_REF_FREQ) / in tod_validate()
2346 * and ignore it; otherwise, in a non-resume in tod_validate()
2350 /* ERROR - tod jumped */ in tod_validate()
2364 /* ERROR - change in clock rate */ in tod_validate()
2402 * (1 - exp(-1/60)) << 13 = 135, in calcloadavg()
2403 * (1 - exp(-1/300)) << 13 = 27, in calcloadavg()
2404 * (1 - exp(-1/900)) << 13 = 9. in calcloadavg()
2408 * a little hoop-jumping to avoid integer overflow in calcloadavg()
2413 hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4; in calcloadavg()
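The three decay constants quoted above come from sampling once per second with time constants of 60, 300 and 900 seconds and scaling by 2^13. A small user-level check of those values (illustrative only; link with -lm):

    #include <stdio.h>
    #include <math.h>

    int
    main(void)
    {
        double taus[3] = { 60.0, 300.0, 900.0 };
        int i;

        /* (1 - exp(-1/T)) << 13, i.e. scaled by 8192 */
        for (i = 0; i < 3; i++)
            printf("T = %3.0f s -> %d\n", taus[i],
                (int)((1.0 - exp(-1.0 / taus[i])) * 8192.0));
        /* prints 135, 27 and 9, matching f[] in calcloadavg() */
        return (0);
    }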
2421 * time by the number of nanoseconds per clock tick. In the cyclic driven mode
2449 lb_info->lbi_internal = (ts/nsec_per_tick); in lbolt_ev_to_cyclic()
2452 * Align the next expiration to a clock tick boundary. in lbolt_ev_to_cyclic()
2454 exp = ts + nsec_per_tick - 1; in lbolt_ev_to_cyclic()
2457 ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp); in lbolt_ev_to_cyclic()
2461 lb_info->lbi_cyc_deactivate = B_FALSE; in lbolt_ev_to_cyclic()
2462 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal; in lbolt_ev_to_cyclic()
2466 ret = atomic_dec_32_nv(&lb_info->lbi_token); in lbolt_ev_to_cyclic()
2477 int ret, cpu = CPU->cpu_seqid; in lbolt_event_driven()
2489 if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) { in lbolt_event_driven()
2491 if (--lb_cpu[cpu].lbc_counter == 0) { in lbolt_event_driven()
2496 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; in lbolt_event_driven()
2504 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) { in lbolt_event_driven()
2508 &lb_info->lbi_token); in lbolt_event_driven()
2519 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; in lbolt_event_driven()
2523 ASSERT(lb >= lb_info->lbi_debug_time); in lbolt_event_driven()
2525 return (lb - lb_info->lbi_debug_time); in lbolt_event_driven()
2531 int64_t lb = lb_info->lbi_internal; in lbolt_cyclic_driven()
2539 if (lb_info->lbi_cyc_deactivate) { in lbolt_cyclic_driven()
2540 cpu = CPU->cpu_seqid; in lbolt_cyclic_driven()
2541 if ((lb - lb_cpu[cpu].lbc_cnt_start) < in lbolt_cyclic_driven()
2542 lb_info->lbi_thresh_interval) { in lbolt_cyclic_driven()
2550 lb_info->lbi_cyc_deactivate = B_FALSE; in lbolt_cyclic_driven()
2552 lb_cpu[cpu].lbc_counter--; in lbolt_cyclic_driven()
2558 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls; in lbolt_cyclic_driven()
2563 ASSERT(lb >= lb_info->lbi_debug_time); in lbolt_cyclic_driven()
2565 return (lb - lb_info->lbi_debug_time); in lbolt_cyclic_driven()
2580 lb_info->lbi_internal++; in lbolt_cyclic()
2584 if (lb_info->lbi_cyc_deactivate) { in lbolt_cyclic()
2589 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) { in lbolt_cyclic()
2593 &lb_info->lbi_token); in lbolt_cyclic()
2602 lb_info->id.lbi_cyclic_id, in lbolt_cyclic()
2608 ret = atomic_dec_32_nv(&lb_info->lbi_token); in lbolt_cyclic()
2617 if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >= in lbolt_cyclic()
2618 lb_info->lbi_thresh_interval) { in lbolt_cyclic()
2619 lb_info->lbi_cyc_deactivate = B_TRUE; in lbolt_cyclic()
2620 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal; in lbolt_cyclic()
2639 lb_info->lbi_debug_ts = gethrtime(); in lbolt_debug_entry()
2658 lb_info->lbi_internal = (ts/nsec_per_tick); in lbolt_debug_return()
2659 lb_info->lbi_debug_time += in lbolt_debug_return()
2660 ((ts - lb_info->lbi_debug_ts)/nsec_per_tick); in lbolt_debug_return()
2662 lb_info->lbi_debug_ts = 0; in lbolt_debug_return()