1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
22 /*	  All Rights Reserved	*/
23 
24 /*
25  * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/t_lock.h>
30 #include <sys/types.h>
31 #include <sys/tuneable.h>
32 #include <sys/sysmacros.h>
33 #include <sys/systm.h>
34 #include <sys/cpuvar.h>
35 #include <sys/lgrp.h>
36 #include <sys/user.h>
37 #include <sys/proc.h>
38 #include <sys/callo.h>
39 #include <sys/kmem.h>
40 #include <sys/var.h>
41 #include <sys/cmn_err.h>
42 #include <sys/swap.h>
43 #include <sys/vmsystm.h>
44 #include <sys/class.h>
45 #include <sys/time.h>
46 #include <sys/debug.h>
47 #include <sys/vtrace.h>
48 #include <sys/spl.h>
49 #include <sys/atomic.h>
50 #include <sys/dumphdr.h>
51 #include <sys/archsystm.h>
52 #include <sys/fs/swapnode.h>
53 #include <sys/panic.h>
54 #include <sys/disp.h>
55 #include <sys/msacct.h>
56 #include <sys/mem_cage.h>
57 #include <sys/kflt_mem.h>
58 
59 #include <vm/page.h>
60 #include <vm/anon.h>
61 #include <vm/rm.h>
62 #include <sys/cyclic.h>
63 #include <sys/cpupart.h>
64 #include <sys/rctl.h>
65 #include <sys/task.h>
66 #include <sys/sdt.h>
67 #include <sys/ddi_timer.h>
68 #include <sys/random.h>
69 #include <sys/modctl.h>
70 
71 /*
72  * for NTP support
73  */
74 #include <sys/timex.h>
75 #include <sys/inttypes.h>
76 
77 #include <sys/sunddi.h>
78 #include <sys/clock_impl.h>
79 
80 /*
81  * clock() is called straight from the clock cyclic; see clock_init().
82  *
83  * Functions:
84  *	reprime clock
85  *	maintain date
86  *	jab the scheduler
87  */
88 
89 extern kcondvar_t	fsflush_cv;
90 extern sysinfo_t	sysinfo;
91 extern vminfo_t	vminfo;
92 extern int	idleswtch;	/* flag set while idle in pswtch() */
93 extern hrtime_t volatile devinfo_freeze;
94 
95 /*
96  * high-precision avenrun values.  These are needed to make the
97  * regular avenrun values accurate.
98  */
99 static uint64_t hp_avenrun[3];
100 int	avenrun[3];		/* FSCALED average run queue lengths */
101 time_t	time;	/* time in seconds since 1970 - for compatibility only */
102 
103 static struct loadavg_s loadavg;
104 /*
105  * Phase/frequency-lock loop (PLL/FLL) definitions
106  *
107  * The following variables are read and set by the ntp_adjtime() system
108  * call.
109  *
110  * time_state shows the state of the system clock, with values defined
111  * in the timex.h header file.
112  *
113  * time_status shows the status of the system clock, with bits defined
114  * in the timex.h header file.
115  *
116  * time_offset is used by the PLL/FLL to adjust the system time in small
117  * increments.
118  *
119  * time_constant determines the bandwidth or "stiffness" of the PLL.
120  *
121  * time_tolerance determines maximum frequency error or tolerance of the
122  * CPU clock oscillator and is a property of the architecture; however,
123  * in principle it could change as a result of the presence of external
124  * discipline signals, for instance.
125  *
126  * time_precision is usually equal to the kernel tick variable; however,
127  * in cases where a precision clock counter or external clock is
128  * available, the resolution can be much less than this and depend on
129  * whether the external clock is working or not.
130  *
131  * time_maxerror is initialized by an ntp_adjtime() call and increased by
132  * the kernel once each second to reflect the maximum error bound
133  * growth.
134  *
135  * time_esterror is set and read by the ntp_adjtime() call, but
136  * otherwise not used by the kernel.
137  */
138 int32_t time_state = TIME_OK;	/* clock state */
139 int32_t time_status = STA_UNSYNC;	/* clock status bits */
140 int32_t time_offset = 0;		/* time offset (us) */
141 int32_t time_constant = 0;		/* pll time constant */
142 int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
143 int32_t time_precision = 1;	/* clock precision (us) */
144 int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
145 int32_t time_esterror = MAXPHASE;	/* estimated error (us) */
146 
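/*
 * Added commentary (not part of the original NTP comment block): these
 * variables are kept in the scaled units defined in timex.h.  One ppm of
 * frequency error accumulates one microsecond of phase error per second,
 * which is why clock() below can widen the error bound once per second with
 * a plain division:
 *
 *	time_maxerror += time_tolerance / SCALE_USEC;
 *
 * i.e. a tolerance of N ppm (N * SCALE_USEC in scaled units) grows
 * time_maxerror by N microseconds for every second that passes without an
 * ntp_adjtime() update.
 */
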
147 /*
148  * The following variables establish the state of the PLL/FLL and the
149  * residual time and frequency offset of the local clock. The scale
150  * factors are defined in the timex.h header file.
151  *
152  * time_phase and time_freq are the phase increment and the frequency
153  * increment, respectively, of the kernel time variable.
154  *
155  * time_freq is set via ntp_adjtime() from a value stored in a file when
156  * the synchronization daemon is first started. Its value is retrieved
157  * via ntp_adjtime() and written to the file about once per hour by the
158  * daemon.
159  *
160  * time_adj is the adjustment added to the value of tick at each timer
161  * interrupt and is recomputed from time_phase and time_freq at each
162  * seconds rollover.
163  *
164  * time_reftime is the second's portion of the system time at the last
165  * call to ntp_adjtime(). It is used to adjust the time_freq variable
166  * and to increase the time_maxerror as the time since last update
167  * increases.
168  */
169 int32_t time_phase = 0;		/* phase offset (scaled us) */
170 int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
171 int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
172 int32_t time_reftime = 0;		/* time at last adjustment (s) */
173 
174 /*
175  * The scale factors of the following variables are defined in the
176  * timex.h header file.
177  *
178  * pps_time contains the time at each calibration interval, as read by
179  * microtime(). pps_count counts the seconds of the calibration
180  * interval, the duration of which is nominally pps_shift in powers of
181  * two.
182  *
183  * pps_offset is the time offset produced by the time median filter
184  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
185  * this filter.
186  *
187  * pps_freq is the frequency offset produced by the frequency median
188  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
189  * by this filter.
190  *
191  * pps_usec is latched from a high resolution counter or external clock
192  * at pps_time. Here we want the hardware counter contents only, not the
193  * contents plus the time_tv.usec as usual.
194  *
195  * pps_valid counts the number of seconds since the last PPS update. It
196  * is used as a watchdog timer to disable the PPS discipline should the
197  * PPS signal be lost.
198  *
199  * pps_glitch counts the number of seconds since the beginning of an
200  * offset burst more than tick/2 from current nominal offset. It is used
201  * mainly to suppress error bursts due to priority conflicts between the
202  * PPS interrupt and timer interrupt.
203  *
204  * pps_intcnt counts the calibration intervals for use in the interval-
205  * adaptation algorithm. It's just too complicated for words.
206  */
207 struct timeval pps_time;	/* kernel time at last interval */
208 int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
209 int32_t pps_offset = 0;		/* pps time offset (us) */
210 int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
211 int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
212 int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
213 int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
214 int32_t pps_usec = 0;		/* microsec counter at last interval */
215 int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
216 int32_t pps_glitch = 0;		/* pps signal glitch counter */
217 int32_t pps_count = 0;		/* calibration interval counter (s) */
218 int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
219 int32_t pps_intcnt = 0;		/* intervals at current duration */
220 
221 /*
222  * PPS signal quality monitors
223  *
224  * pps_jitcnt counts the seconds that have been discarded because the
225  * jitter measured by the time median filter exceeds the limit MAXTIME
226  * (100 us).
227  *
228  * pps_calcnt counts the frequency calibration intervals, which are
229  * variable from 4 s to 256 s.
230  *
231  * pps_errcnt counts the calibration intervals which have been discarded
232  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
233  * calibration interval jitter exceeds two ticks.
234  *
235  * pps_stbcnt counts the calibration intervals that have been discarded
236  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
237  */
238 int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
239 int32_t pps_calcnt = 0;		/* calibration intervals */
240 int32_t pps_errcnt = 0;		/* calibration errors */
241 int32_t pps_stbcnt = 0;		/* stability limit exceeded */
242 
243 kcondvar_t lbolt_cv;
244 
245 /*
246  * Hybrid lbolt implementation:
247  *
248  * The service historically provided by the lbolt and lbolt64 variables has
249  * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
250  * original symbols removed from the system. The once clock driven variables are
251  * now implemented in an event driven fashion, backed by gethrtime() coarsened to
252  * the appropriate clock resolution. The default event driven implementation is
253  * complemented by a cyclic driven one, active only during periods of intense
254  * activity around the DDI lbolt routines, when an lbolt-specific cyclic is
255  * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
256  * rely on the original low cost of consulting a memory position.
257  *
258  * The implementation uses the number of calls to these routines and the
259  * frequency of these to determine when to transition from event to cyclic
260  * driven and vice-versa. These values are kept on a per CPU basis for
261  * scalability reasons and to prevent CPUs from constantly invalidating a single
262  * cache line when modifying a global variable. The transition from event to
263  * cyclic mode happens once the thresholds are crossed, and activity on any CPU
264  * can cause such transition.
265  *
266  * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
267  * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
268  * lbolt_cyclic_driven() according to the current mode. When the thresholds
269  * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
270  * fire at a nsec_per_tick interval and increment an internal variable at
271  * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
272  * will simply return the value of such variable. lbolt_cyclic() will attempt
273  * to shut itself off at each threshold interval (sampling period for calls
274  * to the DDI lbolt routines), and return to the event driven mode, but will
275  * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
276  *
277  * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
278  * for the cyclic subsystem to be initialized.
279  *
280  */
281 int64_t lbolt_bootstrap(void);
282 int64_t lbolt_event_driven(void);
283 int64_t lbolt_cyclic_driven(void);
284 int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
285 uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
286 
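/*
 * Illustrative sketch (added commentary; not the actual DDI source): the
 * ddi_get_lbolt() and ddi_get_lbolt64() routines reduce to an indirect call
 * through the lbolt_hybrid pointer declared above, so switching between the
 * event driven and cyclic driven implementations amounts to a pointer swap:
 *
 *	clock_t
 *	ddi_get_lbolt(void)
 *	{
 *		return ((clock_t)lbolt_hybrid());
 *	}
 *
 * lbolt_bootstrap() serves callers until clock_init() installs one of the
 * other two implementations.
 */
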
287 /*
288  * lbolt's cyclic, installed by clock_init().
289  */
290 static void lbolt_cyclic(void);
291 
292 /*
293  * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
294  * from switching back to event driven, once it reaches cyclic mode.
295  */
296 static boolean_t lbolt_cyc_only = B_FALSE;
297 
298 /*
299  * Cache aligned, per CPU structure with lbolt usage statistics.
300  */
301 static lbolt_cpu_t *lb_cpu;
302 
303 /*
304  * Single, cache aligned, structure with all the information required by
305  * the lbolt implementation.
306  */
307 lbolt_info_t *lb_info;
308 
309 
310 int one_sec = 1; /* turned on once every second */
311 static int fsflushcnt;	/* counter for t_fsflushr */
312 int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
313 int	tod_needsync = 0;	/* need to sync tod chip with software time */
314 static int tod_broken = 0;	/* clock chip doesn't work */
315 time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
316 cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
317 cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */
318 cyclic_id_t ddi_timer_cyclic;	/* cyclic_timer()'s cyclic_id */
319 
320 extern void	clock_tick_schedule(int);
321 
322 static int lgrp_ticks;		/* counter to schedule lgrp load calcs */
323 
324 /*
325  * for tod fault detection
326  */
327 #define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
328 #define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
329 #define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
330 #define	TOD_FILTER_N		4
331 #define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
332 static int tod_faulted = TOD_NOFAULT;
333 
334 static int tod_status_flag = 0;		/* used by tod_validate() */
335 
336 static hrtime_t prev_set_tick = 0;	/* gethrtime() prior to tod_set() */
337 static time_t prev_set_tod = 0;		/* tv_sec value passed to tod_set() */
338 
339 /* patchable via /etc/system */
340 int tod_validate_enable = 1;
341 
342 /* Diagnose/Limit messages about delay(9F) called from interrupt context */
343 int			delay_from_interrupt_diagnose = 0;
344 volatile uint32_t	delay_from_interrupt_msg = 20;
345 
346 /*
347  * On non-SPARC systems, TOD validation must be deferred until gethrtime
348  * returns non-zero values (after mach_clkinit's execution).
349  * On SPARC systems, it must be deferred until after hrtime_base
350  * and hres_last_tick are set (in the first invocation of hres_tick).
351  * Since in both cases the prerequisites occur before the invocation of
352  * tod_get() in clock(), the deferment is lifted there.
353  */
354 static boolean_t tod_validate_deferred = B_TRUE;
355 
356 /*
357  * tod_fault_table[] must be aligned with
358  * enum tod_fault_type in systm.h
359  */
360 static char *tod_fault_table[] = {
361 	"Reversed",			/* TOD_REVERSED */
362 	"Stalled",			/* TOD_STALLED */
363 	"Jumped",			/* TOD_JUMPED */
364 	"Changed in Clock Rate",	/* TOD_RATECHANGED */
365 	"Is Read-Only"			/* TOD_RDONLY */
366 	/*
367 	 * no strings needed for TOD_NOFAULT
368 	 */
369 };
370 
371 /*
372  * test hook for tod broken detection in tod_validate
373  */
374 int tod_unit_test = 0;
375 time_t tod_test_injector;
376 
377 #define	CLOCK_ADJ_HIST_SIZE	4
378 
379 static int	adj_hist_entry;
380 
381 int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];
382 
383 static void calcloadavg(int, uint64_t *);
384 static int genloadavg(struct loadavg_s *);
385 static void loadavg_update();
386 
387 void (*cmm_clock_callout)() = NULL;
388 void (*cpucaps_clock_callout)() = NULL;
389 
390 extern clock_t clock_tick_proc_max;
391 
392 static int64_t deadman_counter = 0;
393 
394 static void
395 clock(void)
396 {
397 	kthread_t	*t;
398 	uint_t	nrunnable;
399 	uint_t	w_io;
400 	cpu_t	*cp;
401 	cpupart_t *cpupart;
402 	extern void set_anoninfo();
403 	extern	void	set_freemem();
404 	void	(*funcp)();
405 	int32_t ltemp;
406 	int64_t lltemp;
407 	int s;
408 	int do_lgrp_load;
409 	int i;
410 	clock_t now = LBOLT_NO_ACCOUNT;	/* current tick */
411 
412 	if (panicstr)
413 		return;
414 
415 	set_anoninfo();
416 	/*
417 	 * Make sure that 'freemem' does not drift too far from the truth
418 	 */
419 	set_freemem();
420 
421 
422 	/*
423 	 * Before the section which is repeated is executed, we do
424 	 * the time delta processing which occurs every clock tick
425 	 *
426 	 * There is additional processing which happens every time
427 	 * the nanosecond counter rolls over which is described
428 	 * below - see the section which begins with: if (one_sec)
429 	 *
430 	 * This section marks the beginning of the precision-kernel
431 	 * code fragment.
432 	 *
433 	 * First, compute the phase adjustment. If the low-order bits
434 	 * (time_phase) of the update overflow, bump the higher order
435 	 * bits (time_update).
436 	 */
437 	time_phase += time_adj;
438 	if (time_phase <= -FINEUSEC) {
439 		ltemp = -time_phase / SCALE_PHASE;
440 		time_phase += ltemp * SCALE_PHASE;
441 		s = hr_clock_lock();
442 		timedelta -= ltemp * (NANOSEC/MICROSEC);
443 		hr_clock_unlock(s);
444 	} else if (time_phase >= FINEUSEC) {
445 		ltemp = time_phase / SCALE_PHASE;
446 		time_phase -= ltemp * SCALE_PHASE;
447 		s = hr_clock_lock();
448 		timedelta += ltemp * (NANOSEC/MICROSEC);
449 		hr_clock_unlock(s);
450 	}
451 
452 	/*
453 	 * End of precision-kernel code fragment which is processed
454 	 * every timer interrupt.
455 	 *
456 	 * Continue with the interrupt processing as scheduled.
457 	 */
458 	/*
459 	 * Count the number of runnable threads and the number waiting
460 	 * for some form of I/O to complete -- gets added to
461 	 * sysinfo.waiting.  To know the state of the system, we must add
462 	 * wait counts from all CPUs.  Also add up the per-partition
463 	 * statistics.
464 	 */
465 	w_io = 0;
466 	nrunnable = 0;
467 
468 	/*
469 	 * keep track of when to update lgrp/part loads
470 	 */
471 
472 	do_lgrp_load = 0;
473 	if (lgrp_ticks++ >= hz / 10) {
474 		lgrp_ticks = 0;
475 		do_lgrp_load = 1;
476 	}
477 
478 	if (one_sec) {
479 		loadavg_update();
480 		deadman_counter++;
481 	}
482 
483 	/*
484 	 * First count the threads waiting on kpreempt queues in each
485 	 * CPU partition.
486 	 */
487 
488 	cpupart = cp_list_head;
489 	do {
490 		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;
491 
492 		cpupart->cp_updates++;
493 		nrunnable += cpupart_nrunnable;
494 		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
495 		if (one_sec) {
496 			cpupart->cp_nrunning = 0;
497 			cpupart->cp_nrunnable = cpupart_nrunnable;
498 		}
499 	} while ((cpupart = cpupart->cp_next) != cp_list_head);
500 
501 
502 	/* Now count the per-CPU statistics. */
503 	cp = cpu_list;
504 	do {
505 		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;
506 
507 		nrunnable += cpu_nrunnable;
508 		cpupart = cp->cpu_part;
509 		cpupart->cp_nrunnable_cum += cpu_nrunnable;
510 		if (one_sec) {
511 			cpupart->cp_nrunnable += cpu_nrunnable;
512 			/*
513 			 * Update user, system, and idle cpu times.
514 			 */
515 			cpupart->cp_nrunning++;
516 			/*
517 			 * w_io is used to update sysinfo.waiting during
518 			 * one_second processing below.  Only gather w_io
519 			 * information when we walk the list of cpus if we're
520 			 * going to perform one_second processing.
521 			 */
522 			w_io += CPU_STATS(cp, sys.iowait);
523 		}
524 
525 		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
526 			int i, load, change;
527 			hrtime_t intracct, intrused;
528 			const hrtime_t maxnsec = 1000000000;
529 			const int precision = 100;
530 
531 			/*
532 			 * Estimate interrupt load on this cpu each second.
533 			 * Computes cpu_intrload as %utilization (0-99).
534 			 */
535 
536 			/* add up interrupt time from all micro states */
537 			for (intracct = 0, i = 0; i < NCMSTATES; i++)
538 				intracct += cp->cpu_intracct[i];
539 			scalehrtime(&intracct);
540 
541 			/* compute nsec used in the past second */
542 			intrused = intracct - cp->cpu_intrlast;
543 			cp->cpu_intrlast = intracct;
544 
545 			/* limit the value for safety (and the first pass) */
546 			if (intrused >= maxnsec)
547 				intrused = maxnsec - 1;
548 
549 			/* calculate %time in interrupt */
550 			load = (precision * intrused) / maxnsec;
551 			ASSERT(load >= 0 && load < precision);
552 			change = cp->cpu_intrload - load;
553 
554 			/* jump to new max, or decay the old max */
555 			if (change < 0)
556 				cp->cpu_intrload = load;
557 			else if (change > 0)
558 				cp->cpu_intrload -= (change + 3) / 4;
559 
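			/*
			 * Worked example of the decay above (added
			 * commentary): with a previous cpu_intrload of 60
			 * and a measured load of 20, change is 40, so
			 * cpu_intrload drops by (40 + 3) / 4 = 10 to 50;
			 * if the load stays at 20 the following samples
			 * decay 50 -> 42 -> 36 -> ..., while any sample
			 * above the current value is taken immediately.
			 */
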
560 			DTRACE_PROBE3(cpu_intrload,
561 			    cpu_t *, cp,
562 			    hrtime_t, intracct,
563 			    hrtime_t, intrused);
564 		}
565 
566 		if (do_lgrp_load &&
567 		    (cp->cpu_flags & CPU_EXISTS)) {
568 			/*
569 			 * When updating the lgroup's load average,
570 			 * account for the thread running on the CPU.
571 			 * If the CPU is the current one, then we need
572 			 * to account for the underlying thread which
573 			 * got the clock interrupt not the thread that is
574 			 * handling the interrupt and calculating the load
575 			 * average
576 			 */
577 			t = cp->cpu_thread;
578 			if (CPU == cp)
579 				t = t->t_intr;
580 
581 			/*
582 			 * Account for the load average for this thread if
583 			 * it isn't the idle thread or it is on the interrupt
584 			 * stack and not the current CPU handling the clock
585 			 * interrupt
586 			 */
587 			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
588 			    CPU_ON_INTR(cp))) {
589 				if (t->t_lpl == cp->cpu_lpl) {
590 					/* local thread */
591 					cpu_nrunnable++;
592 				} else {
593 					/*
594 					 * This is a remote thread, charge it
595 					 * against its home lgroup.  Note that
596 					 * we notice that a thread is remote
597 					 * only if it's currently executing.
598 					 * This is a reasonable approximation,
599 					 * since queued remote threads are rare.
600 					 * Note also that if we didn't charge
601 					 * it to its home lgroup, remote
602 					 * execution would often make a system
603 					 * appear balanced even though it was
604 					 * not, and thread placement/migration
605 					 * would often not be done correctly.
606 					 */
607 					lgrp_loadavg(t->t_lpl,
608 					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
609 				}
610 			}
611 			lgrp_loadavg(cp->cpu_lpl,
612 			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
613 		}
614 	} while ((cp = cp->cpu_next) != cpu_list);
615 
616 	clock_tick_schedule(one_sec);
617 
618 	/*
619 	 * Check for a callout that needs be called from the clock
620 	 * thread to support the membership protocol in a clustered
621 	 * system.  Copy the function pointer so that we can reset
622 	 * this to NULL if needed.
623 	 */
624 	if ((funcp = cmm_clock_callout) != NULL)
625 		(*funcp)();
626 
627 	if ((funcp = cpucaps_clock_callout) != NULL)
628 		(*funcp)();
629 
630 	/*
631 	 * Wakeup the cageout thread waiters once per second.
632 	 */
633 	if (one_sec) {
634 		if (kcage_on) {
635 			kcage_tick();
636 		} else if (kflt_on) {
637 			kflt_tick();
638 		}
639 	}
640 
641 	if (one_sec) {
642 
643 		int drift, absdrift;
644 		timestruc_t tod;
645 		int s;
646 
647 		/*
648 		 * Beginning of precision-kernel code fragment executed
649 		 * every second.
650 		 *
651 		 * On rollover of the second the phase adjustment to be
652 		 * used for the next second is calculated.  Also, the
653 		 * maximum error is increased by the tolerance.  If the
654 		 * PPS frequency discipline code is present, the phase is
655 		 * increased to compensate for the CPU clock oscillator
656 		 * frequency error.
657 		 *
658 		 * On a 32-bit machine and given parameters in the timex.h
659 		 * header file, the maximum phase adjustment is +-512 ms
660 		 * and maximum frequency offset is (a tad less than)
661 		 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
662 		 */
663 		time_maxerror += time_tolerance / SCALE_USEC;
664 
665 		/*
666 		 * Leap second processing. If in leap-insert state at
667 		 * the end of the day, the system clock is set back one
668 		 * second; if in leap-delete state, the system clock is
669 		 * set ahead one second. The microtime() routine or
670 		 * external clock driver will ensure that reported time
671 		 * is always monotonic. The ugly divides should be
672 		 * replaced.
673 		 */
674 		switch (time_state) {
675 
676 		case TIME_OK:
677 			if (time_status & STA_INS)
678 				time_state = TIME_INS;
679 			else if (time_status & STA_DEL)
680 				time_state = TIME_DEL;
681 			break;
682 
683 		case TIME_INS:
684 			if (hrestime.tv_sec % 86400 == 0) {
685 				s = hr_clock_lock();
686 				hrestime.tv_sec--;
687 				hr_clock_unlock(s);
688 				time_state = TIME_OOP;
689 			}
690 			break;
691 
692 		case TIME_DEL:
693 			if ((hrestime.tv_sec + 1) % 86400 == 0) {
694 				s = hr_clock_lock();
695 				hrestime.tv_sec++;
696 				hr_clock_unlock(s);
697 				time_state = TIME_WAIT;
698 			}
699 			break;
700 
701 		case TIME_OOP:
702 			time_state = TIME_WAIT;
703 			break;
704 
705 		case TIME_WAIT:
706 			if (!(time_status & (STA_INS | STA_DEL)))
707 				time_state = TIME_OK;
708 		default:
709 			break;
710 		}
711 
712 		/*
713 		 * Compute the phase adjustment for the next second. In
714 		 * PLL mode, the offset is reduced by a fixed factor
715 		 * times the time constant. In FLL mode the offset is
716 		 * used directly. In either mode, the maximum phase
717 		 * adjustment for each second is clamped so as to spread
718 		 * the adjustment over not more than the number of
719 		 * seconds between updates.
720 		 */
721 		if (time_offset == 0)
722 			time_adj = 0;
723 		else if (time_offset < 0) {
724 			lltemp = -time_offset;
725 			if (!(time_status & STA_FLL)) {
726 				if ((1 << time_constant) >= SCALE_KG)
727 					lltemp *= (1 << time_constant) /
728 					    SCALE_KG;
729 				else
730 					lltemp = (lltemp / SCALE_KG) >>
731 					    time_constant;
732 			}
733 			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
734 				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
735 			time_offset += lltemp;
736 			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
737 		} else {
738 			lltemp = time_offset;
739 			if (!(time_status & STA_FLL)) {
740 				if ((1 << time_constant) >= SCALE_KG)
741 					lltemp *= (1 << time_constant) /
742 					    SCALE_KG;
743 				else
744 					lltemp = (lltemp / SCALE_KG) >>
745 					    time_constant;
746 			}
747 			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
748 				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
749 			time_offset -= lltemp;
750 			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
751 		}
752 
753 		/*
754 		 * Compute the frequency estimate and additional phase
755 		 * adjustment due to frequency error for the next
756 		 * second. When the PPS signal is engaged, gnaw on the
757 		 * watchdog counter and update the frequency computed by
758 		 * the pll and the PPS signal.
759 		 */
760 		pps_valid++;
761 		if (pps_valid == PPS_VALID) {
762 			pps_jitter = MAXTIME;
763 			pps_stabil = MAXFREQ;
764 			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
765 			    STA_PPSWANDER | STA_PPSERROR);
766 		}
767 		lltemp = time_freq + pps_freq;
768 
769 		if (lltemp)
770 			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
771 
772 		/*
773 		 * End of precision kernel-code fragment
774 		 *
775 		 * The section below should be modified if we are planning
776 		 * to use NTP for synchronization.
777 		 *
778 		 * Note: the clock synchronization code now assumes
779 		 * the following:
780 		 *   - if dosynctodr is 1, then compute the drift between
781 		 *	the tod chip and software time and adjust one or
782 		 *	the other depending on the circumstances
783 		 *
784 		 *   - if dosynctodr is 0, then the tod chip is independent
785 		 *	of the software clock and should not be adjusted,
786 		 *	but allowed to free run.  this allows NTP to sync.
787 		 *	hrestime without any interference from the tod chip.
788 		 */
789 
790 		tod_validate_deferred = B_FALSE;
791 		mutex_enter(&tod_lock);
792 		tod = tod_get();
793 		drift = tod.tv_sec - hrestime.tv_sec;
794 		absdrift = (drift >= 0) ? drift : -drift;
795 		if (tod_needsync || absdrift > 1) {
796 			int s;
797 			if (absdrift > 2) {
798 				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
799 					s = hr_clock_lock();
800 					hrestime = tod;
801 					membar_enter();	/* hrestime visible */
802 					timedelta = 0;
803 					timechanged++;
804 					tod_needsync = 0;
805 					hr_clock_unlock(s);
806 					callout_hrestime();
807 
808 				}
809 			} else {
810 				if (tod_needsync || !dosynctodr) {
811 					gethrestime(&tod);
812 					tod_set(tod);
813 					s = hr_clock_lock();
814 					if (timedelta == 0)
815 						tod_needsync = 0;
816 					hr_clock_unlock(s);
817 				} else {
818 					/*
819 					 * If the drift is 2 seconds on the
820 					 * money, then the TOD is adjusting
821 					 * the clock;  record that.
822 					 */
823 					clock_adj_hist[adj_hist_entry++ %
824 					    CLOCK_ADJ_HIST_SIZE] = now;
825 					s = hr_clock_lock();
826 					timedelta = (int64_t)drift*NANOSEC;
827 					hr_clock_unlock(s);
828 				}
829 			}
830 		}
831 		one_sec = 0;
832 		time = gethrestime_sec();  /* for crusty old kmem readers */
833 		mutex_exit(&tod_lock);
834 
835 		/*
836 		 * Some drivers still depend on this... XXX
837 		 */
838 		cv_broadcast(&lbolt_cv);
839 
840 		vminfo.freemem += freemem;
841 		{
842 			pgcnt_t maxswap, resv, free;
843 			pgcnt_t avail =
844 			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
845 
846 			maxswap = k_anoninfo.ani_mem_resv +
847 			    k_anoninfo.ani_max + avail;
848 			free = k_anoninfo.ani_free + avail;
849 			resv = k_anoninfo.ani_phys_resv +
850 			    k_anoninfo.ani_mem_resv;
851 
852 			vminfo.swap_resv += resv;
853 			/* number of reserved and allocated pages */
854 #ifdef	DEBUG
855 			if (maxswap < free)
856 				cmn_err(CE_WARN, "clock: maxswap < free");
857 			if (maxswap < resv)
858 				cmn_err(CE_WARN, "clock: maxswap < resv");
859 #endif
860 			vminfo.swap_alloc += maxswap - free;
861 			vminfo.swap_avail += maxswap - resv;
862 			vminfo.swap_free += free;
863 		}
864 		vminfo.updates++;
865 		if (nrunnable) {
866 			sysinfo.runque += nrunnable;
867 			sysinfo.runocc++;
868 		}
869 		if (nswapped) {
870 			sysinfo.swpque += nswapped;
871 			sysinfo.swpocc++;
872 		}
873 		sysinfo.waiting += w_io;
874 		sysinfo.updates++;
875 
876 		/*
877 		 * Wake up fsflush to write out DELWRI
878 		 * buffers, dirty pages and other cached
879 		 * administrative data, e.g. inodes.
880 		 */
881 		if (--fsflushcnt <= 0) {
882 			fsflushcnt = tune.t_fsflushr;
883 			cv_signal(&fsflush_cv);
884 		}
885 
886 		vmmeter();
887 		calcloadavg(genloadavg(&loadavg), hp_avenrun);
888 		for (i = 0; i < 3; i++)
889 			/*
890 			 * At the moment avenrun[] can only hold 31
891 			 * bits of load average as it is a signed
892 			 * int in the API. We need to ensure that
893 			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
894 			 * too large. If it is, we put the largest value
895 			 * that we can use into avenrun[i]. This is
896 			 * kludgey, but about all we can do until
897 			 * avenrun[] is declared as an array of uint64_t.
898 			 */
899 			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
900 				avenrun[i] = (int32_t)(hp_avenrun[i] >>
901 				    (16 - FSHIFT));
902 			else
903 				avenrun[i] = 0x7fffffff;
904 
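		/*
		 * Added note: avenrun[] is kept in FSCALE fixed-point units
		 * (FSHIFT fractional bits, see sys/param.h), so a steady
		 * load of 1.0 reads back from the shift above as
		 * (1 << FSHIFT); hp_avenrun[] carries 16 fractional bits
		 * instead, hence the shift by (16 - FSHIFT).
		 */
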
905 		cpupart = cp_list_head;
906 		do {
907 			calcloadavg(genloadavg(&cpupart->cp_loadavg),
908 			    cpupart->cp_hp_avenrun);
909 		} while ((cpupart = cpupart->cp_next) != cp_list_head);
910 
911 		/*
912 		 * Wake up the swapper thread if necessary.
913 		 */
914 		if (runin ||
915 		    (runout && (avefree < desfree || wake_sched_sec))) {
916 			t = &t0;
917 			thread_lock(t);
918 			if (t->t_state == TS_STOPPED) {
919 				runin = runout = 0;
920 				wake_sched_sec = 0;
921 				t->t_whystop = 0;
922 				t->t_whatstop = 0;
923 				t->t_schedflag &= ~TS_ALLSTART;
924 				THREAD_TRANSITION(t);
925 				setfrontdq(t);
926 			}
927 			thread_unlock(t);
928 		}
929 	}
930 
931 	/*
932 	 * Wake up the swapper if any high priority swapped-out threads
933 	 * became runnable during the last tick.
934 	 */
935 	if (wake_sched) {
936 		t = &t0;
937 		thread_lock(t);
938 		if (t->t_state == TS_STOPPED) {
939 			runin = runout = 0;
940 			wake_sched = 0;
941 			t->t_whystop = 0;
942 			t->t_whatstop = 0;
943 			t->t_schedflag &= ~TS_ALLSTART;
944 			THREAD_TRANSITION(t);
945 			setfrontdq(t);
946 		}
947 		thread_unlock(t);
948 	}
949 }
950 
951 void
952 clock_init(void)
953 {
954 	cyc_handler_t clk_hdlr, timer_hdlr, lbolt_hdlr;
955 	cyc_time_t clk_when, lbolt_when;
956 	int i, sz;
957 	intptr_t buf;
958 
959 	/*
960 	 * Setup handler and timer for the clock cyclic.
961 	 */
962 	clk_hdlr.cyh_func = (cyc_func_t)clock;
963 	clk_hdlr.cyh_level = CY_LOCK_LEVEL;
964 	clk_hdlr.cyh_arg = NULL;
965 
966 	clk_when.cyt_when = 0;
967 	clk_when.cyt_interval = nsec_per_tick;
968 
969 	/*
970 	 * cyclic_timer is dedicated to the ddi interface, which
971 	 * uses the same clock resolution as the system one.
972 	 */
973 	timer_hdlr.cyh_func = (cyc_func_t)cyclic_timer;
974 	timer_hdlr.cyh_level = CY_LOCK_LEVEL;
975 	timer_hdlr.cyh_arg = NULL;
976 
977 	/*
978 	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
979 	 * interval to satisfy performance needs of the DDI lbolt consumers.
980 	 * It is off by default.
981 	 */
982 	lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
983 	lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
984 	lbolt_hdlr.cyh_arg = NULL;
985 
986 	lbolt_when.cyt_interval = nsec_per_tick;
987 
988 	/*
989 	 * Allocate cache line aligned space for the per CPU lbolt data and
990 	 * lbolt info structures, and initialize them with their default
991 	 * values. Note that these structures are also cache line sized.
992 	 */
993 	sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
994 	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
995 	lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
996 
997 	if (hz != HZ_DEFAULT)
998 		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
999 		    hz/HZ_DEFAULT;
1000 	else
1001 		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;
1002 
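	/*
	 * Added note: the scaling above keeps the lbolt sampling period
	 * constant in wall-clock time; e.g. if hz were 1000 against an
	 * HZ_DEFAULT of 100 (the exact default is defined in clock_impl.h),
	 * the threshold interval is stretched by a factor of ten.
	 */
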
1003 	lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
1004 
1005 	sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
1006 	buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
1007 	lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
1008 
1009 	for (i = 0; i < max_ncpus; i++)
1010 		lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
1011 
1012 	/*
1013 	 * Install the softint used to switch between event and cyclic driven
1014 	 * lbolt. We use a soft interrupt to make sure the context of the
1015 	 * cyclic reprogram call is safe.
1016 	 */
1017 	lbolt_softint_add();
1018 
1019 	/*
1020 	 * Since the hybrid lbolt implementation is based on a hardware counter
1021 	 * that is reset at every hardware reboot, and since we'd like to have
1022 	 * the lbolt value starting at zero after both a hardware and a fast
1023 	 * reboot, we calculate the number of clock ticks the system's been up
1024 	 * and store it in the lbi_debug_time field of the lbolt info structure.
1025 	 * The value of this field will be subtracted from lbolt before
1026 	 * returning it.
1027 	 */
1028 	lb_info->lbi_internal = lb_info->lbi_debug_time =
1029 	    (gethrtime()/nsec_per_tick);
1030 
1031 	/*
1032 	 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
1033 	 * and lbolt_debug_{enter,return} use this value as an indication that
1034 	 * the initialization above hasn't been completed. Setting lbolt_hybrid
1035 	 * to either lbolt_{cyclic,event}_driven here signals those code paths
1036 	 * that the lbolt related structures can be used.
1037 	 */
1038 	if (lbolt_cyc_only) {
1039 		lbolt_when.cyt_when = 0;
1040 		lbolt_hybrid = lbolt_cyclic_driven;
1041 	} else {
1042 		lbolt_when.cyt_when = CY_INFINITY;
1043 		lbolt_hybrid = lbolt_event_driven;
1044 	}
1045 
1046 	/*
1047 	 * Grab cpu_lock and install all three cyclics.
1048 	 */
1049 	mutex_enter(&cpu_lock);
1050 
1051 	clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
1052 	ddi_timer_cyclic = cyclic_add(&timer_hdlr, &clk_when);
1053 	lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);
1054 
1055 	mutex_exit(&cpu_lock);
1056 }
1057 
1058 /*
1059  * Called before calcloadavg() to compute the 10-second moving load average.
1060  */
1061 
1062 static int
1063 genloadavg(struct loadavg_s *avgs)
1064 {
1065 	int avg;
1066 	int spos; /* starting position */
1067 	int cpos; /* moving current position */
1068 	int i;
1069 	int slen;
1070 	hrtime_t hr_avg;
1071 
1072 	/* 10-second snapshot, calculate first position */
1073 	if (avgs->lg_len == 0) {
1074 		return (0);
1075 	}
1076 	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;
1077 
1078 	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
1079 	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
1080 	for (i = hr_avg = 0; i < slen; i++) {
1081 		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
1082 		hr_avg += avgs->lg_loads[cpos];
1083 	}
1084 
1085 	hr_avg = hr_avg / slen;
1086 	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
1087 
1088 	return (avg);
1089 }
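
/*
 * Worked example for genloadavg() (added commentary): lg_cur always points
 * at the next slot to be written, so with lg_cur == 2 and a full ring the
 * loop above reads slots 1, 0, S_LOADAVG_SZ - 1, S_LOADAVG_SZ - 2, ... for
 * up to S_MOVAVG_SZ samples (one per second, giving the 10-second window
 * mentioned above), averages the scaled hrtime loads and converts the
 * result into LGRP_LOADAVG_IN_THREAD_MAX units.
 */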
1090 
1091 /*
1092  * Run every second from clock() to update the loadavg count available to the
1093  * system and cpu-partitions.
1094  *
1095  * This works by sampling the accumulated usr, sys and wait times,
1096  * computing the delta against the previous sample, and adding that
1097  * delta to the running load totals.
1098  */
1099 
1100 static void
1101 loadavg_update()
1102 {
1103 	cpu_t *cp;
1104 	cpupart_t *cpupart;
1105 	hrtime_t cpu_total;
1106 	int prev;
1107 
1108 	cp = cpu_list;
1109 	loadavg.lg_total = 0;
1110 
1111 	/*
1112 	 * first pass totals up per-cpu statistics for system and cpu
1113 	 * partitions
1114 	 */
1115 
1116 	do {
1117 		struct loadavg_s *lavg;
1118 
1119 		lavg = &cp->cpu_loadavg;
1120 
1121 		cpu_total = cp->cpu_acct[CMS_USER] +
1122 		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
1123 		/* compute delta against last total */
1124 		scalehrtime(&cpu_total);
1125 		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
1126 		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
1127 		if (lavg->lg_loads[prev] <= 0) {
1128 			lavg->lg_loads[lavg->lg_cur] = cpu_total;
1129 			cpu_total = 0;
1130 		} else {
1131 			lavg->lg_loads[lavg->lg_cur] = cpu_total;
1132 			cpu_total = cpu_total - lavg->lg_loads[prev];
1133 			if (cpu_total < 0)
1134 				cpu_total = 0;
1135 		}
1136 
1137 		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1138 		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1139 		    lavg->lg_len + 1 : S_LOADAVG_SZ;
1140 
1141 		loadavg.lg_total += cpu_total;
1142 		cp->cpu_part->cp_loadavg.lg_total += cpu_total;
1143 
1144 	} while ((cp = cp->cpu_next) != cpu_list);
1145 
1146 	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
1147 	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
1148 	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
1149 	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
1150 	/*
1151 	 * Second pass updates counts
1152 	 */
1153 	cpupart = cp_list_head;
1154 
1155 	do {
1156 		struct loadavg_s *lavg;
1157 
1158 		lavg = &cpupart->cp_loadavg;
1159 		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
1160 		lavg->lg_total = 0;
1161 		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1162 		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1163 		    lavg->lg_len + 1 : S_LOADAVG_SZ;
1164 
1165 	} while ((cpupart = cpupart->cp_next) != cp_list_head);
1166 
1167 }
1168 
1169 /*
1170  * clock_update() - local clock update
1171  *
1172  * This routine is called by ntp_adjtime() to update the local clock
1173  * phase and frequency. The implementation is of an
1174  * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1175  * routine computes new time and frequency offset estimates for each
1176  * call.  The PPS signal itself determines the new time offset,
1177  * instead of the calling argument.  Presumably, calls to
1178  * ntp_adjtime() occur only when the caller believes the local clock
1179  * is valid within some bound (+-128 ms with NTP). If the caller's
1180  * time is far different than the PPS time, an argument will ensue,
1181  * and it's not clear who will lose.
1182  *
1183  * For uncompensated quartz crystal oscillators and nominal update
1184  * intervals less than 1024 s, operation should be in phase-lock mode
1185  * (STA_FLL = 0), where the loop is disciplined to phase. For update
1186  * intervals greater than this, operation should be in frequency-lock
1187  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1188  *
1189  * Note: mutex(&tod_lock) is in effect.
1190  */
1191 void
1192 clock_update(int offset)
1193 {
1194 	int ltemp, mtemp, s;
1195 
1196 	ASSERT(MUTEX_HELD(&tod_lock));
1197 
1198 	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1199 		return;
1200 	ltemp = offset;
1201 	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
1202 		ltemp = pps_offset;
1203 
1204 	/*
1205 	 * Scale the phase adjustment and clamp to the operating range.
1206 	 */
1207 	if (ltemp > MAXPHASE)
1208 		time_offset = MAXPHASE * SCALE_UPDATE;
1209 	else if (ltemp < -MAXPHASE)
1210 		time_offset = -(MAXPHASE * SCALE_UPDATE);
1211 	else
1212 		time_offset = ltemp * SCALE_UPDATE;
1213 
1214 	/*
1215 	 * Select whether the frequency is to be controlled and in which
1216 	 * mode (PLL or FLL). Clamp to the operating range. Ugly
1217 	 * multiply/divide should be replaced someday.
1218 	 */
1219 	if (time_status & STA_FREQHOLD || time_reftime == 0)
1220 		time_reftime = hrestime.tv_sec;
1221 
1222 	mtemp = hrestime.tv_sec - time_reftime;
1223 	time_reftime = hrestime.tv_sec;
1224 
1225 	if (time_status & STA_FLL) {
1226 		if (mtemp >= MINSEC) {
1227 			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
1228 			    SCALE_UPDATE));
1229 			if (ltemp)
1230 				time_freq += ltemp / SCALE_KH;
1231 		}
1232 	} else {
1233 		if (mtemp < MAXSEC) {
1234 			ltemp *= mtemp;
1235 			if (ltemp)
1236 				time_freq += (int)(((int64_t)ltemp *
1237 				    SCALE_USEC) / SCALE_KF)
1238 				    / (1 << (time_constant * 2));
1239 		}
1240 	}
1241 	if (time_freq > time_tolerance)
1242 		time_freq = time_tolerance;
1243 	else if (time_freq < -time_tolerance)
1244 		time_freq = -time_tolerance;
1245 
1246 	s = hr_clock_lock();
1247 	tod_needsync = 1;
1248 	hr_clock_unlock(s);
1249 }
1250 
1251 /*
1252  * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1253  *
1254  * This routine is called at each PPS interrupt in order to discipline
1255  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1256  * and leaves it in a handy spot for the clock() routine. It
1257  * integrates successive PPS phase differences and calculates the
1258  * frequency offset. This is used in clock() to discipline the CPU
1259  * clock oscillator so that intrinsic frequency error is cancelled out.
1260  * The code requires the caller to capture the time and hardware counter
1261  * value at the on-time PPS signal transition.
1262  *
1263  * Note that, on some Unix systems, this routine runs at an interrupt
1264  * priority level higher than the timer interrupt routine clock().
1265  * Therefore, the variables used are distinct from the clock()
1266  * variables, except for certain exceptions: The PPS frequency pps_freq
1267  * and phase pps_offset variables are determined by this routine and
1268  * updated atomically. The time_tolerance variable can be considered a
1269  * constant, since it is infrequently changed, and then only when the
1270  * PPS signal is disabled. The watchdog counter pps_valid is updated
1271  * once per second by clock() and is atomically cleared in this
1272  * routine.
1273  *
1274  * tvp is the time of the last tick; usec is a microsecond count since the
1275  * last tick.
1276  *
1277  * Note: In Solaris systems, the tick value is actually given by
1278  *       usec_per_tick.  This is called from the serial driver cdintr(),
1279  *	 or equivalent, at a high PIL.  Because the kernel keeps a
1280 	 *	 high-resolution time, the following code can accept either
1281  *	 the traditional argument pair, or the current highres timestamp
1282  *       in tvp and zero in usec.
1283  */
1284 void
1285 ddi_hardpps(struct timeval *tvp, int usec)
1286 {
1287 	int u_usec, v_usec, bigtick;
1288 	time_t cal_sec;
1289 	int cal_usec;
1290 
1291 	/*
1292 	 * An occasional glitch can be produced when the PPS interrupt
1293 	 * occurs in the clock() routine before the time variable is
1294 	 * updated. Here the offset is discarded when the difference
1295 	 * between it and the last one is greater than tick/2, but not
1296 	 * if the interval since the first discard exceeds 30 s.
1297 	 */
1298 	time_status |= STA_PPSSIGNAL;
1299 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1300 	pps_valid = 0;
1301 	u_usec = -tvp->tv_usec;
1302 	if (u_usec < -(MICROSEC/2))
1303 		u_usec += MICROSEC;
1304 	v_usec = pps_offset - u_usec;
1305 	if (v_usec < 0)
1306 		v_usec = -v_usec;
1307 	if (v_usec > (usec_per_tick >> 1)) {
1308 		if (pps_glitch > MAXGLITCH) {
1309 			pps_glitch = 0;
1310 			pps_tf[2] = u_usec;
1311 			pps_tf[1] = u_usec;
1312 		} else {
1313 			pps_glitch++;
1314 			u_usec = pps_offset;
1315 		}
1316 	} else
1317 		pps_glitch = 0;
1318 
1319 	/*
1320 	 * A three-stage median filter is used to help deglitch the pps
1321 	 * time. The median sample becomes the time offset estimate; the
1322 	 * difference between the other two samples becomes the time
1323 	 * dispersion (jitter) estimate.
1324 	 */
1325 	pps_tf[2] = pps_tf[1];
1326 	pps_tf[1] = pps_tf[0];
1327 	pps_tf[0] = u_usec;
1328 	if (pps_tf[0] > pps_tf[1]) {
1329 		if (pps_tf[1] > pps_tf[2]) {
1330 			pps_offset = pps_tf[1];		/* 0 1 2 */
1331 			v_usec = pps_tf[0] - pps_tf[2];
1332 		} else if (pps_tf[2] > pps_tf[0]) {
1333 			pps_offset = pps_tf[0];		/* 2 0 1 */
1334 			v_usec = pps_tf[2] - pps_tf[1];
1335 		} else {
1336 			pps_offset = pps_tf[2];		/* 0 2 1 */
1337 			v_usec = pps_tf[0] - pps_tf[1];
1338 		}
1339 	} else {
1340 		if (pps_tf[1] < pps_tf[2]) {
1341 			pps_offset = pps_tf[1];		/* 2 1 0 */
1342 			v_usec = pps_tf[2] - pps_tf[0];
1343 		} else  if (pps_tf[2] < pps_tf[0]) {
1344 			pps_offset = pps_tf[0];		/* 1 0 2 */
1345 			v_usec = pps_tf[1] - pps_tf[2];
1346 		} else {
1347 			pps_offset = pps_tf[2];		/* 1 2 0 */
1348 			v_usec = pps_tf[1] - pps_tf[0];
1349 		}
1350 	}
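	/*
	 * Worked example of the median filter above (added commentary):
	 * with samples pps_tf[0..2] = {3, 7, 5} (newest first), the middle
	 * value 5 becomes pps_offset and the spread between the other two
	 * samples, 7 - 3 = 4, becomes the jitter sample v_usec folded into
	 * pps_jitter below.
	 */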
1351 	if (v_usec > MAXTIME)
1352 		pps_jitcnt++;
1353 	v_usec = (v_usec << PPS_AVG) - pps_jitter;
1354 	pps_jitter += v_usec / (1 << PPS_AVG);
1355 	if (pps_jitter > (MAXTIME >> 1))
1356 		time_status |= STA_PPSJITTER;
1357 
1358 	/*
1359 	 * During the calibration interval adjust the starting time when
1360 	 * the tick overflows. At the end of the interval compute the
1361 	 * duration of the interval and the difference of the hardware
1362 	 * counters at the beginning and end of the interval. This code
1363 	 * is deliciously complicated by the fact that valid differences may
1364 	 * exceed the value of tick when using long calibration
1365 	 * intervals and small ticks. Note that the counter can be
1366 	 * greater than tick if caught at just the wrong instant, but
1367 	 * the values returned and used here are correct.
1368 	 */
1369 	bigtick = (int)usec_per_tick * SCALE_USEC;
1370 	pps_usec -= pps_freq;
1371 	if (pps_usec >= bigtick)
1372 		pps_usec -= bigtick;
1373 	if (pps_usec < 0)
1374 		pps_usec += bigtick;
1375 	pps_time.tv_sec++;
1376 	pps_count++;
1377 	if (pps_count < (1 << pps_shift))
1378 		return;
1379 	pps_count = 0;
1380 	pps_calcnt++;
1381 	u_usec = usec * SCALE_USEC;
1382 	v_usec = pps_usec - u_usec;
1383 	if (v_usec >= bigtick >> 1)
1384 		v_usec -= bigtick;
1385 	if (v_usec < -(bigtick >> 1))
1386 		v_usec += bigtick;
1387 	if (v_usec < 0)
1388 		v_usec = -(-v_usec >> pps_shift);
1389 	else
1390 		v_usec = v_usec >> pps_shift;
1391 	pps_usec = u_usec;
1392 	cal_sec = tvp->tv_sec;
1393 	cal_usec = tvp->tv_usec;
1394 	cal_sec -= pps_time.tv_sec;
1395 	cal_usec -= pps_time.tv_usec;
1396 	if (cal_usec < 0) {
1397 		cal_usec += MICROSEC;
1398 		cal_sec--;
1399 	}
1400 	pps_time = *tvp;
1401 
1402 	/*
1403 	 * Check for lost interrupts, noise, excessive jitter and
1404 	 * excessive frequency error. The number of timer ticks during
1405 	 * the interval may vary +-1 tick. Add to this a margin of one
1406 	 * tick for the PPS signal jitter and maximum frequency
1407 	 * deviation. If the limits are exceeded, the calibration
1408 	 * interval is reset to the minimum and we start over.
1409 	 */
1410 	u_usec = (int)usec_per_tick << 1;
1411 	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
1412 	    (cal_sec == 0 && cal_usec < u_usec)) ||
1413 	    v_usec > time_tolerance || v_usec < -time_tolerance) {
1414 		pps_errcnt++;
1415 		pps_shift = PPS_SHIFT;
1416 		pps_intcnt = 0;
1417 		time_status |= STA_PPSERROR;
1418 		return;
1419 	}
1420 
1421 	/*
1422 	 * A three-stage median filter is used to help deglitch the pps
1423 	 * frequency. The median sample becomes the frequency offset
1424 	 * estimate; the difference between the other two samples
1425 	 * becomes the frequency dispersion (stability) estimate.
1426 	 */
1427 	pps_ff[2] = pps_ff[1];
1428 	pps_ff[1] = pps_ff[0];
1429 	pps_ff[0] = v_usec;
1430 	if (pps_ff[0] > pps_ff[1]) {
1431 		if (pps_ff[1] > pps_ff[2]) {
1432 			u_usec = pps_ff[1];		/* 0 1 2 */
1433 			v_usec = pps_ff[0] - pps_ff[2];
1434 		} else if (pps_ff[2] > pps_ff[0]) {
1435 			u_usec = pps_ff[0];		/* 2 0 1 */
1436 			v_usec = pps_ff[2] - pps_ff[1];
1437 		} else {
1438 			u_usec = pps_ff[2];		/* 0 2 1 */
1439 			v_usec = pps_ff[0] - pps_ff[1];
1440 		}
1441 	} else {
1442 		if (pps_ff[1] < pps_ff[2]) {
1443 			u_usec = pps_ff[1];		/* 2 1 0 */
1444 			v_usec = pps_ff[2] - pps_ff[0];
1445 		} else  if (pps_ff[2] < pps_ff[0]) {
1446 			u_usec = pps_ff[0];		/* 1 0 2 */
1447 			v_usec = pps_ff[1] - pps_ff[2];
1448 		} else {
1449 			u_usec = pps_ff[2];		/* 1 2 0 */
1450 			v_usec = pps_ff[1] - pps_ff[0];
1451 		}
1452 	}
1453 
1454 	/*
1455 	 * Here the frequency dispersion (stability) is updated. If it
1456 	 * is less than one-fourth the maximum (MAXFREQ), the frequency
1457 	 * offset is updated as well, but clamped to the tolerance. It
1458 	 * will be processed later by the clock() routine.
1459 	 */
1460 	v_usec = (v_usec >> 1) - pps_stabil;
1461 	if (v_usec < 0)
1462 		pps_stabil -= -v_usec >> PPS_AVG;
1463 	else
1464 		pps_stabil += v_usec >> PPS_AVG;
1465 	if (pps_stabil > MAXFREQ >> 2) {
1466 		pps_stbcnt++;
1467 		time_status |= STA_PPSWANDER;
1468 		return;
1469 	}
1470 	if (time_status & STA_PPSFREQ) {
1471 		if (u_usec < 0) {
1472 			pps_freq -= -u_usec >> PPS_AVG;
1473 			if (pps_freq < -time_tolerance)
1474 				pps_freq = -time_tolerance;
1475 			u_usec = -u_usec;
1476 		} else {
1477 			pps_freq += u_usec >> PPS_AVG;
1478 			if (pps_freq > time_tolerance)
1479 				pps_freq = time_tolerance;
1480 		}
1481 	}
1482 
1483 	/*
1484 	 * Here the calibration interval is adjusted. If the maximum
1485 	 * time difference is greater than tick / 4, reduce the interval
1486 	 * by half. If this is not the case for four consecutive
1487 	 * intervals, double the interval.
1488 	 */
1489 	if (u_usec << pps_shift > bigtick >> 2) {
1490 		pps_intcnt = 0;
1491 		if (pps_shift > PPS_SHIFT)
1492 			pps_shift--;
1493 	} else if (pps_intcnt >= 4) {
1494 		pps_intcnt = 0;
1495 		if (pps_shift < PPS_SHIFTMAX)
1496 			pps_shift++;
1497 	} else
1498 		pps_intcnt++;
1499 
1500 	/*
1501 	 * If recovering from kmdb, then make sure the tod chip gets resynced.
1502 	 * If we took an early exit above, then we don't yet have a stable
1503 	 * calibration signal to lock onto, so don't mark the tod for sync
1504 	 * until we get all the way here.
1505 	 */
1506 	{
1507 		int s = hr_clock_lock();
1508 
1509 		tod_needsync = 1;
1510 		hr_clock_unlock(s);
1511 	}
1512 }
1513 
1514 /*
1515  * Handle clock tick processing for a thread.
1516  * Check for timer action, enforce CPU rlimit, do profiling etc.
1517  */
1518 void
1519 clock_tick(kthread_t *t, int pending)
1520 {
1521 	struct proc *pp;
1522 	klwp_id_t    lwp;
1523 	struct as *as;
1524 	clock_t	ticks;
1525 	int	poke = 0;		/* notify another CPU */
1526 	int	user_mode;
1527 	size_t	 rss;
1528 	int i, total_usec, usec;
1529 	rctl_qty_t secs;
1530 
1531 	ASSERT(pending > 0);
1532 
1533 	/* Must be operating on a lwp/thread */
1534 	if ((lwp = ttolwp(t)) == NULL) {
1535 		panic("clock_tick: no lwp");
1536 		/*NOTREACHED*/
1537 	}
1538 
1539 	for (i = 0; i < pending; i++) {
1540 		CL_TICK(t);	/* Class specific tick processing */
1541 		DTRACE_SCHED1(tick, kthread_t *, t);
1542 	}
1543 
1544 	pp = ttoproc(t);
1545 
1546 	/* pp->p_lock makes sure that the thread does not exit */
1547 	ASSERT(MUTEX_HELD(&pp->p_lock));
1548 
1549 	user_mode = (lwp->lwp_state == LWP_USER);
1550 
1551 	ticks = (pp->p_utime + pp->p_stime) % hz;
1552 	/*
1553 	 * Update process times. Should use high res clock and state
1554 	 * changes instead of statistical sampling method. XXX
1555 	 */
1556 	if (user_mode) {
1557 		pp->p_utime += pending;
1558 	} else {
1559 		pp->p_stime += pending;
1560 	}
1561 
1562 	pp->p_ttime += pending;
1563 	as = pp->p_as;
1564 
1565 	/*
1566 	 * Update user profiling statistics. Get the pc from the
1567 	 * lwp when the AST happens.
1568 	 */
1569 	if (pp->p_prof.pr_scale) {
1570 		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
1571 		if (user_mode) {
1572 			poke = 1;
1573 			aston(t);
1574 		}
1575 	}
1576 
1577 	/*
1578 	 * If CPU was in user state, process lwp-virtual time
1579 	 * interval timer. The value passed to itimerdecr() has to be
1580 	 * in microseconds and has to be less than one second. Hence
1581 	 * this loop.
1582 	 */
1583 	total_usec = usec_per_tick * pending;
1584 	while (total_usec > 0) {
1585 		usec = MIN(total_usec, (MICROSEC - 1));
1586 		if (user_mode &&
1587 		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
1588 		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
1589 			poke = 1;
1590 			sigtoproc(pp, t, SIGVTALRM);
1591 		}
1592 		total_usec -= usec;
1593 	}
1594 
1595 	/*
1596 	 * If CPU was in user state, process lwp-profile
1597 	 * interval timer.
1598 	 */
1599 	total_usec = usec_per_tick * pending;
1600 	while (total_usec > 0) {
1601 		usec = MIN(total_usec, (MICROSEC - 1));
1602 		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
1603 		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
1604 			poke = 1;
1605 			sigtoproc(pp, t, SIGPROF);
1606 		}
1607 		total_usec -= usec;
1608 	}
1609 
1610 	/*
1611 	 * Enforce CPU resource controls:
1612 	 *   (a) process.max-cpu-time resource control
1613 	 *
1614 	 * Perform the check only if we have accumulated more than a second.
1615 	 */
1616 	if ((ticks + pending) >= hz) {
1617 		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
1618 		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
1619 	}
1620 
1621 	/*
1622 	 *   (b) task.max-cpu-time resource control
1623 	 *
1624 	 * If we have accumulated enough ticks, increment the task CPU
1625 	 * time usage and test for the resource limit. This minimizes the
1626 	 * number of calls to rctl_test(). The task CPU time mutex
1627 	 * is highly contentious as many processes can be sharing a task.
1628 	 */
1629 	if (pp->p_ttime >= clock_tick_proc_max) {
1630 		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
1631 		pp->p_ttime = 0;
1632 		if (secs) {
1633 			(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
1634 			    pp, secs, RCA_UNSAFE_SIGINFO);
1635 		}
1636 	}
1637 
1638 	/*
1639 	 * Update memory usage for the currently running process.
1640 	 */
1641 	rss = rm_asrss(as);
1642 	PTOU(pp)->u_mem += rss;
1643 	if (rss > PTOU(pp)->u_mem_max)
1644 		PTOU(pp)->u_mem_max = rss;
1645 
1646 	/*
1647 	 * Notify the CPU the thread is running on.
1648 	 */
1649 	if (poke && t->t_cpu != CPU)
1650 		poke_cpu(t->t_cpu->cpu_id);
1651 }
1652 
1653 void
1654 profil_tick(uintptr_t upc)
1655 {
1656 	int ticks;
1657 	proc_t *p = ttoproc(curthread);
1658 	klwp_t *lwp = ttolwp(curthread);
1659 	struct prof *pr = &p->p_prof;
1660 
1661 	do {
1662 		ticks = lwp->lwp_oweupc;
1663 	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
1664 
1665 	mutex_enter(&p->p_pflock);
1666 	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
1667 		/*
1668 		 * Old-style profiling
1669 		 */
1670 		uint16_t *slot = pr->pr_base;
1671 		uint16_t old, new;
1672 		if (pr->pr_scale != 2) {
1673 			uintptr_t delta = upc - pr->pr_off;
1674 			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
1675 			    (((delta & 0xffff) * pr->pr_scale) >> 16);
1676 			if (byteoff >= (uintptr_t)pr->pr_size) {
1677 				mutex_exit(&p->p_pflock);
1678 				return;
1679 			}
1680 			slot += byteoff / sizeof (uint16_t);
1681 		}
1682 		if (fuword16(slot, &old) < 0 ||
1683 		    (new = old + ticks) > SHRT_MAX ||
1684 		    suword16(slot, new) < 0) {
1685 			pr->pr_scale = 0;
1686 		}
1687 	} else if (pr->pr_scale == 1) {
1688 		/*
1689 		 * PC Sampling
1690 		 */
1691 		model_t model = lwp_getdatamodel(lwp);
1692 		int result;
1693 #ifdef __lint
1694 		model = model;
1695 #endif
1696 		while (ticks-- > 0) {
1697 			if (pr->pr_samples == pr->pr_size) {
1698 				/* buffer full, turn off sampling */
1699 				pr->pr_scale = 0;
1700 				break;
1701 			}
1702 			switch (SIZEOF_PTR(model)) {
1703 			case sizeof (uint32_t):
1704 				result = suword32(pr->pr_base, (uint32_t)upc);
1705 				break;
1706 #ifdef _LP64
1707 			case sizeof (uint64_t):
1708 				result = suword64(pr->pr_base, (uint64_t)upc);
1709 				break;
1710 #endif
1711 			default:
1712 				cmn_err(CE_WARN, "profil_tick: unexpected "
1713 				    "data model");
1714 				result = -1;
1715 				break;
1716 			}
1717 			if (result != 0) {
1718 				pr->pr_scale = 0;
1719 				break;
1720 			}
1721 			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
1722 			pr->pr_samples++;
1723 		}
1724 	}
1725 	mutex_exit(&p->p_pflock);
1726 }
1727 
1728 static void
1729 delay_wakeup(void *arg)
1730 {
1731 	kthread_t	*t = arg;
1732 
1733 	mutex_enter(&t->t_delay_lock);
1734 	cv_signal(&t->t_delay_cv);
1735 	mutex_exit(&t->t_delay_lock);
1736 }
1737 
1738 /*
1739  * The delay(9F) man page indicates that it can only be called from user or
1740  * kernel context - detect and diagnose bad calls. The following macro will
1741  * produce a limited number of messages identifying bad callers.  This is done
1742  * in a macro so that caller() is meaningful. When a bad caller is identified,
1743  * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
1744  */
1745 #define	DELAY_CONTEXT_CHECK()	{					\
1746 	uint32_t	m;						\
1747 	char		*f;						\
1748 	ulong_t		off;						\
1749 									\
1750 	m = delay_from_interrupt_msg;					\
1751 	if (delay_from_interrupt_diagnose && servicing_interrupt() &&	\
1752 	    !panicstr && !devinfo_freeze &&				\
1753 	    atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {	\
1754 		f = modgetsymname((uintptr_t)caller(), &off);		\
1755 		cmn_err(CE_WARN, "delay(9F) called from "		\
1756 		    "interrupt context: %s`%s",				\
1757 		    mod_containing_pc(caller()), f ? f : "...");	\
1758 	}								\
1759 }
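
/*
 * An illustrative (hypothetical) driver fragment showing the distinction the
 * macro above diagnoses: delay(9F) may only block in user or kernel context;
 * interrupt context must busy-wait instead:
 *
 *	delay(drv_usectohz(10000));	sleep for ~10ms (thread context only)
 *	drv_usecwait(10000);		busy-wait for ~10ms (any context)
 */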
1760 
1761 /*
1762  * delay_common: common delay code.
1763  */
1764 static void
1765 delay_common(clock_t ticks)
1766 {
1767 	kthread_t	*t = curthread;
1768 	clock_t		deadline;
1769 	clock_t		timeleft;
1770 	callout_id_t	id;
1771 
1772 	/* If timeouts aren't running all we can do is spin. */
1773 	if (panicstr || devinfo_freeze) {
1774 		/* Convert delay(9F) call into drv_usecwait(9F) call. */
1775 		if (ticks > 0)
1776 			drv_usecwait(TICK_TO_USEC(ticks));
1777 		return;
1778 	}
1779 
1780 	deadline = ddi_get_lbolt() + ticks;
1781 	while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
1782 		mutex_enter(&t->t_delay_lock);
1783 		id = timeout_default(delay_wakeup, t, timeleft);
1784 		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
1785 		mutex_exit(&t->t_delay_lock);
1786 		(void) untimeout_default(id, 0);
1787 	}
1788 }
1789 
1790 /*
1791  * Delay specified number of clock ticks.
1792  */
1793 void
1794 delay(clock_t ticks)
1795 {
1796 	DELAY_CONTEXT_CHECK();
1797 
1798 	delay_common(ticks);
1799 }
1800 
1801 /*
1802  * Delay a random number of clock ticks between 1 and ticks.
1803  */
1804 void
1805 delay_random(clock_t ticks)
1806 {
1807 	int	r;
1808 
1809 	DELAY_CONTEXT_CHECK();
1810 
1811 	(void) random_get_pseudo_bytes((void *)&r, sizeof (r));
1812 	if (ticks == 0)
1813 		ticks = 1;
1814 	ticks = (r % ticks) + 1;
1815 	delay_common(ticks);
1816 }
1817 
1818 /*
1819  * Like delay, but interruptible by a signal.
1820  */
1821 int
1822 delay_sig(clock_t ticks)
1823 {
1824 	kthread_t	*t = curthread;
1825 	clock_t		deadline;
1826 	clock_t		rc;
1827 
1828 	/* If timeouts aren't running all we can do is spin. */
1829 	if (panicstr || devinfo_freeze) {
1830 		if (ticks > 0)
1831 			drv_usecwait(TICK_TO_USEC(ticks));
1832 		return (0);
1833 	}
1834 
1835 	deadline = ddi_get_lbolt() + ticks;
1836 	mutex_enter(&t->t_delay_lock);
1837 	do {
1838 		rc = cv_timedwait_sig(&t->t_delay_cv,
1839 		    &t->t_delay_lock, deadline);
1840 		/* loop until past deadline or signaled */
1841 	} while (rc > 0);
1842 	mutex_exit(&t->t_delay_lock);
1843 	if (rc == 0)
1844 		return (EINTR);
1845 	return (0);
1846 }
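
/*
 * A hedged usage sketch (hypothetical caller): wait for up to one second,
 * but give up early if the thread receives a signal:
 *
 *	if (delay_sig(drv_usectohz(MICROSEC)) != 0)
 *		return (EINTR);
 */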
1847 
1848 
1849 #define	SECONDS_PER_DAY 86400
1850 
1851 /*
1852  * Initialize the system time based on the TOD chip.  approx is used as
1853  * an approximation of time (e.g. from the filesystem) in the event that
1854  * the TOD chip has been cleared or is unresponsive.  An approx of -1
1855  * means the filesystem doesn't keep time.
1856  */
1857 void
1858 clkset(time_t approx)
1859 {
1860 	timestruc_t ts;
1861 	int spl;
1862 	int set_clock = 0;
1863 
1864 	mutex_enter(&tod_lock);
1865 	ts = tod_get();
1866 
1867 	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
1868 		/*
1869 		 * If the TOD chip is reporting some time after 1971,
1870 		 * then it probably didn't lose power or become otherwise
1871 		 * cleared in the recent past;  check to assure that
1872 		 * the time coming from the filesystem isn't in the future
1873 		 * according to the TOD chip.
1874 		 */
1875 		if (approx != -1 && approx > ts.tv_sec) {
1876 			cmn_err(CE_WARN, "Last shutdown is later "
1877 			    "than time on time-of-day chip; check date.");
1878 		}
1879 	} else {
1880 		/*
1881 		 * If the TOD chip isn't giving correct time, set it to the
1882 		 * greater of i) approx and ii) 1987. That way if approx
1883 		 * is negative or is earlier than 1987, we set the clock
1884 		 * back to a time when Oliver North, ALF and Dire Straits
1885 		 * were all on the collective brain:  1987.
1886 		 */
1887 		timestruc_t tmp;
1888 		time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
1889 		ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
1890 		ts.tv_nsec = 0;
1891 
1892 		/*
1893 		 * Attempt to write the new time to the TOD chip.  Set spl high
1894 		 * to avoid getting preempted between the tod_set and tod_get.
1895 		 */
1896 		spl = splhi();
1897 		tod_set(ts);
1898 		tmp = tod_get();
1899 		splx(spl);
1900 
1901 		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
1902 			tod_broken = 1;
1903 			dosynctodr = 0;
1904 			cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
1905 		} else {
1906 			cmn_err(CE_WARN, "Time-of-day chip had "
1907 			    "incorrect date; check and reset.");
1908 		}
1909 		set_clock = 1;
1910 	}
1911 
1912 	if (!boot_time) {
1913 		boot_time = ts.tv_sec;
1914 		set_clock = 1;
1915 	}
1916 
1917 	if (set_clock)
1918 		set_hrestime(&ts);
1919 
1920 	mutex_exit(&tod_lock);
1921 }
1922 
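/*
 * A sketch of the intended consumer pattern for timechanged (hypothetical
 * caller): snapshot it before deriving a wall-clock deadline from hrestime
 * and compare it afterwards; a change means the system time was reset and
 * the deadline should be recomputed:
 *
 *	timecheck = timechanged;
 *	(compute a deadline from hrestime)
 *	if (timecheck != timechanged)
 *		(the clock was reset; recompute the deadline)
 */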
1923 int	timechanged;	/* for testing if the system time has been reset */
1924 
1925 void
1926 set_hrestime(timestruc_t *ts)
1927 {
1928 	int spl = hr_clock_lock();
1929 	hrestime = *ts;
1930 	membar_enter();	/* hrestime must be visible before timechanged++ */
1931 	timedelta = 0;
1932 	timechanged++;
1933 	hr_clock_unlock(spl);
1934 	callout_hrestime();
1935 }
1936 
1937 static uint_t deadman_seconds;
1938 static uint32_t deadman_panics;
1939 static int deadman_enabled = 0;
1940 static int deadman_panic_timers = 1;
1941 
1942 static void
1943 deadman(void)
1944 {
1945 	if (panicstr) {
1946 		/*
1947 		 * During panic, other CPUs besides the panic
1948 		 * master continue to handle cyclics and some other
1949 		 * interrupts.  The code below is intended to be
1950 		 * single threaded, so any CPU other than the master
1951 		 * must keep out.
1952 		 */
1953 		if (CPU->cpu_id != panic_cpu.cpu_id)
1954 			return;
1955 
1956 		if (!deadman_panic_timers)
1957 			return; /* allow all timers to be manually disabled */
1958 
1959 		/*
1960 		 * If we are generating a crash dump or syncing filesystems and
1961 		 * the corresponding timer is set, decrement it and re-enter
1962 		 * the panic code to abort it and advance to the next state.
1963 		 * The panic states and triggers are explained in panic.c.
1964 		 */
1965 		if (panic_dump) {
1966 			if (dump_timeleft && (--dump_timeleft == 0)) {
1967 				panic("panic dump timeout");
1968 				/*NOTREACHED*/
1969 			}
1970 		} else if (panic_sync) {
1971 			if (sync_timeleft && (--sync_timeleft == 0)) {
1972 				panic("panic sync timeout");
1973 				/*NOTREACHED*/
1974 			}
1975 		}
1976 
1977 		return;
1978 	}
1979 
1980 	if (deadman_counter != CPU->cpu_deadman_counter) {
1981 		CPU->cpu_deadman_counter = deadman_counter;
1982 		CPU->cpu_deadman_countdown = deadman_seconds;
1983 		return;
1984 	}
1985 
1986 	if (--CPU->cpu_deadman_countdown > 0)
1987 		return;
1988 
1989 	/*
1990 	 * Regardless of whether or not we actually bring the system down,
1991 	 * bump the deadman_panics variable.
1992 	 *
1993 	 * N.B. deadman_panics is incremented once for each CPU that
1994 	 * passes through here.  It's expected that all the CPUs will
1995 	 * detect this condition within one second of each other, so
1996 	 * when deadman_enabled is off, deadman_panics will
1997 	 * typically be a multiple of the total number of CPUs in
1998 	 * the system.
1999 	 */
2000 	atomic_add_32(&deadman_panics, 1);
2001 
2002 	if (!deadman_enabled) {
2003 		CPU->cpu_deadman_countdown = deadman_seconds;
2004 		return;
2005 	}
2006 
2007 	/*
2008 	 * If we're here, we want to bring the system down.
2009 	 */
2010 	panic("deadman: timed out after %d seconds of clock "
2011 	    "inactivity", deadman_seconds);
2012 	/*NOTREACHED*/
2013 }
2014 
2015 /*ARGSUSED*/
2016 static void
2017 deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
2018 {
2019 	cpu->cpu_deadman_counter = 0;
2020 	cpu->cpu_deadman_countdown = deadman_seconds;
2021 
2022 	hdlr->cyh_func = (cyc_func_t)deadman;
2023 	hdlr->cyh_level = CY_HIGH_LEVEL;
2024 	hdlr->cyh_arg = NULL;
2025 
2026 	/*
2027 	 * Stagger the CPUs so that they don't all run deadman() at
2028 	 * the same time.  The simplest reason to do this is to make it
2029 	 * more likely that only one CPU will panic in case of a
2030 	 * timeout.  This is (strictly speaking) an aesthetic, not a
2031 	 * technical consideration.
2032 	 */
2033 	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
2034 	when->cyt_interval = NANOSEC;
2035 }
2036 
2037 
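/*
 * The deadman is normally armed from /etc/system; for example (assuming the
 * default snoop_interval is acceptable):
 *
 *	set snooping = 1
 *
 * deadman_seconds may also be set explicitly to override the value derived
 * from snoop_interval below.
 */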
2038 void
2039 deadman_init(void)
2040 {
2041 	cyc_omni_handler_t hdlr;
2042 
2043 	if (deadman_seconds == 0)
2044 		deadman_seconds = snoop_interval / MICROSEC;
2045 
2046 	if (snooping)
2047 		deadman_enabled = 1;
2048 
2049 	hdlr.cyo_online = deadman_online;
2050 	hdlr.cyo_offline = NULL;
2051 	hdlr.cyo_arg = NULL;
2052 
2053 	mutex_enter(&cpu_lock);
2054 	deadman_cyclic = cyclic_add_omni(&hdlr);
2055 	mutex_exit(&cpu_lock);
2056 }
2057 
2058 /*
2059  * tod_fault() is for updating tod validate mechanism state:
2060  * (1) TOD_NOFAULT: for resetting the state to 'normal'.
2061  *     currently used for debugging only
2062  * (2) The following four cases detected by tod validate mechanism:
2063  *       TOD_REVERSED: current tod value is less than previous value.
2064  *       TOD_STALLED: current tod value hasn't advanced.
2065  *       TOD_JUMPED: current tod value advanced too far from previous value.
2066  *       TOD_RATECHANGED: the ratio between average tod delta and
2067  *       average tick delta has changed.
2068  * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
2069  *     a virtual TOD provided by a hypervisor.
2070  */
2071 enum tod_fault_type
2072 tod_fault(enum tod_fault_type ftype, int off)
2073 {
2074 	ASSERT(MUTEX_HELD(&tod_lock));
2075 
2076 	if (tod_faulted != ftype) {
2077 		switch (ftype) {
2078 		case TOD_NOFAULT:
2079 			plat_tod_fault(TOD_NOFAULT);
2080 			cmn_err(CE_NOTE, "Restarted tracking "
2081 			    "Time of Day clock.");
2082 			tod_faulted = ftype;
2083 			break;
2084 		case TOD_REVERSED:
2085 		case TOD_JUMPED:
2086 			if (tod_faulted == TOD_NOFAULT) {
2087 				plat_tod_fault(ftype);
2088 				cmn_err(CE_WARN, "Time of Day clock error: "
2089 				    "reason [%s by 0x%x]. -- "
2090 				    " Stopped tracking Time Of Day clock.",
2091 				    tod_fault_table[ftype], off);
2092 				tod_faulted = ftype;
2093 			}
2094 			break;
2095 		case TOD_STALLED:
2096 		case TOD_RATECHANGED:
2097 			if (tod_faulted == TOD_NOFAULT) {
2098 				plat_tod_fault(ftype);
2099 				cmn_err(CE_WARN, "Time of Day clock error: "
2100 				    "reason [%s]. -- "
2101 				    " Stopped tracking Time Of Day clock.",
2102 				    tod_fault_table[ftype]);
2103 				tod_faulted = ftype;
2104 			}
2105 			break;
2106 		case TOD_RDONLY:
2107 			if (tod_faulted == TOD_NOFAULT) {
2108 				plat_tod_fault(ftype);
2109 				cmn_err(CE_NOTE, "!Time of Day clock is "
2110 				    "Read-Only; set of Date/Time will not "
2111 				    "persist across reboot.");
2112 				tod_faulted = ftype;
2113 			}
2114 			break;
2115 		default:
2116 			break;
2117 		}
2118 	}
2119 	return (tod_faulted);
2120 }
2121 
2122 /*
2123  * Two functions that allow tod_status_flag to be manipulated by functions
2124  * external to this file.
2125  */
2126 
2127 void
2128 tod_status_set(int tod_flag)
2129 {
2130 	tod_status_flag |= tod_flag;
2131 }
2132 
2133 void
2134 tod_status_clear(int tod_flag)
2135 {
2136 	tod_status_flag &= ~tod_flag;
2137 }
2138 
2139 /*
2140  * Record a timestamp and the value passed to tod_set().  The next call to
2141  * tod_validate() can use these values, prev_set_tick and prev_set_tod,
2142  * when checking the timestruc_t returned by tod_get().  Ordinarily,
2143 	 * tod_validate() will use prev_tick and prev_tod for this task but these
2144 	 * become obsolete and are replaced by the prev_set_* values when the
2145 	 * TOD is re-written.
2146  */
2147 void
2148 tod_set_prev(timestruc_t ts)
2149 {
2150 	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2151 	    tod_validate_deferred) {
2152 		return;
2153 	}
2154 	prev_set_tick = gethrtime();
2155 	/*
2156 	 * A negative value will be set to zero in utc_to_tod() so we fake
2157 	 * a zero here in such a case.  This would need to change if the
2158 	 * behavior of utc_to_tod() changes.
2159 	 */
2160 	prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
2161 }
2162 
2163 /*
2164  * tod_validate() is used for checking values returned by tod_get().
2165  * Four error cases can be detected by this routine:
2166  *   TOD_REVERSED: current tod value is less than previous.
2167  *   TOD_STALLED: current tod value hasn't advanced.
2168  *   TOD_JUMPED: current tod value advanced too far from previous value.
2169  *   TOD_RATECHANGED: the ratio between average tod delta and
2170  *   average tick delta has changed.
2171  */
2172 time_t
2173 tod_validate(time_t tod)
2174 {
2175 	time_t diff_tod;
2176 	hrtime_t diff_tick;
2177 
2178 	long dtick;
2179 	int dtick_delta;
2180 
2181 	int off = 0;
2182 	enum tod_fault_type tod_bad = TOD_NOFAULT;
2183 
2184 	static int firsttime = 1;
2185 
2186 	static time_t prev_tod = 0;
2187 	static hrtime_t prev_tick = 0;
2188 	static long dtick_avg = TOD_REF_FREQ;
2189 
2190 	int cpr_resume_done = 0;
2191 	int dr_resume_done = 0;
2192 
2193 	hrtime_t tick = gethrtime();
2194 
2195 	ASSERT(MUTEX_HELD(&tod_lock));
2196 
2197 	/*
2198 	 * tod_validate_enable is patchable via /etc/system.
2199 	 * If TOD is already faulted, or if TOD validation is deferred,
2200 	 * there is nothing to do.
2201 	 */
2202 	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2203 	    tod_validate_deferred) {
2204 		return (tod);
2205 	}
2206 
2207 	/*
2208 	 * If this is the first time through, we just need to save the tod
2209 	 * we were called with and hrtime so we can use them next time to
2210 	 * validate tod_get().
2211 	 */
2212 	if (firsttime) {
2213 		firsttime = 0;
2214 		prev_tod = tod;
2215 		prev_tick = tick;
2216 		return (tod);
2217 	}
2218 
2219 	/*
2220 	 * Handle any flags that have been turned on by tod_status_set().
2221 	 * In the case where a tod_set() is done and then a subsequent
2222 	 * tod_get() fails (i.e., both TOD_SET_DONE and TOD_GET_FAILED are
2223 	 * true), we treat the TOD_GET_FAILED with precedence by switching
2224 	 * off the flag, returning tod and leaving TOD_SET_DONE asserted
2225 	 * until such time as tod_get() completes successfully.
2226 	 */
2227 	if (tod_status_flag & TOD_GET_FAILED) {
2228 		/*
2229 		 * tod_get() has encountered an issue, possibly transitory,
2230 		 * when reading TOD.  We'll just return the incoming tod
2231 		 * value (which is actually hrestime.tv_sec in this case)
2232 		 * and when we get a genuine tod, following a successful
2233 		 * tod_get(), we can validate using prev_tod and prev_tick.
2234 		 */
2235 		tod_status_flag &= ~TOD_GET_FAILED;
2236 		return (tod);
2237 	} else if (tod_status_flag & TOD_SET_DONE) {
2238 		/*
2239 		 * TOD has been modified.  Just before the TOD was written,
2240 		 * tod_set_prev() saved tod and hrtime; we can now use
2241 		 * those values, prev_set_tod and prev_set_tick, to validate
2242 		 * the incoming tod that's just been read.
2243 		 */
2244 		prev_tod = prev_set_tod;
2245 		prev_tick = prev_set_tick;
2246 		dtick_avg = TOD_REF_FREQ;
2247 		tod_status_flag &= ~TOD_SET_DONE;
2248 		/*
2249 		 * If a tod_set() preceded a cpr_suspend() without an
2250 		 * intervening tod_validate(), we need to ensure that a
2251 		 * TOD_JUMPED condition is ignored.
2252 		 * Note this isn't a concern in the case of DR as we've
2253 		 * just reassigned dtick_avg, above.
2254 		 */
2255 		if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2256 			cpr_resume_done = 1;
2257 			tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2258 		}
2259 	} else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2260 		/*
2261 		 * The system's coming back from a checkpoint resume.
2262 		 */
2263 		cpr_resume_done = 1;
2264 		tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2265 		/*
2266 		 * We need to handle the possibility of a CPR suspend
2267 		 * operation having been initiated whilst a DR event was
2268 		 * in-flight.
2269 		 */
2270 		if (tod_status_flag & TOD_DR_RESUME_DONE) {
2271 			dr_resume_done = 1;
2272 			tod_status_flag &= ~TOD_DR_RESUME_DONE;
2273 		}
2274 	} else if (tod_status_flag & TOD_DR_RESUME_DONE) {
2275 		/*
2276 		 * A Dynamic Reconfiguration event has taken place.
2277 		 */
2278 		dr_resume_done = 1;
2279 		tod_status_flag &= ~TOD_DR_RESUME_DONE;
2280 	}
2281 
2282 	/* test hook */
2283 	switch (tod_unit_test) {
2284 	case 1: /* for testing jumping tod */
2285 		tod += tod_test_injector;
2286 		tod_unit_test = 0;
2287 		break;
2288 	case 2:	/* for testing stuck tod bit */
2289 		tod |= 1 << tod_test_injector;
2290 		tod_unit_test = 0;
2291 		break;
2292 	case 3:	/* for testing stalled tod */
2293 		tod = prev_tod;
2294 		tod_unit_test = 0;
2295 		break;
2296 	case 4:	/* reset tod fault status */
2297 		(void) tod_fault(TOD_NOFAULT, 0);
2298 		tod_unit_test = 0;
2299 		break;
2300 	default:
2301 		break;
2302 	}
2303 
2304 	diff_tod = tod - prev_tod;
2305 	diff_tick = tick - prev_tick;
2306 
2307 	ASSERT(diff_tick >= 0);
2308 
2309 	if (diff_tod < 0) {
2310 		/* ERROR - tod reversed */
2311 		tod_bad = TOD_REVERSED;
2312 		off = (int)(prev_tod - tod);
2313 	} else if (diff_tod == 0) {
2314 		/* tod did not advance */
2315 		if (diff_tick > TOD_STALL_THRESHOLD) {
2316 			/* ERROR - tod stalled */
2317 			tod_bad = TOD_STALLED;
2318 		} else {
2319 			/*
2320 			 * Make sure we don't update prev_tick
2321 			 * so that diff_tick is calculated since
2322 			 * the first diff_tod == 0
2323 			 */
2324 			return (tod);
2325 		}
2326 	} else {
2327 		/* calculate dtick */
2328 		dtick = diff_tick / diff_tod;
2329 
2330 		/* update dtick averages */
2331 		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
2332 
2333 		/*
2334 		 * Calculate dtick_delta as the variation from the reference
2335 		 * frequency in quarters (steps of 25%).
2336 		 */
2337 		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
2338 		    (TOD_REF_FREQ >> 2);
2339 
2340 		/*
2341 		 * Even with a perfectly functioning TOD device,
2342 		 * when the number of elapsed seconds is low the
2343 		 * algorithm can calculate a rate that is beyond
2344 		 * tolerance, causing an error.  The algorithm is
2345 		 * inaccurate when elapsed time is low (less than
2346 		 * 5 seconds).
2347 		 */
2348 		if (diff_tod > 4) {
2349 			if (dtick < TOD_JUMP_THRESHOLD) {
2350 				/*
2351 				 * If we've just done a CPR resume, we detect
2352 				 * a jump in the TOD but, actually, what's
2353 				 * happened is that the TOD has been increasing
2354 				 * whilst the system was suspended and the tick
2355 				 * count hasn't kept up.  We consider the first
2356 				 * occurrence of this after a resume as normal
2357 				 * and ignore it; otherwise, in a non-resume
2358 				 * case, we regard it as a TOD problem.
2359 				 */
2360 				if (!cpr_resume_done) {
2361 					/* ERROR - tod jumped */
2362 					tod_bad = TOD_JUMPED;
2363 					off = (int)diff_tod;
2364 				}
2365 			}
2366 			if (dtick_delta) {
2367 				/*
2368 				 * If we've just done a DR resume, dtick_avg
2369 				 * can go a bit askew so we reset it and carry
2370 				 * on; otherwise, the TOD is in error.
2371 				 */
2372 				if (dr_resume_done) {
2373 					dtick_avg = TOD_REF_FREQ;
2374 				} else {
2375 					/* ERROR - change in clock rate */
2376 					tod_bad = TOD_RATECHANGED;
2377 				}
2378 			}
2379 		}
2380 	}
2381 
2382 	if (tod_bad != TOD_NOFAULT) {
2383 		(void) tod_fault(tod_bad, off);
2384 
2385 		/*
2386 		 * Disable dosynctodr since we are going to fault
2387 		 * the TOD chip anyway here
2388 		 */
2389 		dosynctodr = 0;
2390 
2391 		/*
2392 		 * Set tod to the correct value from hrestime
2393 		 */
2394 		tod = hrestime.tv_sec;
2395 	}
2396 
2397 	prev_tod = tod;
2398 	prev_tick = tick;
2399 	return (tod);
2400 }
2401 
2402 static void
2403 calcloadavg(int nrun, uint64_t *hp_ave)
2404 {
2405 	static int64_t f[3] = { 135, 27, 9 };
2406 	uint_t i;
2407 	int64_t q, r;
2408 
2409 	/*
2410 	 * Compute load average over the last 1, 5, and 15 minutes
2411 	 * (60, 300, and 900 seconds).  The constants in f[3] are for
2412 	 * exponential decay:
2413 	 * (1 - exp(-1/60)) * 2^13 ~= 135,
2414 	 * (1 - exp(-1/300)) * 2^13 ~= 27,
2415 	 * (1 - exp(-1/900)) * 2^13 ~= 9.
2416 	 */
2417 
2418 	/*
2419 	 * a little hoop-jumping to avoid integer overflow
2420 	 */
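	/*
	 * Each iteration implements, in fixed point, approximately
	 *
	 *	ave = ave + (sample - ave) * f[i] / 2^13
	 *
	 * where hp_ave[i] holds ave with 16 fractional bits and nrun is
	 * assumed to carry the instantaneous sample pre-scaled by 2^7.
	 * q and r are the integer and fractional halves of the average
	 * (each shifted up by 7) so the multiplications by f[i] cannot
	 * overflow the 64-bit intermediates.
	 */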
2421 	for (i = 0; i < 3; i++) {
2422 		q = (hp_ave[i]  >> 16) << 7;
2423 		r = (hp_ave[i]  & 0xffff) << 7;
2424 		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
2425 	}
2426 }
2427 
2428 /*
2429  * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
2430  * calculate the value of lbolt according to the current mode. In the event
2431  * driven mode (the default), lbolt is calculated by dividing the current hires
2432  * time by the number of nanoseconds per clock tick. In the cyclic driven mode
2433  * an internal variable is incremented at each firing of the lbolt cyclic
2434  * and returned by lbolt_cyclic_driven().
2435  *
2436  * The system will transition from event to cyclic driven mode when the number
2437  * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
2438  * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
2439  * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
2440  * causing enough activity to cross the thresholds.
2441  */
2442 int64_t
2443 lbolt_bootstrap(void)
2444 {
2445 	return (0);
2446 }
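
/*
 * For reference, DDI consumers reach whichever routine lbolt_hybrid
 * currently points to; a simplified sketch of the wrappers (the actual
 * definitions live in the DDI layer):
 *
 *	int64_t
 *	ddi_get_lbolt64(void)
 *	{
 *		return ((*lbolt_hybrid)());
 *	}
 *
 *	clock_t
 *	ddi_get_lbolt(void)
 *	{
 *		return ((clock_t)(*lbolt_hybrid)());
 *	}
 */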
2447 
2448 /* ARGSUSED */
2449 uint_t
2450 lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
2451 {
2452 	hrtime_t ts, exp;
2453 	int ret;
2454 
2455 	ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
2456 
2457 	kpreempt_disable();
2458 
2459 	ts = gethrtime();
2460 	lb_info->lbi_internal = (ts/nsec_per_tick);
2461 
2462 	/*
2463 	 * Align the next expiration to a clock tick boundary.
2464 	 */
2465 	exp = ts + nsec_per_tick - 1;
2466 	exp = (exp/nsec_per_tick) * nsec_per_tick;
2467 
2468 	ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
2469 	ASSERT(ret);
2470 
2471 	lbolt_hybrid = lbolt_cyclic_driven;
2472 	lb_info->lbi_cyc_deactivate = B_FALSE;
2473 	lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2474 
2475 	kpreempt_enable();
2476 
2477 	ret = atomic_dec_32_nv(&lb_info->lbi_token);
2478 	ASSERT(ret == 0);
2479 
2480 	return (1);
2481 }
2482 
2483 int64_t
2484 lbolt_event_driven(void)
2485 {
2486 	hrtime_t ts;
2487 	int64_t lb;
2488 	int ret, cpu = CPU->cpu_seqid;
2489 
2490 	ts = gethrtime();
2491 	ASSERT(ts > 0);
2492 
2493 	ASSERT(nsec_per_tick > 0);
2494 	lb = (ts/nsec_per_tick);
2495 
2496 	/*
2497 	 * Switch to cyclic mode if the number of calls to this routine
2498 	 * has reached the threshold within the interval.
2499 	 */
2500 	if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
2501 
2502 		if (--lb_cpu[cpu].lbc_counter == 0) {
2503 			/*
2504 			 * Reached the threshold within the interval, reset
2505 			 * the usage statistics.
2506 			 */
2507 			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2508 			lb_cpu[cpu].lbc_cnt_start = lb;
2509 
2510 			/*
2511 			 * Make sure only one thread reprograms the
2512 			 * lbolt cyclic and changes the mode.
2513 			 */
2514 			if (panicstr == NULL &&
2515 			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2516 
2517 				if (lbolt_hybrid == lbolt_cyclic_driven) {
2518 					ret = atomic_dec_32_nv(
2519 					    &lb_info->lbi_token);
2520 					ASSERT(ret == 0);
2521 				} else {
2522 					lbolt_softint_post();
2523 				}
2524 			}
2525 		}
2526 	} else {
2527 		/*
2528 		 * Exceeded the interval, reset the usage statistics.
2529 		 */
2530 		lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2531 		lb_cpu[cpu].lbc_cnt_start = lb;
2532 	}
2533 
2534 	ASSERT(lb >= lb_info->lbi_debug_time);
2535 
2536 	return (lb - lb_info->lbi_debug_time);
2537 }
2538 
2539 int64_t
2540 lbolt_cyclic_driven(void)
2541 {
2542 	int64_t lb = lb_info->lbi_internal;
2543 	int cpu;
2544 
2545 	/*
2546 	 * If a CPU has already prevented the lbolt cyclic from deactivating
2547 	 * itself, don't bother tracking the usage. Otherwise check if we're
2548 	 * within the interval and how the per CPU counter is doing.
2549 	 */
2550 	if (lb_info->lbi_cyc_deactivate) {
2551 		cpu = CPU->cpu_seqid;
2552 		if ((lb - lb_cpu[cpu].lbc_cnt_start) <
2553 		    lb_info->lbi_thresh_interval) {
2554 
2555 			if (lb_cpu[cpu].lbc_counter == 0)
2556 				/*
2557 				 * Reached the threshold within the interval,
2558 				 * prevent the lbolt cyclic from turning itself
2559 				 * off.
2560 				 */
2561 				lb_info->lbi_cyc_deactivate = B_FALSE;
2562 			else
2563 				lb_cpu[cpu].lbc_counter--;
2564 		} else {
2565 			/*
2566 			 * Only reset the usage statistics when we have
2567 			 * exceeded the interval.
2568 			 */
2569 			lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2570 			lb_cpu[cpu].lbc_cnt_start = lb;
2571 		}
2572 	}
2573 
2574 	ASSERT(lb >= lb_info->lbi_debug_time);
2575 
2576 	return (lb - lb_info->lbi_debug_time);
2577 }
2578 
2579 /*
2580  * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
2581  * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
2582  * It is inactive by default, and will be activated when switching from event
2583  * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
2584  * by lbolt_cyclic_driven().
2585  */
2586 static void
2587 lbolt_cyclic(void)
2588 {
2589 	int ret;
2590 
2591 	lb_info->lbi_internal++;
2592 
2593 	if (!lbolt_cyc_only) {
2594 
2595 		if (lb_info->lbi_cyc_deactivate) {
2596 			/*
2597 			 * Switching from cyclic to event driven mode.
2598 			 */
2599 			if (panicstr == NULL &&
2600 			    atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2601 
2602 				if (lbolt_hybrid == lbolt_event_driven) {
2603 					ret = atomic_dec_32_nv(
2604 					    &lb_info->lbi_token);
2605 					ASSERT(ret == 0);
2606 					return;
2607 				}
2608 
2609 				kpreempt_disable();
2610 
2611 				lbolt_hybrid = lbolt_event_driven;
2612 				ret = cyclic_reprogram(
2613 				    lb_info->id.lbi_cyclic_id,
2614 				    CY_INFINITY);
2615 				ASSERT(ret);
2616 
2617 				kpreempt_enable();
2618 
2619 				ret = atomic_dec_32_nv(&lb_info->lbi_token);
2620 				ASSERT(ret == 0);
2621 			}
2622 		}
2623 
2624 		/*
2625 		 * The lbolt cyclic should not try to deactivate itself before
2626 		 * the sampling period has elapsed.
2627 		 */
2628 		if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
2629 		    lb_info->lbi_thresh_interval) {
2630 			lb_info->lbi_cyc_deactivate = B_TRUE;
2631 			lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2632 		}
2633 	}
2634 }
2635 
2636 /*
2637  * Since the lbolt service was historically cyclic driven, it must be 'stopped'
2638  * when the system drops into the kernel debugger. lbolt_debug_entry() is
2639  * called by the KDI system claim callbacks to record a hires timestamp at
2640  * debug enter time. lbolt_debug_return() is called by the system release
2641  * callbacks to account for the time spent in the debugger. The value is then
2642  * accumulated in the lb_info structure and used by lbolt_event_driven() and
2643  * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
2644  */
2645 void
2646 lbolt_debug_entry(void)
2647 {
2648 	if (lbolt_hybrid != lbolt_bootstrap) {
2649 		ASSERT(lb_info != NULL);
2650 		lb_info->lbi_debug_ts = gethrtime();
2651 	}
2652 }
2653 
2654 /*
2655  * Calculate the time spent in the debugger and add it to the lbolt info
2656  * structure. We also update the internal lbolt value in case we were in
2657  * cyclic driven mode going in.
2658  */
2659 void
2660 lbolt_debug_return(void)
2661 {
2662 	hrtime_t ts;
2663 
2664 	if (lbolt_hybrid != lbolt_bootstrap) {
2665 		ASSERT(lb_info != NULL);
2666 		ASSERT(nsec_per_tick > 0);
2667 
2668 		ts = gethrtime();
2669 		lb_info->lbi_internal = (ts/nsec_per_tick);
2670 		lb_info->lbi_debug_time +=
2671 		    ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);
2672 
2673 		lb_info->lbi_debug_ts = 0;
2674 	}
2675 }
2676