/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/types.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/callo.h>
#include <sys/kmem.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/vmsystm.h>
#include <sys/class.h>
#include <sys/time.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/spl.h>
#include <sys/atomic.h>
#include <sys/dumphdr.h>
#include <sys/archsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/panic.h>
#include <sys/disp.h>
#include <sys/msacct.h>
#include <sys/mem_cage.h>

#include <vm/page.h>
#include <vm/anon.h>
#include <vm/rm.h>
#include <sys/cyclic.h>
#include <sys/cpupart.h>
#include <sys/rctl.h>
#include <sys/task.h>
#include <sys/sdt.h>
#include <sys/ddi_timer.h>

/*
 * for NTP support
 */
#include <sys/timex.h>
#include <sys/inttypes.h>

/*
 * clock() is called straight from the clock cyclic; see clock_init().
 *
 * Functions:
 *	reprime clock
 *	schedule callouts
 *	maintain date
 *	jab the scheduler
 */

extern kcondvar_t	fsflush_cv;
extern sysinfo_t	sysinfo;
extern vminfo_t	vminfo;
extern int	idleswtch;	/* flag set while idle in pswtch() */

/*
 * high-precision avenrun values.  These are needed to make the
 * regular avenrun values accurate.
 */
static uint64_t hp_avenrun[3];
int	avenrun[3];		/* FSCALED average run queue lengths */
time_t	time;	/* time in seconds since 1970 - for compatibility only */

static struct loadavg_s loadavg;
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int32_t time_state = TIME_OK;	/* clock state */
int32_t time_status = STA_UNSYNC;	/* clock status bits */
int32_t time_offset = 0;		/* time offset (us) */
int32_t time_constant = 0;		/* pll time constant */
int32_t time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
int32_t time_precision = 1;	/* clock precision (us) */
int32_t time_maxerror = MAXPHASE;	/* maximum error (us) */
int32_t time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
int32_t time_phase = 0;		/* phase offset (scaled us) */
int32_t time_freq = 0;		/* frequency offset (scaled ppm) */
int32_t time_adj = 0;		/* tick adjust (scaled 1 / hz) */
int32_t time_reftime = 0;		/* time at last adjustment (s) */

/*
 * The scale factors of the following variables are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
int32_t pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
int32_t pps_offset = 0;		/* pps time offset (us) */
int32_t pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
int32_t pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
int32_t pps_freq = 0;		/* frequency offset (scaled ppm) */
int32_t pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
int32_t pps_usec = 0;		/* microsec counter at last interval */
int32_t pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int32_t pps_glitch = 0;		/* pps signal glitch counter */
int32_t pps_count = 0;		/* calibration interval counter (s) */
int32_t pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int32_t pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
int32_t pps_jitcnt = 0;		/* jitter limit exceeded */
int32_t pps_calcnt = 0;		/* calibration intervals */
int32_t pps_errcnt = 0;		/* calibration errors */
int32_t pps_stbcnt = 0;		/* stability limit exceeded */

/* The following variables require no explicit locking */
volatile clock_t lbolt;		/* time in Hz since last boot */
volatile int64_t lbolt64;	/* lbolt64 won't wrap for 2.9 billion yrs */

kcondvar_t lbolt_cv;
int one_sec = 1; /* turned on once every second */
static int fsflushcnt;	/* counter for t_fsflushr */
int	dosynctodr = 1;	/* patchable; enable/disable sync to TOD chip */
int	tod_needsync = 0;	/* need to sync tod chip with software time */
static int tod_broken = 0;	/* clock chip doesn't work */
time_t	boot_time = 0;		/* Boot time in seconds since 1970 */
cyclic_id_t clock_cyclic;	/* clock()'s cyclic_id */
cyclic_id_t deadman_cyclic;	/* deadman()'s cyclic_id */
cyclic_id_t ddi_timer_cyclic;	/* cyclic_timer()'s cyclic_id */

extern void	clock_tick_schedule(int);

static int lgrp_ticks;		/* counter to schedule lgrp load calcs */

/*
 * for tod fault detection
 */
#define	TOD_REF_FREQ		((longlong_t)(NANOSEC))
#define	TOD_STALL_THRESHOLD	(TOD_REF_FREQ * 3 / 2)
#define	TOD_JUMP_THRESHOLD	(TOD_REF_FREQ / 2)
#define	TOD_FILTER_N		4
#define	TOD_FILTER_SETTLE	(4 * TOD_FILTER_N)
static int tod_faulted = TOD_NOFAULT;
static int tod_fault_reset_flag = 0;

/* patchable via /etc/system */
int tod_validate_enable = 1;

/*
 * On non-SPARC systems, TOD validation must be deferred until gethrtime
 * returns non-zero values (after mach_clkinit's execution).
 * On SPARC systems, it must be deferred until after hrtime_base
 * and hres_last_tick are set (in the first invocation of hres_tick).
 * Since in both cases the prerequisites occur before the invocation of
 * tod_get() in clock(), the deferment is lifted there.
 */
static boolean_t tod_validate_deferred = B_TRUE;

/*
 * tod_fault_table[] must be aligned with
 * enum tod_fault_type in systm.h
 */
static char *tod_fault_table[] = {
	"Reversed",			/* TOD_REVERSED */
	"Stalled",			/* TOD_STALLED */
	"Jumped",			/* TOD_JUMPED */
	"Changed in Clock Rate",	/* TOD_RATECHANGED */
	"Is Read-Only"			/* TOD_RDONLY */
	/*
	 * no strings needed for TOD_NOFAULT
	 */
};

/*
 * test hook for tod broken detection in tod_validate
 */
int tod_unit_test = 0;
time_t tod_test_injector;

#define	CLOCK_ADJ_HIST_SIZE	4

static int	adj_hist_entry;

int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];

static void calcloadavg(int, uint64_t *);
static int genloadavg(struct loadavg_s *);
static void loadavg_update();

void (*cmm_clock_callout)() = NULL;
void (*cpucaps_clock_callout)() = NULL;

extern clock_t clock_tick_proc_max;

static void
clock(void)
{
	kthread_t	*t;
	uint_t	nrunnable;
	uint_t	w_io;
	cpu_t	*cp;
	cpupart_t *cpupart;
	extern void set_anoninfo();
	extern void set_freemem();
	void	(*funcp)();
	int32_t ltemp;
	int64_t lltemp;
	int s;
	int do_lgrp_load;
	int i;

	if (panicstr)
		return;

	set_anoninfo();
	/*
	 * Make sure that 'freemem' does not drift too far from the truth.
	 */
	set_freemem();


	/*
	 * Before the repeated section is executed, we do the time delta
	 * processing that occurs on every clock tick.
	 *
	 * There is additional processing that happens every time the
	 * nanosecond counter rolls over; it is described below in the
	 * section that begins with: if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the higher order
	 * bits (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase / SCALE_PHASE;
		time_phase += ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta -= ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase / SCALE_PHASE;
		time_phase -= ltemp * SCALE_PHASE;
		s = hr_clock_lock();
		timedelta += ltemp * (NANOSEC/MICROSEC);
		hr_clock_unlock(s);
	}
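	/*
	 * time_phase accumulates fractional microseconds in scaled
	 * units (see SCALE_PHASE in timex.h); the code above peels
	 * off whole microseconds, converts them to nanoseconds and
	 * folds them into timedelta, leaving the sub-microsecond
	 * residue in time_phase for subsequent ticks.
	 */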

	/*
	 * End of precision-kernel code fragment which is processed
	 * every timer interrupt.
	 *
	 * Continue with the interrupt processing as scheduled.
	 */
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- the latter gets added to
	 * sysinfo.waiting.  To know the state of the system, we must
	 * add the wait counts from all CPUs.  Also add up the
	 * per-partition statistics.
	 */
	w_io = 0;
	nrunnable = 0;

	/*
	 * Keep track of when to update lgrp/part loads; they are
	 * recomputed roughly ten times per second (every hz / 10 ticks).
	 */

	do_lgrp_load = 0;
	if (lgrp_ticks++ >= hz / 10) {
		lgrp_ticks = 0;
		do_lgrp_load = 1;
	}

	if (one_sec)
		loadavg_update();

	/*
	 * First count the threads waiting on kpreempt queues in each
	 * CPU partition.
	 */

	cpupart = cp_list_head;
	do {
		uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;

		cpupart->cp_updates++;
		nrunnable += cpupart_nrunnable;
		cpupart->cp_nrunnable_cum += cpupart_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunning = 0;
			cpupart->cp_nrunnable = cpupart_nrunnable;
		}
	} while ((cpupart = cpupart->cp_next) != cp_list_head);


	/* Now count the per-CPU statistics. */
	cp = cpu_list;
	do {
		uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;

		nrunnable += cpu_nrunnable;
		cpupart = cp->cpu_part;
		cpupart->cp_nrunnable_cum += cpu_nrunnable;
		if (one_sec) {
			cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Update user, system, and idle cpu times.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below.  Only gather w_io
			 * information while walking the cpu list if we're
			 * going to perform one_second processing.
			 */
			w_io += CPU_STATS(cp, sys.iowait);
		}

		if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
			int i, load, change;
			hrtime_t intracct, intrused;
			const hrtime_t maxnsec = 1000000000;
			const int precision = 100;

			/*
			 * Estimate interrupt load on this cpu each second.
			 * Computes cpu_intrload as %utilization (0-99).
			 */

			/* add up interrupt time from all micro states */
			for (intracct = 0, i = 0; i < NCMSTATES; i++)
				intracct += cp->cpu_intracct[i];
			scalehrtime(&intracct);

			/* compute nsec used in the past second */
			intrused = intracct - cp->cpu_intrlast;
			cp->cpu_intrlast = intracct;

			/* limit the value for safety (and the first pass) */
			if (intrused >= maxnsec)
				intrused = maxnsec - 1;

			/* calculate %time in interrupt */
			load = (precision * intrused) / maxnsec;
			ASSERT(load >= 0 && load < precision);
			change = cp->cpu_intrload - load;

			/* jump to new max, or decay the old max */
			if (change < 0)
				cp->cpu_intrload = load;
			else if (change > 0)
				cp->cpu_intrload -= (change + 3) / 4;
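			/*
			 * The decay removes a quarter of the gap
			 * (rounded up) each second, so a load that
			 * drops from 80 to 0 reads 80, 60, 45, 33,
			 * 24, ... on successive passes.
			 */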

			DTRACE_PROBE3(cpu_intrload,
			    cpu_t *, cp,
			    hrtime_t, intracct,
			    hrtime_t, intrused);
		}

		if (do_lgrp_load &&
		    (cp->cpu_flags & CPU_EXISTS)) {
			/*
			 * When updating the lgroup's load average,
			 * account for the thread running on the CPU.
			 * If the CPU is the current one, then we need
			 * to account for the underlying thread which
			 * got the clock interrupt, not the thread that
			 * is handling the interrupt and calculating
			 * the load average.
			 */
			t = cp->cpu_thread;
			if (CPU == cp)
				t = t->t_intr;

			/*
			 * Account for this thread's load average if it
			 * isn't the idle thread, or if it is on the
			 * interrupt stack and not on the current CPU
			 * handling the clock interrupt.
			 */
			if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
			    CPU_ON_INTR(cp))) {
				if (t->t_lpl == cp->cpu_lpl) {
					/* local thread */
					cpu_nrunnable++;
				} else {
					/*
					 * This is a remote thread, charge it
					 * against its home lgroup.  Note that
					 * we notice that a thread is remote
					 * only if it's currently executing.
					 * This is a reasonable approximation,
					 * since queued remote threads are rare.
					 * Note also that if we didn't charge
					 * it to its home lgroup, remote
					 * execution would often make a system
					 * appear balanced even though it was
					 * not, and thread placement/migration
					 * would often not be done correctly.
					 */
					lgrp_loadavg(t->t_lpl,
					    LGRP_LOADAVG_IN_THREAD_MAX, 0);
				}
			}
			lgrp_loadavg(cp->cpu_lpl,
			    cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
		}
	} while ((cp = cp->cpu_next) != cpu_list);

	clock_tick_schedule(one_sec);

	/*
	 * bump time in ticks
	 *
	 * We rely on there being only one clock thread and hence
	 * don't need a lock to protect lbolt.
	 */
	lbolt++;
	atomic_add_64((uint64_t *)&lbolt64, (int64_t)1);

	/*
	 * Check for a callout that needs to be called from the clock
	 * thread to support the membership protocol in a clustered
	 * system.  Copy the function pointer so that we can reset
	 * this to NULL if needed.
	 */
	if ((funcp = cmm_clock_callout) != NULL)
		(*funcp)();

	if ((funcp = cpucaps_clock_callout) != NULL)
		(*funcp)();

	/*
	 * Wake up the cageout thread waiters once per second.
	 */
	if (one_sec)
		kcage_tick();

	/*
	 * Schedule timeout() requests if any are due at this time.
	 */
	callout_schedule();

	if (one_sec) {

		int drift, absdrift;
		timestruc_t tod;
		int s;

		/*
		 * Beginning of precision-kernel code fragment executed
		 * every second.
		 *
		 * On rollover of the second the phase adjustment to be
		 * used for the next second is calculated.  Also, the
		 * maximum error is increased by the tolerance.  If the
		 * PPS frequency discipline code is present, the phase is
		 * increased to compensate for the CPU clock oscillator
		 * frequency error.
		 *
		 * On a 32-bit machine and given parameters in the timex.h
		 * header file, the maximum phase adjustment is +-512 ms
		 * and maximum frequency offset is (a tad less than)
		 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
		 */
		time_maxerror += time_tolerance / SCALE_USEC;

		/*
		 * Leap second processing.  If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second (so that, e.g., 23:59:59 UTC repeats); if in
		 * leap-delete state, the system clock is set ahead one
		 * second.  The microtime() routine or external clock
		 * driver will ensure that reported time is always
		 * monotonic.  The ugly divides should be replaced.
		 */
		switch (time_state) {

		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (hrestime.tv_sec % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec--;
				hr_clock_unlock(s);
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((hrestime.tv_sec + 1) % 86400 == 0) {
				s = hr_clock_lock();
				hrestime.tv_sec++;
				hr_clock_unlock(s);
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
		default:
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset == 0)
			time_adj = 0;
		else if (time_offset < 0) {
			lltemp = -time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset += lltemp;
			time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		} else {
			lltemp = time_offset;
			if (!(time_status & STA_FLL)) {
				if ((1 << time_constant) >= SCALE_KG)
					lltemp *= (1 << time_constant) /
					    SCALE_KG;
				else
					lltemp = (lltemp / SCALE_KG) >>
					    time_constant;
			}
			if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
				lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
			time_offset -= lltemp;
			time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
		}
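		/*
		 * The clamp above bounds the slew applied in any one
		 * second to MAXPHASE / MINSEC scaled microseconds;
		 * with the standard timex.h constants (512 ms spread
		 * over at least 16 s) that is 32 ms per second.
		 */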

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		lltemp = time_freq + pps_freq;

		if (lltemp)
			time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);

		/*
		 * End of precision-kernel code fragment.
		 *
		 * The section below should be modified if we are planning
		 * to use NTP for synchronization.
		 *
		 * Note: the clock synchronization code now assumes
		 * the following:
		 *   - if dosynctodr is 1, then compute the drift between
		 *	the tod chip and software time and adjust one or
		 *	the other depending on the circumstances
		 *
		 *   - if dosynctodr is 0, then the tod chip is independent
		 *	of the software clock and should not be adjusted,
		 *	but allowed to free run.  This allows NTP to sync
		 *	hrestime without any interference from the tod chip.
		 */

		tod_validate_deferred = B_FALSE;
		mutex_enter(&tod_lock);
		tod = tod_get();
		drift = tod.tv_sec - hrestime.tv_sec;
		absdrift = (drift >= 0) ? drift : -drift;
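		/*
		 * Three cases follow: if the drift exceeds two
		 * seconds, trust the TOD chip and reset hrestime from
		 * it; if a sync was explicitly requested (or
		 * dosynctodr is off), write the software time out to
		 * the chip; otherwise slew hrestime toward the chip
		 * by folding the drift into timedelta.
		 */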
		if (tod_needsync || absdrift > 1) {
			int s;
			if (absdrift > 2) {
				if (!tod_broken && tod_faulted == TOD_NOFAULT) {
					s = hr_clock_lock();
					hrestime = tod;
					membar_enter();	/* hrestime visible */
					timedelta = 0;
					timechanged++;
					tod_needsync = 0;
					hr_clock_unlock(s);
				}
			} else {
				if (tod_needsync || !dosynctodr) {
					gethrestime(&tod);
					tod_set(tod);
					s = hr_clock_lock();
					if (timedelta == 0)
						tod_needsync = 0;
					hr_clock_unlock(s);
				} else {
					/*
					 * If the drift is 2 seconds on the
					 * money, then the TOD is adjusting
					 * the clock;  record that.
					 */
					clock_adj_hist[adj_hist_entry++ %
					    CLOCK_ADJ_HIST_SIZE] = lbolt64;
					s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
					hr_clock_unlock(s);
				}
			}
		}
		one_sec = 0;
		time = gethrestime_sec();  /* for crusty old kmem readers */
		mutex_exit(&tod_lock);

		/*
		 * Some drivers still depend on this... XXX
		 */
		cv_broadcast(&lbolt_cv);

		sysinfo.updates++;
		vminfo.freemem += freemem;
		{
			pgcnt_t maxswap, resv, free;
			pgcnt_t avail =
			    MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);

			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
			free = k_anoninfo.ani_free + avail;
			resv = k_anoninfo.ani_phys_resv +
			    k_anoninfo.ani_mem_resv;

			vminfo.swap_resv += resv;
			/* number of reserved and allocated pages */
#ifdef	DEBUG
			if (maxswap < free)
				cmn_err(CE_WARN, "clock: maxswap < free");
			if (maxswap < resv)
				cmn_err(CE_WARN, "clock: maxswap < resv");
#endif
			vminfo.swap_alloc += maxswap - free;
			vminfo.swap_avail += maxswap - resv;
			vminfo.swap_free += free;
		}
		if (nrunnable) {
			sysinfo.runque += nrunnable;
			sysinfo.runocc++;
		}
		if (nswapped) {
			sysinfo.swpque += nswapped;
			sysinfo.swpocc++;
		}
		sysinfo.waiting += w_io;

		/*
		 * Wake up fsflush to write out DELWRI
		 * buffers, dirty pages and other cached
		 * administrative data, e.g. inodes.
		 */
		if (--fsflushcnt <= 0) {
			fsflushcnt = tune.t_fsflushr;
			cv_signal(&fsflush_cv);
		}

		vmmeter();
		calcloadavg(genloadavg(&loadavg), hp_avenrun);
		for (i = 0; i < 3; i++)
			/*
			 * At the moment avenrun[] can only hold 31
			 * bits of load average as it is a signed
			 * int in the API.  We need to ensure that
			 * hp_avenrun[i] >> (16 - FSHIFT) will not be
			 * too large.  If it is, we put the largest value
			 * that we can use into avenrun[i].  This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64_t.
			 */
			if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
				avenrun[i] = (int32_t)(hp_avenrun[i] >>
				    (16 - FSHIFT));
			else
				avenrun[i] = 0x7fffffff;
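		/*
		 * hp_avenrun[] carries 16 fractional bits; shifting by
		 * (16 - FSHIFT) converts to the FSCALE fixed-point
		 * format that avenrun[] exports to user level.
		 */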

		cpupart = cp_list_head;
		do {
			calcloadavg(genloadavg(&cpupart->cp_loadavg),
			    cpupart->cp_hp_avenrun);
		} while ((cpupart = cpupart->cp_next) != cp_list_head);

		/*
		 * Wake up the swapper thread if necessary.
		 */
		if (runin ||
		    (runout && (avefree < desfree || wake_sched_sec))) {
			t = &t0;
			thread_lock(t);
			if (t->t_state == TS_STOPPED) {
				runin = runout = 0;
				wake_sched_sec = 0;
				t->t_whystop = 0;
				t->t_whatstop = 0;
				t->t_schedflag &= ~TS_ALLSTART;
				THREAD_TRANSITION(t);
				setfrontdq(t);
			}
			thread_unlock(t);
		}
	}

	/*
	 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
	 */
	if (wake_sched) {
		t = &t0;
		thread_lock(t);
		if (t->t_state == TS_STOPPED) {
			runin = runout = 0;
			wake_sched = 0;
			t->t_whystop = 0;
			t->t_whatstop = 0;
			t->t_schedflag &= ~TS_ALLSTART;
			THREAD_TRANSITION(t);
			setfrontdq(t);
		}
		thread_unlock(t);
	}
}

void
clock_init(void)
{
	cyc_handler_t hdlr;
	cyc_time_t when;

	hdlr.cyh_func = (cyc_func_t)clock;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;
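	/*
	 * clock() fires once per tick: cyt_interval is nsec_per_tick
	 * nanoseconds, i.e. hz times per second.
	 */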

	mutex_enter(&cpu_lock);
	clock_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	/*
	 * cyclic_timer is dedicated to the ddi interface, which
	 * uses the same clock resolution as the system one.
	 */
	hdlr.cyh_func = (cyc_func_t)cyclic_timer;
	hdlr.cyh_level = CY_LOCK_LEVEL;
	hdlr.cyh_arg = NULL;

	mutex_enter(&cpu_lock);
	ddi_timer_cyclic = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
}

/*
 * Called before calcloadavg() to assemble the 10-second moving load
 * average.
 */

static int
genloadavg(struct loadavg_s *avgs)
{
	int avg;
	int spos; /* starting position */
	int cpos; /* moving current position */
	int i;
	int slen;
	hrtime_t hr_avg;

	/* 10-second snapshot, calculate first position */
	if (avgs->lg_len == 0) {
		return (0);
	}
	slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;

	spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
	    S_LOADAVG_SZ + (avgs->lg_cur - 1);
	for (i = hr_avg = 0; i < slen; i++) {
		cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
		hr_avg += avgs->lg_loads[cpos];
	}

	hr_avg = hr_avg / slen;
	avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);

	return (avg);
}

/*
 * Run every second from clock() to update the loadavg count available
 * to the system and cpu-partitions.
 *
 * This works by sampling the cumulative usr, sys, and wait times on
 * each cpu, computing the delta from the previous sample, and
 * recording that delta as the load contribution for this second.
 */

static void
loadavg_update()
{
	cpu_t *cp;
	cpupart_t *cpupart;
	hrtime_t cpu_total;
	int prev;

	cp = cpu_list;
	loadavg.lg_total = 0;

	/*
	 * first pass totals up per-cpu statistics for system and cpu
	 * partitions
	 */

	do {
		struct loadavg_s *lavg;

		lavg = &cp->cpu_loadavg;

		cpu_total = cp->cpu_acct[CMS_USER] +
		    cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
		/* compute delta against last total */
		scalehrtime(&cpu_total);
		prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
		    S_LOADAVG_SZ + (lavg->lg_cur - 1);
		if (lavg->lg_loads[prev] <= 0) {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = 0;
		} else {
			lavg->lg_loads[lavg->lg_cur] = cpu_total;
			cpu_total = cpu_total - lavg->lg_loads[prev];
			if (cpu_total < 0)
				cpu_total = 0;
		}

		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

		loadavg.lg_total += cpu_total;
		cp->cpu_part->cp_loadavg.lg_total += cpu_total;

	} while ((cp = cp->cpu_next) != cpu_list);

	loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
	loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
	loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
	    loadavg.lg_len + 1 : S_LOADAVG_SZ;
	/*
	 * Second pass updates counts
	 */
	cpupart = cp_list_head;

	do {
		struct loadavg_s *lavg;

		lavg = &cpupart->cp_loadavg;
		lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
		lavg->lg_total = 0;
		lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
		lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
		    lavg->lg_len + 1 : S_LOADAVG_SZ;

	} while ((cpupart = cpupart->cp_next) != cp_list_head);

}

/*
 * clock_update() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  The implementation is of an
 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL).  The
 * routine computes new time and frequency offset estimates for each
 * call.  When the PPS signal is engaged, the PPS offset is used in
 * place of the calling argument.  Presumably, calls to ntp_adjtime()
 * occur only when the caller believes the local clock is valid within
 * some bound (+-128 ms with NTP).  If the caller's time is far
 * different than the PPS time, an argument will ensue, and it's not
 * clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 1024 s, operation should be in phase-lock mode
 * (STA_FLL = 0), where the loop is disciplined to phase.  For update
 * intervals greater than this, operation should be in frequency-lock
 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
 *
 * Note: mutex(&tod_lock) is in effect.
 */
void
clock_update(int offset)
{
	int ltemp, mtemp, s;

	ASSERT(MUTEX_HELD(&tod_lock));

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
	if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
		ltemp = pps_offset;

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE * SCALE_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE * SCALE_UPDATE);
	else
		time_offset = ltemp * SCALE_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = hrestime.tv_sec;

	mtemp = hrestime.tv_sec - time_reftime;
	time_reftime = hrestime.tv_sec;

	if (time_status & STA_FLL) {
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) * (SCALE_USEC /
			    SCALE_UPDATE));
			if (ltemp)
				time_freq += ltemp / SCALE_KH;
		}
	} else {
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp)
				time_freq += (int)(((int64_t)ltemp *
				    SCALE_USEC) / SCALE_KF)
				    / (1 << (time_constant * 2));
		}
	}
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
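	/*
	 * In FLL mode the frequency estimate is nudged by offset /
	 * interval; in PLL mode by offset * interval, attenuated by
	 * the square of the time constant.  Either way the result is
	 * clamped to +-time_tolerance above.
	 */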

	s = hr_clock_lock();
	tod_needsync = 1;
	hr_clock_unlock(s);
}

/*
 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
 * and leaves it in a handy spot for the clock() routine. It
 * integrates successive PPS phase differences and calculates the
 * frequency offset. This is used in clock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware counter
 * value at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine clock().
 * Therefore, the variables used are distinct from the clock()
 * variables, except for certain exceptions: The PPS frequency pps_freq
 * and phase pps_offset variables are determined by this routine and
 * updated atomically. The time_tolerance variable can be considered a
 * constant, since it is infrequently changed, and then only when the
 * PPS signal is disabled. The watchdog counter pps_valid is updated
 * once per second by clock() and is atomically cleared in this
 * routine.
 *
 * tvp is the time of the last tick; usec is a microsecond count since the
 * last tick.
 *
 * Note: In Solaris systems, the tick value is actually given by
 *	 usec_per_tick.  This is called from the serial driver cdintr(),
 *	 or equivalent, at a high PIL.  Because the kernel keeps a
 *	 high-resolution time, the following code can accept either
 *	 the traditional argument pair, or the current highres timestamp
 *	 in tvp and zero in usec.
 */
void
ddi_hardpps(struct timeval *tvp, int usec)
{
	int u_usec, v_usec, bigtick;
	time_t cal_sec;
	int cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the clock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;
	u_usec = -tvp->tv_usec;
	if (u_usec < -(MICROSEC/2))
		u_usec += MICROSEC;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (usec_per_tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
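	/*
	 * For example, samples {5, 9, 7} (newest first) yield a
	 * median offset of 7 us and a jitter estimate of
	 * |9 - 5| = 4 us.
	 */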
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	pps_jitter += v_usec / (1 << PPS_AVG);
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (int)usec_per_tick * SCALE_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec * SCALE_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += MICROSEC;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (int)usec_per_tick << 1;
	if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
	    (cal_sec == 0 && cal_usec < u_usec)) ||
	    v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the clock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;

	/*
	 * If recovering from kmdb, then make sure the tod chip gets resynced.
	 * If we took an early exit above, then we don't yet have a stable
	 * calibration signal to lock onto, so don't mark the tod for sync
	 * until we get all the way here.
	 */
	{
		int s = hr_clock_lock();

		tod_needsync = 1;
		hr_clock_unlock(s);
	}
}

/*
 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling etc.
 */
void
clock_tick(kthread_t *t, int pending)
{
	struct proc *pp;
	klwp_id_t lwp;
	struct as *as;
	clock_t	ticks;
	int	poke = 0;		/* notify another CPU */
	int	user_mode;
	size_t	rss;
	int i, total_usec, usec;
	rctl_qty_t secs;

	ASSERT(pending > 0);

	/* Must be operating on a lwp/thread */
	if ((lwp = ttolwp(t)) == NULL) {
		panic("clock_tick: no lwp");
		/*NOTREACHED*/
	}

	for (i = 0; i < pending; i++) {
		CL_TICK(t);	/* Class specific tick processing */
		DTRACE_SCHED1(tick, kthread_t *, t);
	}

	pp = ttoproc(t);

	/* pp->p_lock makes sure that the thread does not exit */
	ASSERT(MUTEX_HELD(&pp->p_lock));

	user_mode = (lwp->lwp_state == LWP_USER);

	ticks = (pp->p_utime + pp->p_stime) % hz;
	/*
	 * Update process times. Should use high res clock and state
	 * changes instead of statistical sampling method. XXX
	 */
	if (user_mode) {
		pp->p_utime += pending;
	} else {
		pp->p_stime += pending;
	}

	pp->p_ttime += pending;
	as = pp->p_as;

	/*
	 * Update user profiling statistics. Get the pc from the
	 * lwp when the AST happens.
	 */
	if (pp->p_prof.pr_scale) {
		atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
		if (user_mode) {
			poke = 1;
			aston(t);
		}
	}

	/*
	 * If CPU was in user state, process lwp-virtual time
	 * interval timer. The value passed to itimerdecr() has to be
	 * in microseconds and has to be less than one second. Hence
	 * this loop.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (user_mode &&
		    timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGVTALRM);
		}
		total_usec -= usec;
	}

	/*
	 * If CPU was in user state, process lwp-profile
	 * interval timer.
	 */
	total_usec = usec_per_tick * pending;
	while (total_usec > 0) {
		usec = MIN(total_usec, (MICROSEC - 1));
		if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
			poke = 1;
			sigtoproc(pp, t, SIGPROF);
		}
		total_usec -= usec;
	}

	/*
	 * Enforce CPU resource controls:
	 *   (a) process.max-cpu-time resource control
	 *
	 * Perform the check only if we have accumulated more than a
	 * second of CPU time.
	 */
	if ((ticks + pending) >= hz) {
		(void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
		    (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
	}

	/*
	 *   (b) task.max-cpu-time resource control
	 *
	 * If we have accumulated enough ticks, increment the task CPU
	 * time usage and test for the resource limit. This minimizes the
	 * number of calls to rctl_test(). The task CPU time mutex
	 * is highly contended as many processes can be sharing a task.
	 */
	if (pp->p_ttime >= clock_tick_proc_max) {
		secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
		pp->p_ttime = 0;
		if (secs) {
			(void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
			    pp, secs, RCA_UNSAFE_SIGINFO);
		}
	}

	/*
	 * Update memory usage for the currently running process.
	 */
	rss = rm_asrss(as);
	PTOU(pp)->u_mem += rss;
	if (rss > PTOU(pp)->u_mem_max)
		PTOU(pp)->u_mem_max = rss;

	/*
	 * Notify the CPU the thread is running on.
	 */
	if (poke && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

void
profil_tick(uintptr_t upc)
{
	int ticks;
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	struct prof *pr = &p->p_prof;

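	/*
	 * Atomically snapshot and clear the profiling ticks that
	 * clock_tick() accumulated in lwp_oweupc; cas32() retries if
	 * more ticks arrive between the read and the swap.
	 */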
	do {
		ticks = lwp->lwp_oweupc;
	} while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);

	mutex_enter(&p->p_pflock);
	if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
		/*
		 * Old-style profiling
		 */
		uint16_t *slot = pr->pr_base;
		uint16_t old, new;
		if (pr->pr_scale != 2) {
			uintptr_t delta = upc - pr->pr_off;
			uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
			    (((delta & 0xffff) * pr->pr_scale) >> 16);
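			/*
			 * byteoff is (delta * pr_scale) / 65536,
			 * computed in two pieces so the intermediate
			 * multiply does not overflow a uintptr_t.
			 */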
			if (byteoff >= (uintptr_t)pr->pr_size) {
				mutex_exit(&p->p_pflock);
				return;
			}
			slot += byteoff / sizeof (uint16_t);
		}
		if (fuword16(slot, &old) < 0 ||
		    (new = old + ticks) > SHRT_MAX ||
		    suword16(slot, new) < 0) {
			pr->pr_scale = 0;
		}
	} else if (pr->pr_scale == 1) {
		/*
		 * PC Sampling
		 */
		model_t model = lwp_getdatamodel(lwp);
		int result;
#ifdef __lint
		model = model;
#endif
		while (ticks-- > 0) {
			if (pr->pr_samples == pr->pr_size) {
				/* buffer full, turn off sampling */
				pr->pr_scale = 0;
				break;
			}
			switch (SIZEOF_PTR(model)) {
			case sizeof (uint32_t):
				result = suword32(pr->pr_base, (uint32_t)upc);
				break;
#ifdef _LP64
			case sizeof (uint64_t):
				result = suword64(pr->pr_base, (uint64_t)upc);
				break;
#endif
			default:
				cmn_err(CE_WARN, "profil_tick: unexpected "
				    "data model");
				result = -1;
				break;
			}
			if (result != 0) {
				pr->pr_scale = 0;
				break;
			}
			pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
			pr->pr_samples++;
		}
	}
	mutex_exit(&p->p_pflock);
}

static void
delay_wakeup(void *arg)
{
	kthread_t *t = arg;

	mutex_enter(&t->t_delay_lock);
	cv_signal(&t->t_delay_cv);
	mutex_exit(&t->t_delay_lock);
}

void
delay(clock_t ticks)
{
	kthread_t *t = curthread;
	clock_t deadline = lbolt + ticks;
	clock_t timeleft;
	timeout_id_t id;
	extern hrtime_t volatile devinfo_freeze;

	if ((panicstr || devinfo_freeze) && ticks > 0) {
		/*
		 * Timeouts aren't running, so all we can do is spin.
		 */
		drv_usecwait(TICK_TO_USEC(ticks));
		return;
	}

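	/*
	 * Sleep in deadline - lbolt chunks; the loop re-checks the
	 * deadline, so a premature wakeup simply rearms the timeout
	 * for the remaining ticks.
	 */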
	while ((timeleft = deadline - lbolt) > 0) {
		mutex_enter(&t->t_delay_lock);
		id = timeout(delay_wakeup, t, timeleft);
		cv_wait(&t->t_delay_cv, &t->t_delay_lock);
		mutex_exit(&t->t_delay_lock);
		(void) untimeout(id);
	}
}

/*
 * Like delay, but interruptible by a signal.
 */
int
delay_sig(clock_t ticks)
{
	clock_t deadline = lbolt + ticks;
	clock_t rc;

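	/*
	 * cv_timedwait_sig() returns -1 on timeout, 0 when interrupted
	 * by a signal, and > 0 on a stray wakeup, so loop until the
	 * delay either expires or is interrupted.
	 */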
1624 	mutex_enter(&curthread->t_delay_lock);
1625 	do {
1626 		rc = cv_timedwait_sig(&curthread->t_delay_cv,
1627 		    &curthread->t_delay_lock, deadline);
1628 	} while (rc > 0);
1629 	mutex_exit(&curthread->t_delay_lock);
1630 	if (rc == 0)
1631 		return (EINTR);
1632 	return (0);
1633 }
1634 
1635 #define	SECONDS_PER_DAY 86400
1636 
1637 /*
1638  * Initialize the system time based on the TOD chip.  approx is used as
1639  * an approximation of time (e.g. from the filesystem) in the event that
1640  * the TOD chip has been cleared or is unresponsive.  An approx of -1
1641  * means the filesystem doesn't keep time.
1642  */
1643 void
1644 clkset(time_t approx)
1645 {
1646 	timestruc_t ts;
1647 	int spl;
1648 	int set_clock = 0;
1649 
1650 	mutex_enter(&tod_lock);
1651 	ts = tod_get();
1652 
1653 	if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
1654 		/*
1655 		 * If the TOD chip is reporting some time after 1971,
1656 		 * then it probably didn't lose power or become otherwise
1657 		 * cleared in the recent past;  check to assure that
1658 		 * the time coming from the filesystem isn't in the future
1659 		 * according to the TOD chip.
1660 		 */
1661 		if (approx != -1 && approx > ts.tv_sec) {
1662 			cmn_err(CE_WARN, "Last shutdown is later "
1663 			    "than time on time-of-day chip; check date.");
1664 		}
1665 	} else {
1666 		/*
1667 		 * If the TOD chip isn't giving correct time, then set it to
1668 		 * the time that was passed in as a rough estimate.  If we
1669 		 * don't have an estimate, then set the clock back to a time
1670 		 * when Oliver North, ALF and Dire Straits were all on the
1671 		 * collective brain:  1987.
1672 		 */
1673 		timestruc_t tmp;
1674 		if (approx == -1)
1675 			ts.tv_sec = (1987 - 1970) * 365 * SECONDS_PER_DAY;
1676 		else
1677 			ts.tv_sec = approx;
1678 		ts.tv_nsec = 0;
1679 
1680 		/*
1681 		 * Attempt to write the new time to the TOD chip.  Set spl high
1682 		 * to avoid getting preempted between the tod_set and tod_get.
1683 		 */
1684 		spl = splhi();
1685 		tod_set(ts);
1686 		tmp = tod_get();
1687 		splx(spl);
1688 
1689 		if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
1690 			tod_broken = 1;
1691 			dosynctodr = 0;
1692 			cmn_err(CE_WARN, "Time-of-day chip unresponsive;"
1693 			    " dead batteries?");
1694 		} else {
1695 			cmn_err(CE_WARN, "Time-of-day chip had "
1696 			    "incorrect date; check and reset.");
1697 		}
1698 		set_clock = 1;
1699 	}
1700 
1701 	if (!boot_time) {
1702 		boot_time = ts.tv_sec;
1703 		set_clock = 1;
1704 	}
1705 
1706 	if (set_clock)
1707 		set_hrestime(&ts);
1708 
1709 	mutex_exit(&tod_lock);
1710 }
1711 
1712 int	timechanged;	/* for testing if the system time has been reset */
1713 
1714 void
1715 set_hrestime(timestruc_t *ts)
1716 {
1717 	int spl = hr_clock_lock();
1718 	hrestime = *ts;
1719 	membar_enter();	/* hrestime must be visible before timechanged++ */
1720 	timedelta = 0;
1721 	timechanged++;
1722 	hr_clock_unlock(spl);
1723 }
1724 
1725 static uint_t deadman_seconds;
1726 static uint32_t deadman_panics;
1727 static int deadman_enabled = 0;
1728 static int deadman_panic_timers = 1;
1729 
1730 static void
1731 deadman(void)
1732 {
1733 	if (panicstr) {
1734 		/*
1735 		 * During panic, other CPUs besides the panic
1736 		 * master continue to handle cyclics and some other
1737 		 * interrupts.  The code below is intended to be
1738 		 * single threaded, so any CPU other than the master
1739 		 * must keep out.
1740 		 */
1741 		if (CPU->cpu_id != panic_cpu.cpu_id)
1742 			return;
1743 
1744 		/*
1745 		 * If we're panicking, the deadman cyclic continues to increase
1746 		 * lbolt in case the dump device driver relies on this for
1747 		 * timeouts.  Note that we rely on deadman() being invoked once
1748 		 * per second, and credit lbolt and lbolt64 with hz ticks each.
1749 		 */
1750 		lbolt += hz;
1751 		lbolt64 += hz;
1752 
1753 		if (!deadman_panic_timers)
1754 			return; /* allow all timers to be manually disabled */
1755 
1756 		/*
1757 		 * If we are generating a crash dump or syncing filesystems and
1758 		 * the corresponding timer is set, decrement it and re-enter
1759 		 * the panic code to abort it and advance to the next state.
1760 		 * The panic states and triggers are explained in panic.c.
1761 		 */
1762 		if (panic_dump) {
1763 			if (dump_timeleft && (--dump_timeleft == 0)) {
1764 				panic("panic dump timeout");
1765 				/*NOTREACHED*/
1766 			}
1767 		} else if (panic_sync) {
1768 			if (sync_timeleft && (--sync_timeleft == 0)) {
1769 				panic("panic sync timeout");
1770 				/*NOTREACHED*/
1771 			}
1772 		}
1773 
1774 		return;
1775 	}
1776 
1777 	if (lbolt != CPU->cpu_deadman_lbolt) {
1778 		CPU->cpu_deadman_lbolt = lbolt;
1779 		CPU->cpu_deadman_countdown = deadman_seconds;
1780 		return;
1781 	}
1782 
1783 	if (--CPU->cpu_deadman_countdown > 0)
1784 		return;
1785 
1786 	/*
1787 	 * Regardless of whether or not we actually bring the system down,
1788 	 * bump the deadman_panics variable.
1789 	 *
1790 	 * N.B. deadman_panics is incremented once for each CPU that
1791 	 * passes through here.  It's expected that all the CPUs will
1792 	 * detect this condition within one second of each other, so
1793 	 * when deadman_enabled is off, deadman_panics will
1794 	 * typically be a multiple of the total number of CPUs in
1795 	 * the system.
1796 	 */
1797 	atomic_add_32(&deadman_panics, 1);
1798 
1799 	if (!deadman_enabled) {
1800 		CPU->cpu_deadman_countdown = deadman_seconds;
1801 		return;
1802 	}
1803 
1804 	/*
1805 	 * If we're here, we want to bring the system down.
1806 	 */
1807 	panic("deadman: timed out after %d seconds of clock "
1808 	    "inactivity", deadman_seconds);
1809 	/*NOTREACHED*/
1810 }
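
/*
 * Editor's note: a worked example of the countdown above.  deadman()
 * fires once per second on each CPU.  While clock() is alive, lbolt
 * differs from cpu_deadman_lbolt on every visit and the countdown is
 * reset to deadman_seconds.  If clock() wedges with, say,
 * deadman_seconds == 30, each subsequent visit sees an unchanged lbolt
 * and decrements the countdown; the 30th such visit drives it to zero
 * and, with deadman_enabled set, panics the system.
 */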
1811 
1812 /*ARGSUSED*/
1813 static void
1814 deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
1815 {
1816 	cpu->cpu_deadman_lbolt = 0;
1817 	cpu->cpu_deadman_countdown = deadman_seconds;
1818 
1819 	hdlr->cyh_func = (cyc_func_t)deadman;
1820 	hdlr->cyh_level = CY_HIGH_LEVEL;
1821 	hdlr->cyh_arg = NULL;
1822 
1823 	/*
1824 	 * Stagger the CPUs so that they don't all run deadman() at
1825 	 * the same time.  The simplest reason to do this is to make it
1826 	 * more likely that only one CPU will panic in case of a
1827 	 * timeout.  This is (strictly speaking) an aesthetic, not a
1828 	 * technical consideration.
1829 	 *
1830 	 * The interval must be one second, in accordance with the
1831 	 * code in deadman() above that credits lbolt during panic.
1832 	 */
1833 	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
1834 	when->cyt_interval = NANOSEC;
1835 }
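
/*
 * Editor's note: a worked example of the stagger above, assuming
 * NCPU == 64.  CPU 0 first fires at cyclic time 0, CPU 1 at
 * NANOSEC / 64 (~15.6ms), CPU 3 at 3 * NANOSEC / 64 (~46.9ms), and so
 * on, each repeating at one-second intervals.  All offsets fall within
 * a single second, so the per-CPU deadman() invocations are spread
 * across the interval rather than colliding.
 */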
1836 
1837 
1838 void
1839 deadman_init(void)
1840 {
1841 	cyc_omni_handler_t hdlr;
1842 
1843 	if (deadman_seconds == 0)
1844 		deadman_seconds = snoop_interval / MICROSEC;
1845 
1846 	if (snooping)
1847 		deadman_enabled = 1;
1848 
1849 	hdlr.cyo_online = deadman_online;
1850 	hdlr.cyo_offline = NULL;
1851 	hdlr.cyo_arg = NULL;
1852 
1853 	mutex_enter(&cpu_lock);
1854 	deadman_cyclic = cyclic_add_omni(&hdlr);
1855 	mutex_exit(&cpu_lock);
1856 }
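
/*
 * Editor's note: a hedged example of arming the deadman from
 * /etc/system; the names match the variables read above, the value is
 * illustrative, and whether your platform honors them should be
 * verified:
 *
 *	set snooping = 1
 *	set deadman_seconds = 30
 *
 * With snooping set at boot, deadman_init() enables the deadman, and a
 * clock that stops advancing lbolt for deadman_seconds consecutive
 * seconds will panic the system.
 */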
1857 
1858 /*
1859  * tod_fault() is for updating tod validate mechanism state:
1860  * (1) TOD_NOFAULT: for resetting the state to 'normal'.
1861  *     Currently used for debugging only.
1862  * (2) The following four cases are detected by the tod validate mechanism:
1863  *       TOD_REVERSED: current tod value is less than previous value.
1864  *       TOD_STALLED: current tod value hasn't advanced.
1865  *       TOD_JUMPED: current tod value advanced too far from previous value.
1866  *       TOD_RATECHANGED: the ratio between average tod delta and
1867  *       average tick delta has changed.
1868  * (3) TOD_RDONLY: when the TOD clock is not writable, e.g. because it is
1869  *     a virtual TOD provided by a hypervisor.
1870  */
1871 enum tod_fault_type
1872 tod_fault(enum tod_fault_type ftype, int off)
1873 {
1874 	ASSERT(MUTEX_HELD(&tod_lock));
1875 
1876 	if (tod_faulted != ftype) {
1877 		switch (ftype) {
1878 		case TOD_NOFAULT:
1879 			plat_tod_fault(TOD_NOFAULT);
1880 			cmn_err(CE_NOTE, "Restarted tracking "
1881 			    "Time of Day clock.");
1882 			tod_faulted = ftype;
1883 			break;
1884 		case TOD_REVERSED:
1885 		case TOD_JUMPED:
1886 			if (tod_faulted == TOD_NOFAULT) {
1887 				plat_tod_fault(ftype);
1888 				cmn_err(CE_WARN, "Time of Day clock error: "
1889 				    "reason [%s by 0x%x] -- "
1890 				    "stopped tracking Time of Day clock.",
1891 				    tod_fault_table[ftype], off);
1892 				tod_faulted = ftype;
1893 			}
1894 			break;
1895 		case TOD_STALLED:
1896 		case TOD_RATECHANGED:
1897 			if (tod_faulted == TOD_NOFAULT) {
1898 				plat_tod_fault(ftype);
1899 				cmn_err(CE_WARN, "Time of Day clock error: "
1900 				    "reason [%s] -- "
1901 				    "stopped tracking Time of Day clock.",
1902 				    tod_fault_table[ftype]);
1903 				tod_faulted = ftype;
1904 			}
1905 			break;
1906 		case TOD_RDONLY:
1907 			if (tod_faulted == TOD_NOFAULT) {
1908 				plat_tod_fault(ftype);
1909 				cmn_err(CE_NOTE, "!Time of Day clock is "
1910 				    "read-only; setting the date/time will not "
1911 				    "persist across reboot.");
1912 				tod_faulted = ftype;
1913 			}
1914 			break;
1915 		default:
1916 			break;
1917 		}
1918 	}
1919 	return (tod_faulted);
1920 }
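
/*
 * Editor's note: a minimal sketch of the calling convention for
 * tod_fault() above.  Callers must hold tod_lock (per the ASSERT); the
 * helper name (my_tod_clear_fault) is hypothetical.
 */
#if 0
static void
my_tod_clear_fault(void)
{
	mutex_enter(&tod_lock);
	(void) tod_fault(TOD_NOFAULT, 0);	/* resume TOD tracking */
	mutex_exit(&tod_lock);
}
#endif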
1921 
1922 void
1923 tod_fault_reset()
1924 {
1925 	tod_fault_reset_flag = 1;
1926 }
1927 
1928 
1929 /*
1930  * tod_validate() is used for checking values returned by tod_get().
1931  * Four error cases can be detected by this routine:
1932  *   TOD_REVERSED: current tod value is less than previous.
1933  *   TOD_STALLED: current tod value hasn't advanced.
1934  *   TOD_JUMPED: current tod value advanced too far from previous value.
1935  *   TOD_RATECHANGED: the ratio between average tod delta and
1936  *   average tick delta has changed.
1937  */
1938 time_t
1939 tod_validate(time_t tod)
1940 {
1941 	time_t diff_tod;
1942 	hrtime_t diff_tick;
1943 
1944 	long dtick;
1945 	int dtick_delta;
1946 
1947 	int off = 0;
1948 	enum tod_fault_type tod_bad = TOD_NOFAULT;
1949 
1950 	static int firsttime = 1;
1951 
1952 	static time_t prev_tod = 0;
1953 	static hrtime_t prev_tick = 0;
1954 	static long dtick_avg = TOD_REF_FREQ;
1955 
1956 	hrtime_t tick = gethrtime();
1957 
1958 	ASSERT(MUTEX_HELD(&tod_lock));
1959 
1960 	/*
1961 	 * tod_validate_enable is patchable via /etc/system.
1962 	 * If TOD is already faulted, or if TOD validation is deferred,
1963 	 * there is nothing to do.
1964 	 */
1965 	if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
1966 	    tod_validate_deferred) {
1967 		return (tod);
1968 	}
1969 
1970 	/*
1971 	 * Update prev_tod and prev_tick values for first run
1972 	 */
1973 	if (firsttime) {
1974 		firsttime = 0;
1975 		prev_tod = tod;
1976 		prev_tick = tick;
1977 		return (tod);
1978 	}
1979 
1980 	/*
1981 	 * For either of these conditions, we need to reset ourselves
1982 	 * and start validation from zero, since each condition
1983 	 * indicates that the TOD will be updated with a new value.
1984 	 * Also, note that tod_needsync will be reset in clock().
1985 	 */
1986 	if (tod_needsync || tod_fault_reset_flag) {
1987 		firsttime = 1;
1988 		prev_tod = 0;
1989 		prev_tick = 0;
1990 		dtick_avg = TOD_REF_FREQ;
1991 
1992 		if (tod_fault_reset_flag)
1993 			tod_fault_reset_flag = 0;
1994 
1995 		return (tod);
1996 	}
1997 
1998 	/* test hook */
1999 	switch (tod_unit_test) {
2000 	case 1: /* for testing jumping tod */
2001 		tod += tod_test_injector;
2002 		tod_unit_test = 0;
2003 		break;
2004 	case 2:	/* for testing stuck tod bit */
2005 		tod |= 1 << tod_test_injector;
2006 		tod_unit_test = 0;
2007 		break;
2008 	case 3:	/* for testing stalled tod */
2009 		tod = prev_tod;
2010 		tod_unit_test = 0;
2011 		break;
2012 	case 4:	/* reset tod fault status */
2013 		(void) tod_fault(TOD_NOFAULT, 0);
2014 		tod_unit_test = 0;
2015 		break;
2016 	default:
2017 		break;
2018 	}
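	/*
	 * Editor's note: the hook above is driven by patching these two
	 * globals, e.g. from a kernel debugger.  tod_unit_test = 1 with
	 * tod_test_injector = 3600 makes the next call look like a
	 * one-hour TOD jump; tod_unit_test = 3 replays the previous TOD
	 * value to simulate a stall.  Each case clears tod_unit_test, so
	 * the injection fires exactly once.
	 */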
2019 
2020 	diff_tod = tod - prev_tod;
2021 	diff_tick = tick - prev_tick;
2022 
2023 	ASSERT(diff_tick >= 0);
2024 
2025 	if (diff_tod < 0) {
2026 		/* ERROR - tod reversed */
2027 		tod_bad = TOD_REVERSED;
2028 		off = (int)(prev_tod - tod);
2029 	} else if (diff_tod == 0) {
2030 		/* tod did not advance */
2031 		if (diff_tick > TOD_STALL_THRESHOLD) {
2032 			/* ERROR - tod stalled */
2033 			tod_bad = TOD_STALLED;
2034 		} else {
2035 			/*
2036 			 * Don't update prev_tick here, so that
2037 			 * diff_tick keeps accumulating from the
2038 			 * first time diff_tod was observed to be 0.
2039 			 */
2040 			return (tod);
2041 		}
2042 	} else {
2043 		/* calculate dtick */
2044 		dtick = diff_tick / diff_tod;
2045 
2046 		/* update dtick averages */
2047 		dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
2048 
2049 		/*
2050 		 * Calculate dtick_delta as the variation from the
2051 		 * reference frequency, in quarters of that frequency
2052 		 */
2053 		dtick_delta = (dtick_avg - TOD_REF_FREQ) /
2054 		    (TOD_REF_FREQ >> 2);
2055 
2056 		/*
2057 		 * Even with a perfectly functioning TOD device, a
2058 		 * rate computed over a short interval can fall outside
2059 		 * tolerance and trigger a spurious fault; the
2060 		 * calculation is simply inaccurate over short spans.
2061 		 * Apply the rate checks only when at least five TOD
2062 		 * seconds have elapsed.
2063 		 */
2064 		if (diff_tod > 4) {
2065 			if (dtick < TOD_JUMP_THRESHOLD) {
2066 				/* ERROR - tod jumped */
2067 				tod_bad = TOD_JUMPED;
2068 				off = (int)diff_tod;
2069 			} else if (dtick_delta) {
2070 				/* ERROR - change in clock rate */
2071 				tod_bad = TOD_RATECHANGED;
2072 			}
2073 		}
2074 	}
2075 
2076 	if (tod_bad != TOD_NOFAULT) {
2077 		(void) tod_fault(tod_bad, off);
2078 
2079 		/*
2080 		 * Disable dosynctodr, since we are faulting
2081 		 * the TOD chip here anyway.
2082 		 */
2083 		dosynctodr = 0;
2084 
2085 		/*
2086 		 * Set tod to the correct value from hrestime
2087 		 */
2088 		tod = hrestime.tv_sec;
2089 	}
2090 
2091 	prev_tod = tod;
2092 	prev_tick = tick;
2093 	return (tod);
2094 }
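
/*
 * Editor's note: a worked example of the rate check above.  dtick is
 * the number of hrtime ticks observed per TOD second, and dtick_avg
 * smooths it with a simple exponential filter (each sample moves the
 * average 1/TOD_FILTER_N of the way toward dtick).  Since
 *
 *	dtick_delta = (dtick_avg - TOD_REF_FREQ) / (TOD_REF_FREQ >> 2)
 *
 * the integer division yields zero until the average drifts at least
 * 25% from the reference frequency; e.g. dtick_avg == 1.3 *
 * TOD_REF_FREQ gives dtick_delta == 1 and raises TOD_RATECHANGED.
 */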
2095 
2096 static void
2097 calcloadavg(int nrun, uint64_t *hp_ave)
2098 {
2099 	static int64_t f[3] = { 135, 27, 9 };
2100 	uint_t i;
2101 	int64_t q, r;
2102 
2103 	/*
2104 	 * Compute load average over the last 1, 5, and 15 minutes
2105 	 * (60, 300, and 900 seconds).  The constants in f[3] are for
2106 	 * exponential decay:
2107 	 * (1 - exp(-1/60)) * 2^13 ~= 135,
2108 	 * (1 - exp(-1/300)) * 2^13 ~= 27,
2109 	 * (1 - exp(-1/900)) * 2^13 ~= 9.
2110 	 */
2111 
2112 	/*
2113 	 * a little hoop-jumping to avoid integer overflow
2114 	 */
2115 	for (i = 0; i < 3; i++) {
2116 		q = (hp_ave[i] >> 16) << 7;
2117 		r = (hp_ave[i] & 0xffff) << 7;
2118 		hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
2119 	}
2120 }
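
/*
 * Editor's note: a hedged reading of the fixed-point update above.
 * With hp_ave in Q16 (16 fractional bits), f[i] in Q13, and nrun
 * apparently in Q7 (hence the << 7 scaling of q and r), the statement
 * computes, in real terms,
 *
 *	ave += (nrun - ave) * (1 - exp(-1/tau))
 *
 * splitting hp_ave into integer (q) and fractional (r) parts so no
 * intermediate product overflows.  A floating-point sketch of the same
 * decay (illustrative only):
 */
#if 0	/* requires <math.h> and a hosted environment */
#include <math.h>

static double
decayed_load_after(int seconds)
{
	double ave = 0.0;
	int s;

	for (s = 0; s < seconds; s++)	/* one runnable thread throughout */
		ave += (1.0 - ave) * (1.0 - exp(-1.0 / 60.0));
	return (ave);	/* decayed_load_after(60) ~= 1 - exp(-1) ~= 0.63 */
}
#endif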
2121