/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.27 1996/10/10 10:25:03 bde Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its      *
 * documentation for any purpose and without fee is hereby granted, provided  *
 * that the above copyright notice appears in all copies and that both the    *
 * copyright notice and this permission notice appear in supporting           *
 * documentation, and that the name University of Delaware not be used in     *
 * advertising or publicity pertaining to distribution of the software        *
 * without specific, written prior permission.  The University of Delaware    *
 * makes no representations about the suitability of this software for any    *
 * purpose.  It is provided "as is" without express or implied warranty.      *
 *                                                                            *
 *****************************************************************************/

#include "opt_cpu.h"		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#define CLOCK_HAIR		/* XXX */
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Exported to machdep.c. */
struct callout *callfree, *callout;

static struct callout calltodo;

/* Some of these don't belong here, but it's easiest to concentrate them. */
static long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
static long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = 0;
char dk_names[DK_NDRIVE][DK_NAMELEN];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
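 *
 * For example, bumping { 1, 999900 } by 200 us yields { 2, 100 }; a
 * single overflow check suffices because the increment is assumed to
 * be less than 1000000 us.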
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int psratio;			/* ratio: prof / stat */

volatile struct	timeval time;
volatile struct	timeval mono_time;

/*
 * Phase-lock loop (PLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as a result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error
 * bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_status = STA_UNSYNC;	/* clock status bits */
int time_state = TIME_OK;	/* clock state */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed at each timer interrupt.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
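 *
 * For example, at hz = 100 each tick nominally advances the clock by
 * tick = 10000 us; time_adj distributes the per-second phase and
 * frequency corrections across those 100 ticks in scaled units.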
 */
static long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
static long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
static long time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS
 * discipline code is configured (PPS_SYNC). The scale factors are
 * defined in the timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion measured by this
 * filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion measured by
 * this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_count counts the seconds of the calibration interval, the
 * duration of which is pps_shift in powers of two.
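 * For example, pps_shift = 2 gives a 1 << 2 = 4 s calibration interval;
 * the interval adapts between 4 s and 256 s (see the quality monitors
 * below).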
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset; /* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. This is used to implement an adaptive-parameter,
 * first-order, type-II phase-lock loop. The code computes new time and
 * frequency offsets each time it is called. The hardclock() routine
 * amortizes these offsets at each tick interrupt. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the
 * maximum interval between updates is 4096 s and the maximum frequency
 * offset is +-31.25 ms/s.
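 *
 * For example, a 700 ms argument is clamped to MAXPHASE (512 ms) before
 * being scaled left by SHIFT_UPDATE into time_offset.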
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (mtemp > MAXSEC)
		mtemp = 0;

	/* ugly multiply should be replaced */
	if (ltemp < 0)
		time_freq -= (-ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	else
		time_freq += (ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}



/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
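	 * For example, if cpu_initclocks() left stathz = 128 and
	 * profhz = 1024, psratio would come out to 8.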
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
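	 *
	 * For example, events due in 3, 5 and 9 ticks are queued with
	 * c_time values 3, 2 and 4; decrementing the first entry
	 * effectively ages all three.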
	 */
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.
	 */
	ticks++;
	{
		int time_update;
		struct timeval newtime = time;
		long ltemp;

		if (timedelta == 0) {
			time_update = CPU_THISTICKLEN(tick);
		} else {
			time_update = CPU_THISTICKLEN(tick) + tickdelta;
			timedelta -= tickdelta;
		}
		BUMPTIME(&mono_time, time_update);

		/*
		 * Compute the phase adjustment. If the low-order bits
		 * (time_phase) of the update overflow, bump the high-order bits
		 * (time_update).
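		 *
		 * For example, once time_phase accumulates a full scaled
		 * microsecond (FINEUSEC = 1 << SHIFT_SCALE), that
		 * microsecond is carried into time_update and applied on
		 * this tick.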
		 */
		time_phase += time_adj;
		if (time_phase <= -FINEUSEC) {
		  ltemp = -time_phase >> SHIFT_SCALE;
		  time_phase += ltemp << SHIFT_SCALE;
		  time_update -= ltemp;
		}
		else if (time_phase >= FINEUSEC) {
		  ltemp = time_phase >> SHIFT_SCALE;
		  time_phase -= ltemp << SHIFT_SCALE;
		  time_update += ltemp;
		}

		newtime.tv_usec += time_update;
		/*
		 * On rollover of the second the phase adjustment to be used for
		 * the next second is calculated. Also, the maximum error is
		 * increased by the tolerance. If the PPS frequency discipline
		 * code is present, the phase is increased to compensate for the
		 * CPU clock oscillator frequency error.
		 *
		 * With SHIFT_SCALE = 23, the maximum frequency adjustment is
		 * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100
		 * Hz. The time contribution is shifted right a minimum of two
		 * bits, while the frequency contribution is a right shift.
		 * Thus, overflow is prevented if the frequency contribution is
		 * limited to half the maximum or 15.625 ms/s.
		 */
		if (newtime.tv_usec >= 1000000) {
		  newtime.tv_usec -= 1000000;
		  newtime.tv_sec++;
		  time_maxerror += time_tolerance >> SHIFT_USEC;
		  if (time_offset < 0) {
		    ltemp = -time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset += ltemp;
		    time_adj = -ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  } else {
		    ltemp = time_offset >>
		      (SHIFT_KG + time_constant);
		    time_offset -= ltemp;
		    time_adj = ltemp <<
		      (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		  }
#ifdef PPS_SYNC
		  /*
		   * Gnaw on the watchdog counter and update the frequency
		   * computed by the pll and the PPS signal.
		   */
		  pps_valid++;
		  if (pps_valid == PPS_VALID) {
		    pps_jitter = MAXTIME;
		    pps_stabil = MAXFREQ;
		    time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				     STA_PPSWANDER | STA_PPSERROR);
		  }
		  ltemp = time_freq + pps_freq;
#else
		  ltemp = time_freq;
#endif /* PPS_SYNC */
		  if (ltemp < 0)
		    time_adj -= -ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
		  else
		    time_adj += ltemp >>
		      (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

		  /*
		   * When the CPU clock oscillator frequency is not a
		   * power of two in Hz, the SHIFT_HZ is only an
		   * approximate scale factor. In the SunOS kernel, this
		   * results in a PLL gain factor of 1/1.28, or 0.78 of what it
		   * should be. In the following code the overall gain is
		   * increased by a factor of 1.25, which results in a
		   * residual error less than 3 percent.
		   */
		  /* Same thing applies for FreeBSD --GAW */
		  if (hz == 100) {
		    if (time_adj < 0)
		      time_adj -= -time_adj >> 2;
		    else
		      time_adj += time_adj >> 2;
		  }

		  /* XXX - this is really bogus, but can't be fixed until
		     xntpd's idea of the system clock is fixed to know how
		     the user wants leap seconds handled; in the mean time,
		     we assume that users of NTP are running without proper
		     leap second support (this is now the default anyway) */
		  /*
		   * Leap second processing. If in leap-insert state at
		   * the end of the day, the system clock is set back one
		   * second; if in leap-delete state, the system clock is
		   * set ahead one second. The microtime() routine or
		   * external clock driver will insure that reported time
		   * is always monotonic. The ugly divides should be
		   * replaced.
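		   *
		   * For example, with STA_INS set, when tv_sec reaches a
		   * multiple of 86400 (midnight UTC) it is stepped back one
		   * second, so the last second of the day repeats.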
		   */
		  switch (time_state) {

		  case TIME_OK:
		    if (time_status & STA_INS)
		      time_state = TIME_INS;
		    else if (time_status & STA_DEL)
		      time_state = TIME_DEL;
		    break;

		  case TIME_INS:
		    if (newtime.tv_sec % 86400 == 0) {
		      newtime.tv_sec--;
		      time_state = TIME_OOP;
		    }
		    break;

		  case TIME_DEL:
		    if ((newtime.tv_sec + 1) % 86400 == 0) {
		      newtime.tv_sec++;
		      time_state = TIME_WAIT;
		    }
		    break;

		  case TIME_OOP:
		    time_state = TIME_WAIT;
		    break;

		  case TIME_WAIT:
		    if (!(time_status & (STA_INS | STA_DEL)))
		      time_state = TIME_OK;
		  }
		}
		CPU_CLOCKUPDATE(&time, &newtime);
	}

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
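 *
 *	For example, a hypothetical driver might schedule a callback one
 *	second out and later cancel it using the same function/argument
 *	pair:
 *		timeout(foo_tick, sc, hz);
 *		untimeout(foo_tick, sc);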
 */
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;
	int s;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
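	 *
	 * For example, with hz = 100 (tick = 10000 us), a difference of
	 * 2.5 s yields (2500000 + 9999) / 10000 + 1 = 251 ticks.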
	 */
	s = splclock();
	sec = tv->tv_sec - time.tv_sec;
	usec = tv->tv_usec - time.tv_usec;
	splx(s);
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		printf("hzto: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
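 *
 * While profiling, this clock runs at profhz and pscnt counts down from
 * psdiv (set to psratio by startprofclock()), so statistics are still
 * accumulated at the stathz rate even though every tick is profiled.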
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p;
	register int i;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	if (CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
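	 *
	 * For example, with stathz = 128 a compute-bound process
	 * accumulates p_estcpu at about 128 per second, so resetpriority()
	 * runs roughly 32 times per second for it.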
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			rss = vm->vm_pmap.pm_stats.resident_count *
			      PAGE_SIZE / 1024;
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Return information about system clocks.
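 *
 * Userland reads this via the kern.clockrate sysctl (KERN_CLOCKRATE),
 * which returns the struct clockinfo constructed below.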
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact that valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= ntp_pll.ybar;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	ntp_pll.calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> ntp_pll.shift);
	else
		v_usec = v_usec >> ntp_pll.shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
		ntp_pll.jitcnt++;
		ntp_pll.shift = NTP_PLL.SHIFT;
		pps_dispinc = PPS_DISPINC;
		ntp_pll.intcnt = 0;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * signal. The median sample becomes the offset estimate; the
	 * difference between the other two samples becomes the
	 * dispersion estimate.
	 */
	pps_mf[2] = pps_mf[1];
	pps_mf[1] = pps_mf[0];
	pps_mf[0] = v_usec;
	if (pps_mf[0] > pps_mf[1]) {
		if (pps_mf[1] > pps_mf[2]) {
			u_usec = pps_mf[1];		/* 0 1 2 */
			v_usec = pps_mf[0] - pps_mf[2];
		} else if (pps_mf[2] > pps_mf[0]) {
			u_usec = pps_mf[0];		/* 2 0 1 */
			v_usec = pps_mf[2] - pps_mf[1];
		} else {
			u_usec = pps_mf[2];		/* 0 2 1 */
			v_usec = pps_mf[0] - pps_mf[1];
		}
	} else {
		if (pps_mf[1] < pps_mf[2]) {
			u_usec = pps_mf[1];		/* 2 1 0 */
			v_usec = pps_mf[2] - pps_mf[0];
		} else  if (pps_mf[2] < pps_mf[0]) {
			u_usec = pps_mf[0];		/* 1 0 2 */
			v_usec = pps_mf[1] - pps_mf[2];
		} else {
			u_usec = pps_mf[2];		/* 1 2 0 */
			v_usec = pps_mf[1] - pps_mf[0];
		}
	}

	/*
	 * Here the dispersion average is updated. If it is less than
	 * the threshold pps_dispmax, the frequency average is updated
	 * as well, but clamped to the tolerance.
	 */
	v_usec = (v_usec >> 1) - ntp_pll.disp;
	if (v_usec < 0)
		ntp_pll.disp -= -v_usec >> PPS_AVG;
	else
		ntp_pll.disp += v_usec >> PPS_AVG;
	if (ntp_pll.disp > pps_dispmax) {
		ntp_pll.discnt++;
		return;
	}
	if (u_usec < 0) {
		ntp_pll.ybar -= -u_usec >> PPS_AVG;
		if (ntp_pll.ybar < -ntp_pll.tolerance)
			ntp_pll.ybar = -ntp_pll.tolerance;
		u_usec = -u_usec;
	} else {
		ntp_pll.ybar += u_usec >> PPS_AVG;
		if (ntp_pll.ybar > ntp_pll.tolerance)
			ntp_pll.ybar = ntp_pll.tolerance;
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick/4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << ntp_pll.shift > bigtick >> 2) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift > NTP_PLL.SHIFT) {
			ntp_pll.shift--;
			pps_dispinc <<= 1;
		}
	} else if (ntp_pll.intcnt >= 4) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
			ntp_pll.shift++;
			pps_dispinc >>= 1;
		}
	} else
		ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */