xref: /freebsd/sys/kern/kern_ntptime.c (revision e8d8bef961a50d4dc22501cde4fb9fb0be1b2532)
1 /*-
2  ***********************************************************************
3  *								       *
4  * Copyright (c) David L. Mills 1993-2001			       *
5  *								       *
6  * Permission to use, copy, modify, and distribute this software and   *
7  * its documentation for any purpose and without fee is hereby	       *
8  * granted, provided that the above copyright notice appears in all    *
9  * copies and that both the copyright notice and this permission       *
10  * notice appear in supporting documentation, and that the name	       *
11  * University of Delaware not be used in advertising or publicity      *
12  * pertaining to distribution of the software without specific,	       *
13  * written prior permission. The University of Delaware makes no       *
14  * representations about the suitability of this software for any      *
15  * purpose. It is provided "as is" without express or implied	       *
16  * warranty.							       *
17  *								       *
18  **********************************************************************/
19 
20 /*
21  * Adapted from the original sources for FreeBSD and timecounters by:
22  * Poul-Henning Kamp <phk@FreeBSD.org>.
23  *
24  * The 32-bit version of the "LP" macros seems a bit past its "sell by"
25  * date, so I have retained only the 64-bit version and included it directly
26  * in this file.
27  *
28  * Only minor changes done to interface with the timecounters over in
29  * sys/kern/kern_clock.c.   Some of the comments below may be (even more)
30  * confusing and/or plain wrong in that context.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_ntp.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/eventhandler.h>
42 #include <sys/kernel.h>
43 #include <sys/priv.h>
44 #include <sys/proc.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/time.h>
48 #include <sys/timex.h>
49 #include <sys/timetc.h>
50 #include <sys/timepps.h>
51 #include <sys/syscallsubr.h>
52 #include <sys/sysctl.h>
53 
54 #ifdef PPS_SYNC
55 FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
56 #endif
57 
58 /*
59  * Single-precision macros for 64-bit machines
60  */
61 typedef int64_t l_fp;
62 #define L_ADD(v, u)	((v) += (u))
63 #define L_SUB(v, u)	((v) -= (u))
64 #define L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
65 #define L_NEG(v)	((v) = -(v))
66 #define L_RSHIFT(v, n) \
67 	do { \
68 		if ((v) < 0) \
69 			(v) = -(-(v) >> (n)); \
70 		else \
71 			(v) = (v) >> (n); \
72 	} while (0)
73 #define L_MPY(v, a)	((v) *= (a))
74 #define L_CLR(v)	((v) = 0)
75 #define L_ISNEG(v)	((v) < 0)
76 #define L_LINT(v, a)	((v) = (int64_t)(a) << 32)
77 #define L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
78 
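/*
 * Illustrative sketch (not part of the original source): how the macros
 * above are typically combined.  The helper below is hypothetical; it
 * loads a nanosecond count into l_fp format, halves it with the signed
 * shift, and extracts the integer part again.
 */
static __unused int64_t
l_fp_example(long ns)
{
	l_fp tmp;

	L_LINT(tmp, ns);	/* tmp = ns * 2^32 */
	L_RSHIFT(tmp, 1);	/* signed divide by two */
	return (L_GINT(tmp));	/* integer part: roughly ns / 2 */
}
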
79 /*
80  * Generic NTP kernel interface
81  *
82  * These routines constitute the Network Time Protocol (NTP) interfaces
83  * for user and daemon application programs. The ntp_gettime() routine
84  * provides the time, maximum error (synch distance) and estimated error
85  * (dispersion) to client user application programs. The ntp_adjtime()
86  * routine is used by the NTP daemon to adjust the system clock to an
87  * externally derived time. The time offset and related variables set by
88  * this routine are used by other routines in this module to adjust the
89  * phase and frequency of the clock discipline loop which controls the
90  * system clock.
91  *
92  * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
93  * defined), the time at each tick interrupt is derived directly from
94  * the kernel time variable. When the kernel time is reckoned in
95  * microseconds (NTP_NANO undefined), the time is derived from the
96  * kernel time variable together with a variable representing the
97  * leftover nanoseconds at the last tick interrupt. In either case, the
98  * current nanosecond time is reckoned from these values plus an
99  * interpolated value derived by the clock routines in another
100  * architecture-specific module. The interpolation can use either a
101  * dedicated counter or a processor cycle counter (PCC) implemented in
102  * some architectures.
103  *
104  * Note that all routines must run at priority splclock or higher.
105  */
106 /*
107  * Phase/frequency-lock loop (PLL/FLL) definitions
108  *
109  * The nanosecond clock discipline uses two variable types, time
110  * variables and frequency variables. Both types are represented as 64-
111  * bit fixed-point quantities with the decimal point between two 32-bit
112  * halves. On a 32-bit machine, each half is represented as a single
113  * word and mathematical operations are done using multiple-precision
114  * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
115  * used.
116  *
117  * A time variable is a signed 64-bit fixed-point number in ns and
118  * fraction. It represents the remaining time offset to be amortized
119  * over succeeding tick interrupts. The maximum time offset is about
120  * 0.5 s and the resolution is about 2.3e-10 ns.
121  *
122  *			1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
123  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
124  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
125  * |s s s|			 ns				   |
126  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
127  * |			    fraction				   |
128  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
129  *
130  * A frequency variable is a signed 64-bit fixed-point number in ns/s
131  * and fraction. It represents the ns and fraction to be added to the
132  * kernel time variable at each second. The maximum frequency offset is
133  * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
134  *
135  *			1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
136  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
137  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
138  * |s s s s s s s s s s s s s|	          ns/s			   |
139  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
140  * |			    fraction				   |
141  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
142  */
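
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): ntp_adjtime(2) expresses frequency in scaled ppm, i.e.
 * us/s * 2^16, while the kernel uses the l_fp ns/s format described
 * above.  The conversion below mirrors the MOD_FREQUENCY handling in
 * kern_ntp_adjtime(); for +100 ppm the input is 100 << 16 and the
 * result is 100000 << 32.
 */
static __unused l_fp
scaled_ppm_to_l_fp(int64_t scaled_ppm)
{

	/* 1 us/s = 1000 ns/s; the remaining 2^16 scales up to 2^32. */
	return (scaled_ppm * 1000LL * 65536LL);
}
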
143 /*
144  * The following variables establish the state of the PLL/FLL and the
145  * residual time and frequency offset of the local clock.
146  */
147 #define SHIFT_PLL	4		/* PLL loop gain (shift) */
148 #define SHIFT_FLL	2		/* FLL loop gain (shift) */
149 
150 static int time_state = TIME_OK;	/* clock state */
151 int time_status = STA_UNSYNC;	/* clock status bits */
152 static long time_tai;			/* TAI offset (s) */
153 static long time_monitor;		/* last time offset scaled (ns) */
154 static long time_constant;		/* poll interval (shift) (s) */
155 static long time_precision = 1;		/* clock precision (ns) */
156 static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
157 long time_esterror = MAXPHASE / 1000; /* estimated error (us) */
158 static long time_reftime;		/* uptime at last adjustment (s) */
159 static l_fp time_offset;		/* time offset (ns) */
160 static l_fp time_freq;			/* frequency offset (ns/s) */
161 static l_fp time_adj;			/* tick adjust (ns/s) */
162 
163 static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */
164 
165 static struct mtx ntp_lock;
166 MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);
167 
168 #define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
169 #define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
170 #define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)
171 
172 #ifdef PPS_SYNC
173 /*
174  * The following variables are used when a pulse-per-second (PPS) signal
175  * is available and connected via a modem control lead. They establish
176  * the engineering parameters of the clock discipline loop when
177  * controlled by the PPS signal.
178  */
179 #define PPS_FAVG	2		/* min freq avg interval (s) (shift) */
180 #define PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
181 #define PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
182 #define PPS_PAVG	4		/* phase avg interval (s) (shift) */
183 #define PPS_VALID	120		/* PPS signal watchdog max (s) */
184 #define PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
185 #define PPS_POPCORN	2		/* popcorn spike threshold (shift) */
186 
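/*
 * Illustrative note (not part of the original source): the PPS
 * calibration interval is (1 << pps_shift) seconds, so the shift
 * parameters above correspond to intervals from 4 s (PPS_FAVG) up to
 * 32768 s (PPS_FAVGMAX), with the default cap at 256 s (PPS_FAVGDEF).
 * The helper below is hypothetical.
 */
static __unused long
pps_interval_secs(int shift)
{

	return (1L << shift);
}
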
187 static struct timespec pps_tf[3];	/* phase median filter */
188 static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
189 static long pps_fcount;			/* frequency accumulator */
190 static long pps_jitter;			/* nominal jitter (ns) */
191 static long pps_stabil;			/* nominal stability (scaled ns/s) */
192 static long pps_lastsec;		/* time at last calibration (s) */
193 static int pps_valid;			/* signal watchdog counter */
194 static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
195 static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
196 static int pps_intcnt;			/* wander counter */
197 
198 /*
199  * PPS signal quality monitors
200  */
201 static long pps_calcnt;			/* calibration intervals */
202 static long pps_jitcnt;			/* jitter limit exceeded */
203 static long pps_stbcnt;			/* stability limit exceeded */
204 static long pps_errcnt;			/* calibration errors */
205 #endif /* PPS_SYNC */
206 /*
207  * End of phase/frequency-lock loop (PLL/FLL) definitions
208  */
209 
210 static void ntp_init(void);
211 static void hardupdate(long offset);
212 static void ntp_gettime1(struct ntptimeval *ntvp);
213 static bool ntp_is_time_error(int tsl);
214 
215 static bool
216 ntp_is_time_error(int tsl)
217 {
218 
219 	/*
220 	 * Status word error decode. If any of these conditions occur,
221 	 * the callers report TIME_ERROR instead of the clock state. Most
222 	 * applications will care only about the fact that the system clock
223 	 * may not be trusted, not about the details.
224 	 *
225 	 * Hardware or software error
226 	 */
227 	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||
228 
229 	/*
230 	 * PPS signal lost when either time or frequency synchronization
231 	 * requested
232 	 */
233 	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
234 	    !(tsl & STA_PPSSIGNAL)) ||
235 
236 	/*
237 	 * PPS jitter exceeded when time synchronization requested
238 	 */
239 	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||
240 
241 	/*
242 	 * PPS wander exceeded or calibration error when frequency
243 	 * synchronization requested
244 	 */
245 	    (tsl & STA_PPSFREQ &&
246 	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
247 		return (true);
248 
249 	return (false);
250 }
251 
252 static void
253 ntp_gettime1(struct ntptimeval *ntvp)
254 {
255 	struct timespec atv;	/* nanosecond time */
256 
257 	NTP_ASSERT_LOCKED();
258 
259 	nanotime(&atv);
260 	ntvp->time.tv_sec = atv.tv_sec;
261 	ntvp->time.tv_nsec = atv.tv_nsec;
262 	ntvp->maxerror = time_maxerror;
263 	ntvp->esterror = time_esterror;
264 	ntvp->tai = time_tai;
265 	ntvp->time_state = time_state;
266 
267 	if (ntp_is_time_error(time_status))
268 		ntvp->time_state = TIME_ERROR;
269 }
270 
271 /*
272  * ntp_gettime() - NTP user application interface
273  *
274  * See the timex.h header file for synopsis and API description.  Note that
275  * the TAI offset is returned in the ntvtimeval.tai structure member.
276  */
277 #ifndef _SYS_SYSPROTO_H_
278 struct ntp_gettime_args {
279 	struct ntptimeval *ntvp;
280 };
281 #endif
282 /* ARGSUSED */
283 int
284 sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
285 {
286 	struct ntptimeval ntv;
287 
288 	memset(&ntv, 0, sizeof(ntv));
289 
290 	NTP_LOCK();
291 	ntp_gettime1(&ntv);
292 	NTP_UNLOCK();
293 
294 	td->td_retval[0] = ntv.time_state;
295 	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
296 }
297 
298 static int
299 ntp_sysctl(SYSCTL_HANDLER_ARGS)
300 {
301 	struct ntptimeval ntv;	/* temporary structure */
302 
303 	memset(&ntv, 0, sizeof(ntv));
304 
305 	NTP_LOCK();
306 	ntp_gettime1(&ntv);
307 	NTP_UNLOCK();
308 
309 	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
310 }
311 
312 SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
313     "");
314 SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
315     CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
316     "");
317 
318 #ifdef PPS_SYNC
319 SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
320     &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
321 SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
322     &pps_shift, 0, "Interval duration (sec) (shift)");
323 SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
324     &time_monitor, 0, "Last time offset scaled (ns)");
325 
326 SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
327     &pps_freq, 0,
328     "Scaled frequency offset (ns/sec)");
329 SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
330     &time_freq, 0,
331     "Frequency offset (ns/sec)");
332 #endif
333 
334 /*
335  * ntp_adjtime() - NTP daemon application interface
336  *
337  * See the timex.h header file for synopsis and API description.  Note that
338  * the timex.constant structure member has a dual purpose to set the time
339  * constant and to set the TAI offset.
340  */
341 int
342 kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp)
343 {
344 	long freq;		/* frequency (ns/s) */
345 	int modes;		/* mode bits from structure */
346 	int error, retval;
347 
348 	/*
349 	 * Update selected clock variables - only the superuser can
350 	 * change anything. Note that there is no error checking here on
351 	 * the assumption that the superuser should know what it is doing.
352 	 * Note that either the time constant or the TAI offset is loaded
353 	 * from the ntv.constant member, depending on the mode bits. If
354 	 * the STA_PLL bit in the status word is cleared, the state and
355 	 * status words are reset to the initial values at boot.
356 	 */
357 	modes = ntv->modes;
358 	error = 0;
359 	if (modes)
360 		error = priv_check(td, PRIV_NTP_ADJTIME);
361 	if (error != 0)
362 		return (error);
363 	NTP_LOCK();
364 	if (modes & MOD_MAXERROR)
365 		time_maxerror = ntv->maxerror;
366 	if (modes & MOD_ESTERROR)
367 		time_esterror = ntv->esterror;
368 	if (modes & MOD_STATUS) {
369 		if (time_status & STA_PLL && !(ntv->status & STA_PLL)) {
370 			time_state = TIME_OK;
371 			time_status = STA_UNSYNC;
372 #ifdef PPS_SYNC
373 			pps_shift = PPS_FAVG;
374 #endif /* PPS_SYNC */
375 		}
376 		time_status &= STA_RONLY;
377 		time_status |= ntv->status & ~STA_RONLY;
378 	}
379 	if (modes & MOD_TIMECONST) {
380 		if (ntv->constant < 0)
381 			time_constant = 0;
382 		else if (ntv->constant > MAXTC)
383 			time_constant = MAXTC;
384 		else
385 			time_constant = ntv->constant;
386 	}
387 	if (modes & MOD_TAI) {
388 		if (ntv->constant > 0) /* XXX zero & negative numbers ? */
389 			time_tai = ntv->constant;
390 	}
391 #ifdef PPS_SYNC
392 	if (modes & MOD_PPSMAX) {
393 		if (ntv->shift < PPS_FAVG)
394 			pps_shiftmax = PPS_FAVG;
395 		else if (ntv->shift > PPS_FAVGMAX)
396 			pps_shiftmax = PPS_FAVGMAX;
397 		else
398 			pps_shiftmax = ntv->shift;
399 	}
400 #endif /* PPS_SYNC */
401 	if (modes & MOD_NANO)
402 		time_status |= STA_NANO;
403 	if (modes & MOD_MICRO)
404 		time_status &= ~STA_NANO;
405 	if (modes & MOD_CLKB)
406 		time_status |= STA_CLK;
407 	if (modes & MOD_CLKA)
408 		time_status &= ~STA_CLK;
409 	if (modes & MOD_FREQUENCY) {
410 		freq = (ntv->freq * 1000LL) >> 16;
411 		if (freq > MAXFREQ)
412 			L_LINT(time_freq, MAXFREQ);
413 		else if (freq < -MAXFREQ)
414 			L_LINT(time_freq, -MAXFREQ);
415 		else {
416 			/*
417 			 * ntv->freq is [PPM * 2^16] = [us/s * 2^16]
418 			 * time_freq is [ns/s * 2^32]
419 			 */
420 			time_freq = ntv->freq * 1000LL * 65536LL;
421 		}
422 #ifdef PPS_SYNC
423 		pps_freq = time_freq;
424 #endif /* PPS_SYNC */
425 	}
426 	if (modes & MOD_OFFSET) {
427 		if (time_status & STA_NANO)
428 			hardupdate(ntv->offset);
429 		else
430 			hardupdate(ntv->offset * 1000);
431 	}
432 
433 	/*
434 	 * Retrieve all clock variables. Note that the TAI offset is
435 	 * returned only by ntp_gettime().
436 	 */
437 	if (time_status & STA_NANO)
438 		ntv->offset = L_GINT(time_offset);
439 	else
440 		ntv->offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
441 	ntv->freq = L_GINT((time_freq / 1000LL) << 16);
442 	ntv->maxerror = time_maxerror;
443 	ntv->esterror = time_esterror;
444 	ntv->status = time_status;
445 	ntv->constant = time_constant;
446 	if (time_status & STA_NANO)
447 		ntv->precision = time_precision;
448 	else
449 		ntv->precision = time_precision / 1000;
450 	ntv->tolerance = MAXFREQ * SCALE_PPM;
451 #ifdef PPS_SYNC
452 	ntv->shift = pps_shift;
453 	ntv->ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
454 	if (time_status & STA_NANO)
455 		ntv->jitter = pps_jitter;
456 	else
457 		ntv->jitter = pps_jitter / 1000;
458 	ntv->stabil = pps_stabil;
459 	ntv->calcnt = pps_calcnt;
460 	ntv->errcnt = pps_errcnt;
461 	ntv->jitcnt = pps_jitcnt;
462 	ntv->stbcnt = pps_stbcnt;
463 #endif /* PPS_SYNC */
464 	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
465 	NTP_UNLOCK();
466 
467 	*retvalp = retval;
468 	return (0);
469 }
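
/*
 * Illustrative userland usage (not part of the original source): a
 * read-only query of the clock discipline, as an NTP daemon might do.
 * With modes == 0 no privilege is required and the return value is the
 * clock state (or TIME_ERROR):
 *
 *	struct timex ntx;
 *
 *	memset(&ntx, 0, sizeof(ntx));
 *	ntx.modes = 0;
 *	if (ntp_adjtime(&ntx) == TIME_ERROR)
 *		warnx("clock is not synchronized");
 */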
470 
471 #ifndef _SYS_SYSPROTO_H_
472 struct ntp_adjtime_args {
473 	struct timex *tp;
474 };
475 #endif
476 
477 int
478 sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
479 {
480 	struct timex ntv;
481 	int error, retval;
482 
483 	error = copyin(uap->tp, &ntv, sizeof(ntv));
484 	if (error == 0) {
485 		error = kern_ntp_adjtime(td, &ntv, &retval);
486 		if (error == 0) {
487 			error = copyout(&ntv, uap->tp, sizeof(ntv));
488 			if (error == 0)
489 				td->td_retval[0] = retval;
490 		}
491 	}
492 	return (error);
493 }
494 
495 /*
496  * second_overflow() - called after ntp_tick_adjust()
497  *
498  * This routine is ordinarily called immediately following the above
499  * routine ntp_tick_adjust(). While these two routines are normally
500  * combined, they are separated here only for the purposes of
501  * simulation.
502  */
503 void
504 ntp_update_second(int64_t *adjustment, time_t *newsec)
505 {
506 	int tickrate;
507 	l_fp ftemp;		/* 32/64-bit temporary */
508 
509 	NTP_LOCK();
510 
511 	/*
512 	 * On rollover of the second both the nanosecond and microsecond
513 	 * clocks are updated and the state machine cranked as
514 	 * necessary. The phase adjustment to be used for the next
515 	 * second is calculated and the maximum error is increased by
516 	 * the tolerance.
517 	 */
518 	time_maxerror += MAXFREQ / 1000;
519 
520 	/*
521 	 * Leap second processing. If in leap-insert state at
522 	 * the end of the day, the system clock is set back one
523 	 * second; if in leap-delete state, the system clock is
524 	 * set ahead one second. The nano_time() routine or
525 	 * external clock driver will ensure that reported time
526 	 * is always monotonic.
527 	 */
528 	switch (time_state) {
529 		/*
530 		 * No warning.
531 		 */
532 		case TIME_OK:
533 		if (time_status & STA_INS)
534 			time_state = TIME_INS;
535 		else if (time_status & STA_DEL)
536 			time_state = TIME_DEL;
537 		break;
538 
539 		/*
540 		 * Insert second 23:59:60 following second
541 		 * 23:59:59.
542 		 */
543 		case TIME_INS:
544 		if (!(time_status & STA_INS))
545 			time_state = TIME_OK;
546 		else if ((*newsec) % 86400 == 0) {
547 			(*newsec)--;
548 			time_state = TIME_OOP;
549 			time_tai++;
550 		}
551 		break;
552 
553 		/*
554 		 * Delete second 23:59:59.
555 		 */
556 		case TIME_DEL:
557 		if (!(time_status & STA_DEL))
558 			time_state = TIME_OK;
559 		else if (((*newsec) + 1) % 86400 == 0) {
560 			(*newsec)++;
561 			time_tai--;
562 			time_state = TIME_WAIT;
563 		}
564 		break;
565 
566 		/*
567 		 * Insert second in progress.
568 		 */
569 		case TIME_OOP:
570 			time_state = TIME_WAIT;
571 		break;
572 
573 		/*
574 		 * Wait for status bits to clear.
575 		 */
576 		case TIME_WAIT:
577 		if (!(time_status & (STA_INS | STA_DEL)))
578 			time_state = TIME_OK;
579 	}
580 
581 	/*
582 	 * Compute the total time adjustment for the next second
583 	 * in ns. The offset is reduced by a factor depending on
584 	 * whether the PPS signal is operating. Note that the
585 	 * value is in effect scaled by the clock frequency,
586 	 * since the adjustment is added at each tick interrupt.
587 	 */
588 	ftemp = time_offset;
589 #ifdef PPS_SYNC
590 	/* XXX even if PPS signal dies we should finish adjustment ? */
591 	if (time_status & STA_PPSTIME && time_status &
592 	    STA_PPSSIGNAL)
593 		L_RSHIFT(ftemp, pps_shift);
594 	else
595 		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
596 #else
597 	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
598 #endif /* PPS_SYNC */
599 	time_adj = ftemp;
600 	L_SUB(time_offset, ftemp);
601 	L_ADD(time_adj, time_freq);
602 
603 	/*
604 	 * Apply any correction from adjtime(2).  If more than one second
605 	 * off, we slew at a rate of 5 ms/s (5000 PPM); otherwise 500 us/s
606 	 * (500 PPM), with the final < 500 usec applied in a single step.
607 	 */
608 	if (time_adjtime != 0) {
609 		if (time_adjtime > 1000000)
610 			tickrate = 5000;
611 		else if (time_adjtime < -1000000)
612 			tickrate = -5000;
613 		else if (time_adjtime > 500)
614 			tickrate = 500;
615 		else if (time_adjtime < -500)
616 			tickrate = -500;
617 		else
618 			tickrate = time_adjtime;
619 		time_adjtime -= tickrate;
620 		L_LINT(ftemp, tickrate * 1000);
621 		L_ADD(time_adj, ftemp);
622 	}
623 	*adjustment = time_adj;
624 
625 #ifdef PPS_SYNC
626 	if (pps_valid > 0)
627 		pps_valid--;
628 	else
629 		time_status &= ~STA_PPSSIGNAL;
630 #endif /* PPS_SYNC */
631 
632 	NTP_UNLOCK();
633 }
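
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): the adjtime(2) slew-rate selection above, restated on its
 * own.  Corrections larger than one second are slewed at 5000 us/s,
 * smaller ones at 500 us/s, and the last < 500 us is applied at once.
 */
static __unused int
adjtime_slew_rate(int64_t usec_left)
{

	if (usec_left > 1000000)
		return (5000);
	else if (usec_left < -1000000)
		return (-5000);
	else if (usec_left > 500)
		return (500);
	else if (usec_left < -500)
		return (-500);
	else
		return ((int)usec_left);
}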
634 
635 /*
636  * ntp_init() - initialize variables and structures
637  *
638  * This routine must be called after the kernel variables hz and tick
639  * are set or changed and before the next tick interrupt. In this
640  * particular implementation, these values are assumed set elsewhere in
641  * the kernel. The design allows the clock frequency and tick interval
642  * to be changed while the system is running. So, this routine should
643  * probably be integrated with the code that does that.
644  */
645 static void
646 ntp_init(void)
647 {
648 
649 	/*
650 	 * The following variables are initialized only at startup. Only
651 	 * those structures not cleared by the compiler need to be
652 	 * initialized, and these only in the simulator. In the actual
653 	 * kernel, any nonzero values here will quickly evaporate.
654 	 */
655 	L_CLR(time_offset);
656 	L_CLR(time_freq);
657 #ifdef PPS_SYNC
658 	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
659 	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
660 	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
661 	pps_fcount = 0;
662 	L_CLR(pps_freq);
663 #endif /* PPS_SYNC */
664 }
665 
666 SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);
667 
668 /*
669  * hardupdate() - local clock update
670  *
671  * This routine is called by ntp_adjtime() to update the local clock
672  * phase and frequency. The implementation is of an adaptive-parameter,
673  * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
674  * time and frequency offset estimates for each call. If the kernel PPS
675  * discipline code is configured (PPS_SYNC), the PPS signal itself
676  * determines the new time offset, instead of the calling argument.
677  * Presumably, calls to ntp_adjtime() occur only when the caller
678  * believes the local clock is valid within some bound (+-128 ms with
679  * NTP). If the caller's time is far different than the PPS time, an
680  * argument will ensue, and it's not clear who will lose.
681  *
682  * For uncompensated quartz crystal oscillators and nominal update
683  * intervals less than 256 s, operation should be in phase-lock mode,
684  * where the loop is disciplined to phase. For update intervals greater
685  * than 1024 s, operation should be in frequency-lock mode, where the
686  * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
687  * is selected by the STA_MODE status bit.
688  */
689 static void
690 hardupdate(long offset)
691 	/* offset: clock offset (ns) */
692 {
693 	long mtemp;
694 	l_fp ftemp;
695 
696 	NTP_ASSERT_LOCKED();
697 
698 	/*
699 	 * Select how the phase is to be controlled and from which
700 	 * source. If the PPS signal is present and enabled to
701 	 * discipline the time, the PPS offset is used; otherwise, the
702 	 * argument offset is used.
703 	 */
704 	if (!(time_status & STA_PLL))
705 		return;
706 	if (!(time_status & STA_PPSTIME && time_status &
707 	    STA_PPSSIGNAL)) {
708 		if (offset > MAXPHASE)
709 			time_monitor = MAXPHASE;
710 		else if (offset < -MAXPHASE)
711 			time_monitor = -MAXPHASE;
712 		else
713 			time_monitor = offset;
714 		L_LINT(time_offset, time_monitor);
715 	}
716 
717 	/*
718 	 * Select how the frequency is to be controlled and in which
719 	 * mode (PLL or FLL). If the PPS signal is present and enabled
720 	 * to discipline the frequency, the PPS frequency is used;
721 	 * otherwise, the argument offset is used to compute it.
722 	 */
723 	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
724 		time_reftime = time_uptime;
725 		return;
726 	}
727 	if (time_status & STA_FREQHOLD || time_reftime == 0)
728 		time_reftime = time_uptime;
729 	mtemp = time_uptime - time_reftime;
730 	L_LINT(ftemp, time_monitor);
731 	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
732 	L_MPY(ftemp, mtemp);
733 	L_ADD(time_freq, ftemp);
734 	time_status &= ~STA_MODE;
735 	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
736 	    MAXSEC)) {
737 		L_LINT(ftemp, (time_monitor << 4) / mtemp);
738 		L_RSHIFT(ftemp, SHIFT_FLL + 4);
739 		L_ADD(time_freq, ftemp);
740 		time_status |= STA_MODE;
741 	}
742 	time_reftime = time_uptime;
743 	if (L_GINT(time_freq) > MAXFREQ)
744 		L_LINT(time_freq, MAXFREQ);
745 	else if (L_GINT(time_freq) < -MAXFREQ)
746 		L_LINT(time_freq, -MAXFREQ);
747 }
748 
749 #ifdef PPS_SYNC
750 /*
751  * hardpps() - discipline CPU clock oscillator to external PPS signal
752  *
753  * This routine is called at each PPS interrupt in order to discipline
754  * the CPU clock oscillator to the PPS signal. There are two independent
755  * first-order feedback loops, one for the phase, the other for the
756  * frequency. The phase loop measures and grooms the PPS phase offset
757  * and leaves it in a handy spot for the seconds overflow routine. The
758  * frequency loop averages successive PPS phase differences and
759  * calculates the PPS frequency offset, which is also processed by the
760  * seconds overflow routine. The code requires the caller to capture the
761  * time and architecture-dependent hardware counter values in
762  * nanoseconds at the on-time PPS signal transition.
763  *
764  * Note that, on some Unix systems this routine runs at an interrupt
765  * priority level higher than the timer interrupt routine hardclock().
766  * Therefore, the variables used are distinct from the hardclock()
767  * variables, except for the actual time and frequency variables, which
768  * are determined by this routine and updated atomically.
769  *
770  * tsp  - time at PPS
771  * nsec - hardware counter at PPS
772  */
773 void
774 hardpps(struct timespec *tsp, long nsec)
775 {
776 	long u_sec, u_nsec, v_nsec; /* temps */
777 	l_fp ftemp;
778 
779 	NTP_LOCK();
780 
781 	/*
782 	 * The signal is first processed by a range gate and frequency
783 	 * discriminator. The range gate rejects noise spikes outside
784 	 * the range +-500 us. The frequency discriminator rejects input
785 	 * signals with apparent frequency outside the range 1 +-500
786 	 * PPM. If two hits occur in the same second, we ignore the
787 	 * later hit; if not and a hit occurs outside the range gate,
788 	 * keep the later hit for later comparison, but do not process
789 	 * it.
790 	 */
791 	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
792 	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
793 	pps_valid = PPS_VALID;
794 	u_sec = tsp->tv_sec;
795 	u_nsec = tsp->tv_nsec;
796 	if (u_nsec >= (NANOSECOND >> 1)) {
797 		u_nsec -= NANOSECOND;
798 		u_sec++;
799 	}
800 	v_nsec = u_nsec - pps_tf[0].tv_nsec;
801 	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
802 		goto out;
803 	pps_tf[2] = pps_tf[1];
804 	pps_tf[1] = pps_tf[0];
805 	pps_tf[0].tv_sec = u_sec;
806 	pps_tf[0].tv_nsec = u_nsec;
807 
808 	/*
809 	 * Compute the difference between the current and previous
810 	 * counter values. If the difference exceeds 0.5 s, assume it
811 	 * has wrapped around, so correct 1.0 s. If the result exceeds
812 	 * the tick interval, the sample point has crossed a tick
813 	 * boundary during the last second, so correct the tick. Very
814 	 * intricate.
815 	 */
816 	u_nsec = nsec;
817 	if (u_nsec > (NANOSECOND >> 1))
818 		u_nsec -= NANOSECOND;
819 	else if (u_nsec < -(NANOSECOND >> 1))
820 		u_nsec += NANOSECOND;
821 	pps_fcount += u_nsec;
822 	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
823 		goto out;
824 	time_status &= ~STA_PPSJITTER;
825 
826 	/*
827 	 * A three-stage median filter is used to help denoise the PPS
828 	 * time. The median sample becomes the time offset estimate; the
829 	 * difference between the other two samples becomes the time
830 	 * dispersion (jitter) estimate.
831 	 */
832 	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
833 		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
834 			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
835 			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
836 		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
837 			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
838 			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
839 		} else {
840 			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
841 			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
842 		}
843 	} else {
844 		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
845 			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
846 			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
847 		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
848 			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
849 			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
850 		} else {
851 			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
852 			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
853 		}
854 	}
855 
856 	/*
857 	 * Nominal jitter is due to PPS signal noise and interrupt
858 	 * latency. If it exceeds the popcorn threshold, the sample is
859 	 * discarded; otherwise, if so enabled, the time offset is
860 	 * updated. We can tolerate a modest loss of data here without
861 	 * much degrading time accuracy.
862 	 *
863 	 * The measurements being checked here were made with the system
864 	 * timecounter, so the popcorn threshold is not allowed to fall below
865 	 * the number of nanoseconds in two ticks of the timecounter.  For a
866 	 * timecounter running faster than 1 GHz, the lower bound is 2 ns, just
867 	 * to avoid a nonsensical threshold of zero.
868 	 */
869 	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
870 	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
871 		time_status |= STA_PPSJITTER;
872 		pps_jitcnt++;
873 	} else if (time_status & STA_PPSTIME) {
874 		time_monitor = -v_nsec;
875 		L_LINT(time_offset, time_monitor);
876 	}
877 	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
878 	u_sec = pps_tf[0].tv_sec - pps_lastsec;
879 	if (u_sec < (1 << pps_shift))
880 		goto out;
881 
882 	/*
883 	 * At the end of the calibration interval the difference between
884 	 * the first and last counter values becomes the scaled
885 	 * frequency. It will later be divided by the length of the
886 	 * interval to determine the frequency update. If the frequency
887 	 * exceeds a sanity threshold, or if the actual calibration
888 	 * interval is not equal to the expected length, the data are
889 	 * discarded. We can tolerate a modest loss of data here without
890 	 * much degrading frequency accuracy.
891 	 */
892 	pps_calcnt++;
893 	v_nsec = -pps_fcount;
894 	pps_lastsec = pps_tf[0].tv_sec;
895 	pps_fcount = 0;
896 	u_nsec = MAXFREQ << pps_shift;
897 	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
898 		time_status |= STA_PPSERROR;
899 		pps_errcnt++;
900 		goto out;
901 	}
902 
903 	/*
904 	 * Here, the raw frequency offset and wander (stability) are
905 	 * calculated. If the wander is less than the wander threshold
906 	 * for four consecutive averaging intervals, the interval is
907 	 * doubled; if it is greater than the threshold for four
908 	 * consecutive intervals, the interval is halved. The scaled
909 	 * frequency offset is converted to frequency offset. The
910 	 * stability metric is calculated as the average of recent
911 	 * frequency changes, but is used only for performance
912 	 * monitoring.
913 	 */
914 	L_LINT(ftemp, v_nsec);
915 	L_RSHIFT(ftemp, pps_shift);
916 	L_SUB(ftemp, pps_freq);
917 	u_nsec = L_GINT(ftemp);
918 	if (u_nsec > PPS_MAXWANDER) {
919 		L_LINT(ftemp, PPS_MAXWANDER);
920 		pps_intcnt--;
921 		time_status |= STA_PPSWANDER;
922 		pps_stbcnt++;
923 	} else if (u_nsec < -PPS_MAXWANDER) {
924 		L_LINT(ftemp, -PPS_MAXWANDER);
925 		pps_intcnt--;
926 		time_status |= STA_PPSWANDER;
927 		pps_stbcnt++;
928 	} else {
929 		pps_intcnt++;
930 	}
931 	if (pps_intcnt >= 4) {
932 		pps_intcnt = 4;
933 		if (pps_shift < pps_shiftmax) {
934 			pps_shift++;
935 			pps_intcnt = 0;
936 		}
937 	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
938 		pps_intcnt = -4;
939 		if (pps_shift > PPS_FAVG) {
940 			pps_shift--;
941 			pps_intcnt = 0;
942 		}
943 	}
944 	if (u_nsec < 0)
945 		u_nsec = -u_nsec;
946 	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
947 
948 	/*
949 	 * The PPS frequency is recalculated and clamped to the maximum
950 	 * MAXFREQ. If enabled, the system clock frequency is updated as
951 	 * well.
952 	 */
953 	L_ADD(pps_freq, ftemp);
954 	u_nsec = L_GINT(pps_freq);
955 	if (u_nsec > MAXFREQ)
956 		L_LINT(pps_freq, MAXFREQ);
957 	else if (u_nsec < -MAXFREQ)
958 		L_LINT(pps_freq, -MAXFREQ);
959 	if (time_status & STA_PPSFREQ)
960 		time_freq = pps_freq;
961 
962 out:
963 	NTP_UNLOCK();
964 }
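
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): the three-stage median filter used above, restated as a
 * standalone routine.  It returns the median of three phase samples
 * and stores the difference of the two outer samples (the jitter
 * estimate) in *jitterp.
 */
static __unused long
pps_median3(long a, long b, long c, long *jitterp)
{
	long med, jit;

	if (a > b) {
		if (b > c) {			/* a b c */
			med = b;
			jit = a - c;
		} else if (c > a) {		/* c a b */
			med = a;
			jit = c - b;
		} else {			/* a c b */
			med = c;
			jit = a - b;
		}
	} else {
		if (b < c) {			/* c b a */
			med = b;
			jit = c - a;
		} else if (c < a) {		/* b a c */
			med = a;
			jit = b - c;
		} else {			/* b c a */
			med = c;
			jit = b - a;
		}
	}
	*jitterp = jit;
	return (med);
}
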
965 #endif /* PPS_SYNC */
966 
967 #ifndef _SYS_SYSPROTO_H_
968 struct adjtime_args {
969 	struct timeval *delta;
970 	struct timeval *olddelta;
971 };
972 #endif
973 /* ARGSUSED */
974 int
975 sys_adjtime(struct thread *td, struct adjtime_args *uap)
976 {
977 	struct timeval delta, olddelta, *deltap;
978 	int error;
979 
980 	if (uap->delta) {
981 		error = copyin(uap->delta, &delta, sizeof(delta));
982 		if (error)
983 			return (error);
984 		deltap = &delta;
985 	} else
986 		deltap = NULL;
987 	error = kern_adjtime(td, deltap, &olddelta);
988 	if (uap->olddelta && error == 0)
989 		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
990 	return (error);
991 }
992 
993 int
994 kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
995 {
996 	struct timeval atv;
997 	int64_t ltr, ltw;
998 	int error;
999 
1000 	if (delta != NULL) {
1001 		error = priv_check(td, PRIV_ADJTIME);
1002 		if (error != 0)
1003 			return (error);
1004 		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
1005 	}
1006 	NTP_LOCK();
1007 	ltr = time_adjtime;
1008 	if (delta != NULL)
1009 		time_adjtime = ltw;
1010 	NTP_UNLOCK();
1011 	if (olddelta != NULL) {
1012 		atv.tv_sec = ltr / 1000000;
1013 		atv.tv_usec = ltr % 1000000;
1014 		if (atv.tv_usec < 0) {
1015 			atv.tv_usec += 1000000;
1016 			atv.tv_sec--;
1017 		}
1018 		*olddelta = atv;
1019 	}
1020 	return (0);
1021 }
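
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): the normalization performed above for olddelta, which turns
 * a signed microsecond count into a timeval with tv_usec in
 * [0, 1000000).  For example, -1500000 us becomes
 * { .tv_sec = -2, .tv_usec = 500000 }.
 */
static __unused struct timeval
usec_to_timeval(int64_t usec)
{
	struct timeval tv;

	tv.tv_sec = usec / 1000000;
	tv.tv_usec = usec % 1000000;
	if (tv.tv_usec < 0) {
		tv.tv_usec += 1000000;
		tv.tv_sec--;
	}
	return (tv);
}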
1022 
1023 static struct callout resettodr_callout;
1024 static int resettodr_period = 1800;
1025 
1026 static void
1027 periodic_resettodr(void *arg __unused)
1028 {
1029 
1030 	/*
1031 	 * The read of time_status is lockless, which is fine since
1032 	 * ntp_is_time_error() operates on a single, consistently read value.
1033 	 */
1034 	if (!ntp_is_time_error(time_status))
1035 		resettodr();
1036 	if (resettodr_period > 0)
1037 		callout_schedule(&resettodr_callout, resettodr_period * hz);
1038 }
1039 
1040 static void
1041 shutdown_resettodr(void *arg __unused, int howto __unused)
1042 {
1043 
1044 	callout_drain(&resettodr_callout);
1045 	/* Another unlocked read of time_status */
1046 	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
1047 		resettodr();
1048 }
1049 
1050 static int
1051 sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
1052 {
1053 	int error;
1054 
1055 	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
1056 	if (error || !req->newptr)
1057 		return (error);
1058 	if (cold)
1059 		goto done;
1060 	if (resettodr_period == 0)
1061 		callout_stop(&resettodr_callout);
1062 	else
1063 		callout_reset(&resettodr_callout, resettodr_period * hz,
1064 		    periodic_resettodr, NULL);
1065 done:
1066 	return (0);
1067 }
1068 
1069 SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
1070     CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
1071     "Save system time to RTC with this period (in seconds)");
1072 
1073 static void
1074 start_periodic_resettodr(void *arg __unused)
1075 {
1076 
1077 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
1078 	    SHUTDOWN_PRI_FIRST);
1079 	callout_init(&resettodr_callout, 1);
1080 	if (resettodr_period == 0)
1081 		return;
1082 	callout_reset(&resettodr_callout, resettodr_period * hz,
1083 	    periodic_resettodr, NULL);
1084 }
1085 
1086 SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
1087 	start_periodic_resettodr, NULL);
1088