/*-
 ***********************************************************************
 * *
 * Copyright (c) David L. Mills 1993-2001 *
 * *
 * Permission to use, copy, modify, and distribute this software and *
 * its documentation for any purpose and without fee is hereby *
 * granted, provided that the above copyright notice appears in all *
 * copies and that both the copyright notice and this permission *
 * notice appear in supporting documentation, and that the name *
 * University of Delaware not be used in advertising or publicity *
 * pertaining to distribution of the software without specific, *
 * written prior permission. The University of Delaware makes no *
 * representations about the suitability this software for any *
 * purpose. It is provided "as is" without express or implied *
 * warranty. *
 * *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define	L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a) \
	do { \
		if ((a) < 0) \
			((v) = -((int64_t)(-(a)) << 32)); \
		else \
			((v) = (int64_t)(a) << 32); \
	} while (0)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
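
/*
 * Editor's example (not part of the original code): how the l_fp
 * helpers above are typically combined.  Assuming a 100 us (100000 ns)
 * phase offset and the SHIFT_PLL value of 4 defined below:
 *
 *	l_fp off;
 *	L_LINT(off, 100000);		off = 100000 * 2^32 (ns, fixed point)
 *	L_RSHIFT(off, SHIFT_PLL);	off = 6250 * 2^32 (100000 / 16)
 *	long ns = L_GINT(off);		ns == 6250, the integer ns part
 */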

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds, (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                           ns                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|                ns/s                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
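
/*
 * Editor's note (worked example, not in the original): one unit in the
 * fraction half above is 1 / 2^32 ns ~= 2.3e-10 ns, which is the
 * resolution quoted for both variable types, while a full-scale 0.5 s
 * offset is stored as 500000000 * 2^32 and still fits easily in a
 * signed 64-bit l_fp.
 */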
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define	SHIFT_PLL	4		/* PLL loop gain (shift) */
#define	SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

static struct mtx ntp_lock;
MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);

#define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
#define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
#define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define	PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define	PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define	PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define	PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define	PPS_VALID	120		/* PPS signal watchdog max (s) */
#define	PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define	PPS_POPCORN	2		/* popcorn spike threshold (shift) */
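
/*
 * Editor's note (illustrative, not in the original): the "(shift)"
 * parameters above are exponents of two.  With the defaults the
 * calibration interval starts at 2^PPS_FAVG = 4 s and may grow to
 * 2^PPS_FAVGDEF = 256 s; the ceiling can be raised as far as
 * 2^PPS_FAVGMAX = 32768 s through the kern.ntp_pll.pps_shiftmax sysctl
 * declared below.
 */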

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static time_t pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static bool
ntp_is_time_error(int tsl)
{

	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact the system clock
	 * may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(tsl & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (tsl & STA_PPSFREQ &&
	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
		return (true);

	return (false);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	NTP_ASSERT_LOCKED();

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error(time_status))
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	memset(&ntv, 0, sizeof(ntv));

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	memset(&ntv, 0, sizeof(ntv));

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
    "");
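
/*
 * Editor's sketch (assumption, not part of the original file): the
 * opaque node above can be read from userland without the syscall:
 *
 *	struct ntptimeval ntv;
 *	size_t len = sizeof(ntv);
 *	if (sysctlbyname("kern.ntp_pll.gettime", &ntv, &len, NULL, 0) == 0)
 *		printf("state %d, maxerror %ld us\n",
 *		    ntv.time_state, ntv.maxerror);
 */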

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &pps_freq, 0,
    "Scaled frequency offset (ns/sec)");
SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &time_freq, 0,
    "Frequency offset (ns/sec)");
#endif

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
 */
int
kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp)
{
	long freq;	/* frequency (ns/s) */
	int modes;	/* mode bits from structure */
	int error, retval;

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or TAI offset is loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv->modes;
	error = 0;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error != 0)
		return (error);
	NTP_LOCK();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv->maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv->esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv->status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv->status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv->constant < 0)
			time_constant = 0;
		else if (ntv->constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv->constant;
	}
	if (modes & MOD_TAI) {
		if (ntv->constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv->constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv->shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv->shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv->shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv->freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv->freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv->freq * 1000LL * 65536LL;
		}
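		/*
		 * Editor's worked example (not in the original): a
		 * request of +100 PPM arrives as ntv->freq = 100 << 16
		 * = 6553600; multiplying by 1000 * 65536 as above
		 * yields 100000 * 2^32, i.e. +100000 ns/s in l_fp
		 * format.
		 */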
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv->offset);
		else
			hardupdate(ntv->offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime().
	 */
	if (time_status & STA_NANO)
		ntv->offset = L_GINT(time_offset);
	else
		ntv->offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
	ntv->freq = L_GINT((time_freq / 1000LL) << 16);
	ntv->maxerror = time_maxerror;
	ntv->esterror = time_esterror;
	ntv->status = time_status;
	ntv->constant = time_constant;
	if (time_status & STA_NANO)
		ntv->precision = time_precision;
	else
		ntv->precision = time_precision / 1000;
	ntv->tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv->shift = pps_shift;
	ntv->ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv->jitter = pps_jitter;
	else
		ntv->jitter = pps_jitter / 1000;
	ntv->stabil = pps_stabil;
	ntv->calcnt = pps_calcnt;
	ntv->errcnt = pps_errcnt;
	ntv->jitcnt = pps_jitcnt;
	ntv->stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
	NTP_UNLOCK();

	*retvalp = retval;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;
	int error, retval;

	error = copyin(uap->tp, &ntv, sizeof(ntv));
	if (error == 0) {
		error = kern_ntp_adjtime(td, &ntv, &retval);
		if (error == 0) {
			error = copyout(&ntv, uap->tp, sizeof(ntv));
			if (error == 0)
				td->td_retval[0] = retval;
		}
	}
	return (error);
}
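
/*
 * Editor's sketch (assumption, not part of the original file): a
 * privileged daemon would drive the interface above roughly like this,
 * requesting a +10 PPM frequency correction:
 *
 *	struct timex tx = { 0 };
 *	tx.modes = MOD_FREQUENCY;
 *	tx.freq = 10 << 16;		scaled PPM, see MOD_FREQUENCY above
 *	int state = ntp_adjtime(&tx);	TIME_OK, TIME_ERROR, ...
 */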

/*
 * second_overflow() - called after ntp_tick_adjust()
 *
 * This routine is ordinarily called immediately following the above
 * routine ntp_tick_adjust(). While these two routines are normally
 * combined, they are separated here only for the purposes of
 * simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	NTP_LOCK();

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will insure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {
	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);

	/*
	 * Apply any correction from adjtime(2). If the remaining
	 * correction is more than one second we slew at 5 ms/s
	 * (5000 PPM), otherwise at 500 us/s (500 PPM), until the final
	 * < 500 usec remainder is applied in the last second.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
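
	/*
	 * Editor's worked example (not in the original): with
	 * time_adjtime = +3000000 us (3 s), the first seconds slew at
	 * 5000 us/s; once the residue drops to 1000000 us or less the
	 * rate falls to 500 us/s, and a final residue below 500 us is
	 * consumed in one second.  tickrate (us) is converted to ns by
	 * the "* 1000" above before being folded into time_adj.
	 */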
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */

	NTP_UNLOCK();
}

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
static void
hardupdate(long offset /* clock offset (ns) */)
{
	long mtemp;
	l_fp ftemp;

	NTP_ASSERT_LOCKED();

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}
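
/*
 * Editor's worked example (not in the original): in PLL mode with
 * time_constant = 0 the shift used above is (SHIFT_PLL + 2 + 0) << 1 = 12,
 * so a 100000 ns offset contributes 100000 / 4096 ~= 24.4 ns/s for each
 * second since the last update; after mtemp = 64 s that is a frequency
 * nudge of roughly 1562 ns/s (about 1.6 PPM), before clamping to MAXFREQ.
 */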

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 *
 * tsp - time at current PPS event
 * delta_nsec - time elapsed between the previous and current PPS event
 */
void
hardpps(struct timespec *tsp, long delta_nsec)
{
	long u_nsec, v_nsec;	/* temps */
	time_t u_sec;
	l_fp ftemp;

	NTP_LOCK();

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		goto out;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Update the frequency accumulator using the difference between the
	 * current and previous PPS event measured directly by the timecounter.
	 */
	pps_fcount += delta_nsec - NANOSECOND;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		goto out;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}
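
	/*
	 * Editor's worked example (not in the original): with samples
	 * pps_tf[0..2] = 40, 10, 30 ns the branch tagged "0 2 1" above
	 * is taken: the median 30 ns becomes the offset candidate
	 * (v_nsec) and the spread 40 - 10 = 30 ns becomes the jitter
	 * sample (u_nsec).
	 */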

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded; otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
	 * to avoid a nonsensical threshold of zero.
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		goto out;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		goto out;
	}

	/*
	 * Here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;

out:
	NTP_UNLOCK();
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int64_t ltr, ltw;
	int error;

	if (delta != NULL) {
		error = priv_check(td, PRIV_ADJTIME);
		if (error != 0)
			return (error);
		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
	}
	NTP_LOCK();
	ltr = time_adjtime;
	if (delta != NULL)
		time_adjtime = ltw;
	NTP_UNLOCK();
	if (olddelta != NULL) {
		atv.tv_sec = ltr / 1000000;
		atv.tv_usec = ltr % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	return (0);
}
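
/*
 * Editor's sketch (assumption, not part of the original file): the
 * corresponding userland call for the path above, smearing the clock
 * forward by 250 ms:
 *
 *	struct timeval d = { .tv_sec = 0, .tv_usec = 250000 }, old;
 *	if (adjtime(&d, &old) == -1)
 *		err(1, "adjtime");
 *	'old' now holds whatever correction was still pending.
 */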

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	/*
	 * Read of time_status is lock-less, which is fine since
	 * ntp_is_time_error() operates on the consistent read value.
	 */
	if (!ntp_is_time_error(time_status))
		resettodr();
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	/* Another unlocked read of time_status */
	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
		resettodr();
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");
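
/*
 * Editor's note (illustrative, not in the original): with the handler
 * above the save interval can be changed at run time, e.g.
 * "sysctl machdep.rtc_save_period=3600" for hourly saves, or set to 0
 * to stop the periodic callout entirely.
 */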

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);