/*-
 ***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-2001                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and  *
 * its documentation for any purpose and without fee is hereby        *
 * granted, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission      *
 * notice appear in supporting documentation, and that the name       *
 * University of Delaware not be used in advertising or publicity     *
 * pertaining to distribution of the software without specific,       *
 * written prior permission. The University of Delaware makes no      *
 * representations about the suitability of this software for any     *
 * purpose. It is provided "as is" without express or implied         *
 * warranty.                                                           *
 *                                                                     *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c.  Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
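
/*
 * Worked example (illustrative only): the macros above implement 32.32
 * fixed-point arithmetic on a plain int64_t.
 *
 *	l_fp v;
 *	L_LINT(v, 250);		v now holds 250.0, i.e. 250 << 32
 *	L_RSHIFT(v, 2);		v now holds 62.5, i.e. (62 << 32) | (1 << 31)
 *	L_GINT(v);		yields 62, the truncated integer part
 *
 * The fractional half gives a resolution of 2^-32 per unit, which is
 * where the "about 2.3e-10" figures quoted in the comments below come
 * from.
 */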

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds, (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *			1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                         ns                              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *			1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|            ns/s                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
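
/*
 * Worked example (illustrative only): a frequency offset of +100 PPM is
 * 100000 ns/s, stored in a frequency variable as 100000 << 32
 * (0x186A000000000).  MAXFREQ (500000 ns/s, i.e. 500 PPM) is the largest
 * magnitude such a variable is allowed to reach below.
 */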
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define SHIFT_PLL	4		/* PLL loop gain (shift) */
#define SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

static struct mtx ntp_lock;
MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);

#define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
#define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
#define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define PPS_VALID	120		/* PPS signal watchdog max (s) */
#define PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define PPS_POPCORN	2		/* popcorn spike threshold (shift) */

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static long pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
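
/*
 * Note (illustrative): pps_shift is the log2 of the PPS calibration
 * interval in seconds, so the interval starts at 1 << PPS_FAVG = 4 s and
 * may grow up to 1 << pps_shiftmax (256 s with PPS_FAVGDEF, and at most
 * 1 << PPS_FAVGMAX = 32768 s).  hardpps() below lengthens or shortens it
 * depending on the observed wander.
 */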
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static bool
ntp_is_time_error(int tsl)
{

	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact the system clock
	 * may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(tsl & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (tsl & STA_PPSFREQ &&
	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
		return (true);

	return (false);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	NTP_ASSERT_LOCKED();

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error(time_status))
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	memset(&ntv, 0, sizeof(ntv));

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	memset(&ntv, 0, sizeof(ntv));

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
    "");

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &pps_freq, 0,
    "Scaled frequency offset (ns/sec)");
SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &time_freq, 0,
    "Frequency offset (ns/sec)");
#endif
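
/*
 * Illustrative userland use of the interface above (a sketch, not kernel
 * code): ntp_gettime(2) returns the clock state, which collapses to
 * TIME_ERROR whenever ntp_is_time_error() holds, so a minimal "can the
 * clock be trusted" check is
 *
 *	struct ntptimeval ntv;
 *
 *	if (ntp_gettime(&ntv) == TIME_ERROR)
 *		warnx("clock is not synchronized");
 *
 * The maxerror and esterror members of the returned structure are in
 * microseconds.
 */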

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int error, retval;

	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or TAI offset are loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv.modes;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error != 0)
		return (error);
	NTP_LOCK();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv.status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI) {
		if (ntv.constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
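	/*
	 * Worked example of the MOD_FREQUENCY scaling above (illustrative
	 * only): a daemon requesting +10 PPM passes ntv.freq = 10 << 16 =
	 * 655360.  The sanity check computes freq = (655360 * 1000) >> 16
	 * = 10000 ns/s, well within MAXFREQ, and time_freq becomes
	 * 655360 * 1000 * 65536 = 10000 << 32, i.e. 10000 ns/s in l_fp
	 * format.
	 */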
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime().
	 */
	if (time_status & STA_NANO)
		ntv.offset = L_GINT(time_offset);
	else
		ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
	NTP_UNLOCK();

	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error == 0)
		td->td_retval[0] = retval;
	return (error);
}

/*
 * second_overflow() - called after ntp_tick_adjust()
 *
 * This routine is ordinarily called immediately following the above
 * routine ntp_tick_adjust(). While these two routines are normally
 * combined, they are separated here only for the purposes of
 * simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	NTP_LOCK();

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);
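
	/*
	 * Illustrative arithmetic for the shift above: with the default
	 * time_constant of 0 and SHIFT_PLL = 4, one sixteenth of the
	 * remaining offset is scheduled each second, so an initial offset
	 * of 128 ms contributes 8 ms of correction in the first second
	 * and then decays geometrically (by 15/16 per second).  A larger
	 * time_constant or a PPS-derived pps_shift changes only the
	 * divisor, not the mechanism.
	 */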

	/*
	 * Apply any correction from adjtime(2). If more than one second
	 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s
	 * (500 PPM) until the last second is slewed the final < 500
	 * usecs.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
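
	/*
	 * Worked example (illustrative): adjtime(2) requesting a +0.3 s
	 * correction leaves time_adjtime = 300000 us.  At 500 us per
	 * second the slew above completes in about 600 seconds; an
	 * initial error of more than one second is first worked down at
	 * 5000 us per second.
	 */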
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */

	NTP_UNLOCK();
}

/*
 * ntp_init() - initialize variables and structures
 *
 * This routine must be called after the kernel variables hz and tick
 * are set or changed and before the next tick interrupt. In this
 * particular implementation, these values are assumed set elsewhere in
 * the kernel. The design allows the clock frequency and tick interval
 * to be changed while the system is running. So, this routine should
 * probably be integrated with the code that does that.
 */
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	L_CLR(time_offset);
	L_CLR(time_freq);
#ifdef PPS_SYNC
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq);
#endif /* PPS_SYNC */
}

SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
static void
hardupdate(long offset)		/* clock offset (ns) */
{
	long mtemp;
	l_fp ftemp;

	NTP_ASSERT_LOCKED();

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}
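
/*
 * Worked example for the PLL branch of hardupdate() above (figures for
 * illustration only): with time_constant = 0 the offset is shifted right
 * by (SHIFT_PLL + 2) * 2 = 12 bits and multiplied by the seconds since
 * the previous update.  A reported offset of +100000 ns (100 us)
 * arriving 16 s after the last update therefore nudges time_freq by
 * roughly 100000 / 4096 * 16 ~= 390 ns/s, i.e. about 0.4 PPM.
 */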

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 *
 * tsp  - time at PPS
 * nsec - hardware counter at PPS
 */
void
hardpps(struct timespec *tsp, long nsec)
{
	long u_sec, u_nsec, v_nsec; /* temps */
	l_fp ftemp;

	NTP_LOCK();

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		goto out;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
	pps_fcount += u_nsec;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		goto out;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}
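
	/*
	 * Illustrative trace of the filter above: with the three most
	 * recent samples 40, 10 and 30 ns in pps_tf[0..2].tv_nsec, the
	 * "0 2 1" branch is taken, leaving v_nsec = 30 (the median) and
	 * u_nsec = 40 - 10 = 30 (the spread used as the jitter estimate).
	 */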

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded. Otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
	 * to avoid a nonsensical threshold of zero.
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		goto out;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		goto out;
	}

	/*
	 * Here the raw frequency offset and wander (stability) is
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;

out:
	NTP_UNLOCK();
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int64_t ltr, ltw;
	int error;

	if (delta != NULL) {
		error = priv_check(td, PRIV_ADJTIME);
		if (error != 0)
			return (error);
		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
	}
	NTP_LOCK();
	ltr = time_adjtime;
	if (delta != NULL)
		time_adjtime = ltw;
	NTP_UNLOCK();
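
	/*
	 * Illustrative example of the conversion below: a remaining
	 * correction of -2.5 s is stored as ltr = -2500000 us.  C
	 * division and modulo give tv_sec = -2 and tv_usec = -500000,
	 * which the fixup normalizes to tv_sec = -3, tv_usec = 500000,
	 * i.e. the same -2.5 s with a non-negative microseconds field.
	 */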
	if (olddelta != NULL) {
		atv.tv_sec = ltr / 1000000;
		atv.tv_usec = ltr % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	return (0);
}

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	/*
	 * Read of time_status is lock-less, which is fine since
	 * ntp_is_time_error() operates on the consistent read value.
	 */
	if (!ntp_is_time_error(time_status))
		resettodr();
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	/* Another unlocked read of time_status */
	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
		resettodr();
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);