/*-
 ***********************************************************************
 *                                                                    *
 * Copyright (c) David L. Mills 1993-2001                             *
 *                                                                    *
 * Permission to use, copy, modify, and distribute this software and  *
 * its documentation for any purpose and without fee is hereby        *
 * granted, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission      *
 * notice appear in supporting documentation, and that the name       *
 * University of Delaware not be used in advertising or publicity     *
 * pertaining to distribution of the software without specific,       *
 * written prior permission. The University of Delaware makes no      *
 * representations about the suitability this software for any        *
 * purpose. It is provided "as is" without express or implied         *
 * warranty.                                                          *
 *                                                                    *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define	L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable.
 * When the kernel time is reckoned in microseconds (NTP_NANO
 * undefined), the time is derived from the kernel time variable
 * together with a variable representing the leftover nanoseconds at
 * the last tick interrupt. In either case, the current nanosecond time
 * is reckoned from these values plus an interpolated value derived by
 * the clock routines in another architecture-specific module. The
 * interpolation can use either a dedicated counter or a processor
 * cycle counter (PCC) implemented in some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */

/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                           ns                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|                ns/s                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           fraction                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
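 *
 * As an illustration (an editor's example, not from the original Mills
 * sources): a residual time offset of +1 ms is carried in an l_fp as
 * 1000000 << 32. With the default PLL gain below (SHIFT_PLL = 4 and
 * time_constant = 0), the seconds-overflow code amortizes roughly 1/16
 * of it per second:
 *
 *	l_fp off;
 *
 *	L_LINT(off, 1000000);		off holds 1000000 ns exactly
 *	L_RSHIFT(off, SHIFT_PLL);	off now holds 62500 ns
 *	L_GINT(off);			evaluates to 62500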
 */
#define	SHIFT_PLL	4		/* PLL loop gain (shift) */
#define	SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

static struct mtx ntp_lock;
MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);

#define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
#define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
#define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define	PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define	PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define	PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define	PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define	PPS_VALID	120		/* PPS signal watchdog max (s) */
#define	PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define	PPS_POPCORN	2		/* popcorn spike threshold (shift) */

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static long pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static bool
ntp_is_time_error(int tsl)
{

	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact the system clock
	 * may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(tsl & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (tsl & STA_PPSFREQ &&
	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
		return (true);

	return (false);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	NTP_ASSERT_LOCKED();

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error(time_status))
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
    "");

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &pps_freq, 0,
    "Scaled frequency offset (ns/sec)");
SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &time_freq, 0,
    "Frequency offset (ns/sec)");
#endif

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
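 *
 * A minimal userland sketch of the common case (an editor's example,
 * not from the original sources; field units assume STA_NANO is not
 * set, so offsets are in microseconds):
 *
 *	struct timex tx;
 *
 *	memset(&tx, 0, sizeof(tx));
 *	tx.modes = MOD_STATUS | MOD_TIMECONST | MOD_OFFSET;
 *	tx.status = STA_PLL;
 *	tx.constant = 10;		poll interval (shift)
 *	tx.offset = 2500;		measured offset, 2.5 ms
 *	if (ntp_adjtime(&tx) == TIME_ERROR)
 *		warnx("clock not synchronized");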
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int error, retval;

	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or TAI offset is loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv.modes;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error != 0)
		return (error);
	NTP_LOCK();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv.status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI) {
		if (ntv.constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime().
	 */
	if (time_status & STA_NANO)
		ntv.offset = L_GINT(time_offset);
	else
		ntv.offset = L_GINT(time_offset) / 1000;	/* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
	NTP_UNLOCK();

	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error == 0)
		td->td_retval[0] = retval;
	return (error);
}

/*
 * second_overflow() - called after ntp_tick_adjust()
 *
 * This routine is ordinarily called immediately following the above
 * routine ntp_tick_adjust(). While these two routines are normally
 * combined, they are separated here only for the purposes of
 * simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	NTP_LOCK();

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);

	/*
	 * Apply any correction from adjtime(2). If more than one second
	 * off, we slew at a rate of 5 ms/s (5000 PPM), else 500 us/s
	 * (500 PPM), until the last second is slewed the final < 500 usecs.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */

	NTP_UNLOCK();
}

/*
 * ntp_init() - initialize variables and structures
 *
 * This routine must be called after the kernel variables hz and tick
 * are set or changed and before the next tick interrupt. In this
 * particular implementation, these values are assumed set elsewhere in
 * the kernel. The design allows the clock frequency and tick interval
 * to be changed while the system is running. So, this routine should
 * probably be integrated with the code that does that.
 */
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	L_CLR(time_offset);
	L_CLR(time_freq);
#ifdef PPS_SYNC
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq);
#endif /* PPS_SYNC */
}

SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
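 *
 * As a rough worked example (an editor's illustration, not from the
 * original sources): with SHIFT_PLL = 4, time_constant = 0 and an
 * update interval mtemp = 16 s, a 1 ms (1000000 ns) offset contributes
 * about
 *
 *	1000000 * 16 / 2^((SHIFT_PLL + 2 + time_constant) * 2) =
 *	1000000 * 16 / 4096 ~= 3906 ns/s (~3.9 PPM)
 *
 * to the frequency estimate, while the phase term in
 * ntp_update_second() amortizes the remaining offset at about
 * 1/2^(SHIFT_PLL + time_constant), i.e. 1/16 of the residual per
 * second.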
 */
static void
hardupdate(long offset)		/* clock offset (ns) */
{
	long mtemp;
	l_fp ftemp;

	NTP_ASSERT_LOCKED();

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems, this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 *
 * tsp  - time at PPS
 * nsec - hardware counter at PPS
 */
void
hardpps(struct timespec *tsp, long nsec)
{
	long u_sec, u_nsec, v_nsec;	/* temps */
	l_fp ftemp;

	NTP_LOCK();

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		goto out;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
	pps_fcount += u_nsec;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		goto out;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded. Otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2 ns, just
	 * to avoid a nonsensical threshold of zero.
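	 *
	 * For example (an editor's illustration with assumed numbers,
	 * not from the original sources): with a 3579545 Hz ACPI-class
	 * timecounter the floor works out to
	 *
	 *	2 * (1000000000 / 3579545) ~= 558 ns
	 *
	 * while any counter at or above 1 GHz gets the 2 ns minimum.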
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		goto out;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		goto out;
	}

	/*
	 * Here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
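	 *
	 * (MAXFREQ is the familiar +-500 PPM NTP tolerance expressed in
	 * ns/s, so, for instance, a computed estimate of 620000 ns/s
	 * would be clamped to 500000 ns/s here; editor's note, not from
	 * the original sources.)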
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;

out:
	NTP_UNLOCK();
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int64_t ltr, ltw;
	int error;

	if (delta != NULL) {
		error = priv_check(td, PRIV_ADJTIME);
		if (error != 0)
			return (error);
		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
	}
	NTP_LOCK();
	ltr = time_adjtime;
	if (delta != NULL)
		time_adjtime = ltw;
	NTP_UNLOCK();
	if (olddelta != NULL) {
		atv.tv_sec = ltr / 1000000;
		atv.tv_usec = ltr % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	return (0);
}

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	/*
	 * Read of time_status is lock-less, which is fine since
	 * ntp_is_time_error() operates on the consistent read value.
	 */
	if (!ntp_is_time_error(time_status))
		resettodr();
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	/* Another unlocked read of time_status */
	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
		resettodr();
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);
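
/*
 * Illustrative userland use of adjtime(2), which feeds kern_adjtime()
 * above (an editor's sketch, not part of the original sources): slew
 * the clock back by 250 ms and report any correction still pending
 * from an earlier call.
 *
 *	struct timeval delta = { 0, -250000 }, olddelta;
 *
 *	if (adjtime(&delta, &olddelta) == -1)
 *		err(1, "adjtime");
 *	printf("outstanding correction: %jd us\n",
 *	    (intmax_t)olddelta.tv_sec * 1000000 + olddelta.tv_usec);
 */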