/*-
 ***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-2001                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and   *
 * its documentation for any purpose and without fee is hereby         *
 * granted, provided that the above copyright notice appears in all    *
 * copies and that both the copyright notice and this permission       *
 * notice appear in supporting documentation, and that the name        *
 * University of Delaware not be used in advertising or publicity      *
 * pertaining to distribution of the software without specific,        *
 * written prior permission. The University of Delaware makes no       *
 * representations about the suitability this software for any         *
 * purpose. It is provided "as is" without express or implied          *
 * warranty.                                                           *
 *                                                                     *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c.  Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define	L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds, (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */
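/*
 * As a rough illustration (a userland sketch, not kernel code), the
 * interfaces implemented below are typically used as follows, where
 * measured_offset_ns is a hypothetical value supplied by the daemon:
 *
 *	struct ntptimeval ntv;
 *	int state = ntp_gettime(&ntv);	(returns TIME_OK .. TIME_ERROR)
 *
 *	struct timex tx = { .modes = MOD_OFFSET | MOD_NANO };
 *	tx.offset = measured_offset_ns;
 *	ntp_adjtime(&tx);
 */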
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                          ns                              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|               ns/s                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
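/*
 * For example (illustrative numbers only): a time offset of +250 ms is
 * loaded as
 *
 *	L_LINT(time_offset, 250000000);		i.e. 250000000 * 2^32
 *
 * dividing it by 16 (the PLL gain used below with time_constant = 0) is
 *
 *	L_RSHIFT(time_offset, 4);		a sign-preserving shift
 *
 * and L_GINT(time_offset) then recovers 15625000 ns, the fraction being
 * truncated toward zero.
 */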
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define	SHIFT_PLL	4		/* PLL loop gain (shift) */
#define	SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

static struct mtx ntp_lock;
MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);

#define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
#define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
#define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define	PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define	PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define	PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define	PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define	PPS_VALID	120		/* PPS signal watchdog max (s) */
#define	PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define	PPS_POPCORN	2		/* popcorn spike threshold (shift) */

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static long pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static bool
ntp_is_time_error(int tsl)
{

	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact the system clock
	 * may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(tsl & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (tsl & STA_PPSFREQ &&
	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
		return (true);

	return (false);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	NTP_ASSERT_LOCKED();

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error(time_status))
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	memset(&ntv, 0, sizeof(ntv));

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
    "");

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &pps_freq, 0,
    "Scaled frequency offset (ns/sec)");
SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &time_freq, 0,
    "Frequency offset (ns/sec)");
#endif
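
/*
 * The kern.ntp_pll.gettime node above exports the same structure that
 * ntp_gettime() fills in; a minimal userland sketch (not kernel code)
 * to read it would be:
 *
 *	struct ntptimeval ntv;
 *	size_t len = sizeof(ntv);
 *	sysctlbyname("kern.ntp_pll.gettime", &ntv, &len, NULL, 0);
 */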

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int error, retval;

	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or the TAI offset is loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv.modes;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error != 0)
		return (error);
	NTP_LOCK();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv.status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI) {
		if (ntv.constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
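
	/*
	 * Scaling example (illustrative): a daemon requesting +50 PPM
	 * passes ntv.freq = 50 << 16 = 3276800, so that
	 *
	 *	freq      = (3276800 * 1000LL) >> 16   = 50000 ns/s
	 *	time_freq = 3276800 * 1000LL * 65536LL = 50000 * 2^32
	 *
	 * i.e. 50000 ns/s in l_fp format, well inside MAXFREQ.
	 */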
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime().
	 */
	if (time_status & STA_NANO)
		ntv.offset = L_GINT(time_offset);
	else
		ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
	NTP_UNLOCK();

	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error == 0)
		td->td_retval[0] = retval;
	return (error);
}

/*
 * second_overflow() - called after ntp_tick_adjust()
 *
 * This routine is ordinarily called immediately following the above
 * routine ntp_tick_adjust(). While these two routines are normally
 * combined, they are separated here only for the purposes of
 * simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	NTP_LOCK();

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}
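
	/*
	 * For example, with STA_INS set the state moves from TIME_OK to
	 * TIME_INS; when *newsec first reaches a multiple of 86400
	 * (midnight UTC) it is stepped back one second, so 23:59:59
	 * repeats (reported as 23:59:60), time_tai is incremented and
	 * the state becomes TIME_OOP, then TIME_WAIT a second later,
	 * and TIME_OK again once the daemon clears STA_INS.
	 */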

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);

	/*
	 * Apply any correction from adjtime(2). If we are off by more
	 * than one second, slew at a rate of 5ms/s (5000 PPM); otherwise
	 * slew at 500us/s (500 PPM) until the final < 500 usec is slewed
	 * in the last second.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */

	NTP_UNLOCK();
}
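
/*
 * As an example of the adjtime(2) slew above: a pending correction of
 * +2500 us is consumed at tickrate = 500 for five consecutive seconds,
 * each second adding 500 * 1000 ns/s to time_adj, while a correction
 * larger than one second would first be slewed at 5000 us/s.
 */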

/*
 * ntp_init() - initialize variables and structures
 *
 * This routine must be called after the kernel variables hz and tick
 * are set or changed and before the next tick interrupt. In this
 * particular implementation, these values are assumed set elsewhere in
 * the kernel. The design allows the clock frequency and tick interval
 * to be changed while the system is running. So, this routine should
 * probably be integrated with the code that does that.
 */
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	L_CLR(time_offset);
	L_CLR(time_freq);
#ifdef PPS_SYNC
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq);
#endif /* PPS_SYNC */
}

SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
static void
hardupdate(long offset /* clock offset (ns) */)
{
	long mtemp;
	l_fp ftemp;

	NTP_ASSERT_LOCKED();

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}
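
/*
 * Gain example (illustrative, time_constant = 0): a 1000000 ns offset
 * reported mtemp = 16 s after the previous update is shifted right by
 * (SHIFT_PLL + 2 + time_constant) << 1 = 12 bits and multiplied by
 * mtemp, adding about 1000000 * 16 / 4096 ~ 3906 ns/s to time_freq in
 * PLL mode, while the phase itself is amortized in ntp_update_second()
 * at roughly time_offset / 2^SHIFT_PLL per second.
 */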

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 *
 * tsp  - time at PPS
 * nsec - hardware counter at PPS
 */
void
hardpps(struct timespec *tsp, long nsec)
{
	long u_sec, u_nsec, v_nsec; /* temps */
	l_fp ftemp;

	NTP_LOCK();

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		goto out;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
	pps_fcount += u_nsec;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		goto out;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}
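
	/*
	 * For instance, with the last three offsets 120, 80 and 150 ns
	 * in pps_tf[0..2], the median 120 ns becomes v_nsec (the time
	 * offset estimate, applied negated below) and 150 - 80 = 70 ns
	 * becomes u_nsec, the jitter estimate folded into pps_jitter.
	 */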

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded; otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
	 * to avoid a nonsensical threshold of zero.
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		goto out;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		goto out;
	}

	/*
	 * Here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
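
	/*
	 * Concretely, the calibration interval is 1 << pps_shift
	 * seconds: it starts at 2^PPS_FAVG = 4 s and, while the wander
	 * stays within PPS_MAXWANDER, doubles every four good intervals
	 * up to 2^pps_shiftmax (256 s by default), falling back toward
	 * 4 s when the wander limit is exceeded.
	 */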

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;

out:
	NTP_UNLOCK();
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int64_t ltr, ltw;
	int error;

	if (delta != NULL) {
		error = priv_check(td, PRIV_ADJTIME);
		if (error != 0)
			return (error);
		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
	}
	NTP_LOCK();
	ltr = time_adjtime;
	if (delta != NULL)
		time_adjtime = ltw;
	NTP_UNLOCK();
	if (olddelta != NULL) {
		atv.tv_sec = ltr / 1000000;
		atv.tv_usec = ltr % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	return (0);
}

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	/*
	 * Read of time_status is lock-less, which is fine since
	 * ntp_is_time_error() operates on the consistent read value.
	 */
	if (!ntp_is_time_error(time_status))
		resettodr();
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	/* Another unlocked read of time_status */
	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
		resettodr();
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);
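
/*
 * The machdep.rtc_save_period knob above is a plain integer sysctl; for
 * example, "sysctl machdep.rtc_save_period=3600" switches the periodic
 * RTC save to once an hour and a value of 0 disables it entirely.
 */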