/*-
 ***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-2001                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and   *
 * its documentation for any purpose and without fee is hereby         *
 * granted, provided that the above copyright notice appears in all    *
 * copies and that both the copyright notice and this permission       *
 * notice appear in supporting documentation, and that the name        *
 * University of Delaware not be used in advertising or publicity      *
 * pertaining to distribution of the software without specific,        *
 * written prior permission. The University of Delaware makes no       *
 * representations about the suitability this software for any         *
 * purpose. It is provided "as is" without express or implied          *
 * warranty.                                                           *
 *                                                                     *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
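
/*
 * Illustrative note (not from the original sources): the macros above
 * implement 32.32 fixed-point arithmetic in a plain int64_t.  As a
 * rough worked example for a 100 ns quantity:
 *
 *	l_fp v;
 *	L_LINT(v, 100);		v = 100 << 32 = 429496729600
 *	L_RSHIFT(v, 2);		v now holds 25 ns (107374182400)
 *	(void)L_GINT(v);	yields the integer part, 25
 *
 * L_RSHIFT rounds toward zero for negative values by negating before
 * and after the shift, which is why it is not a bare ">>".
 */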

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds, (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                           ns                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                             fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|               ns/s                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                             fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
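/*
 * Illustrative note (not from the original sources): with the decimal
 * point between the two 32-bit halves, one unit in the fraction half
 * is 2^-32 ns ~= 2.3e-10 ns, which is the resolution quoted above.
 * The +-500000 ns/s (500 PPM) frequency bound corresponds to MAXFREQ
 * in <sys/timex.h>; at hz = 1000 it amounts to at most roughly 500 ns
 * of correction folded in per tick.
 */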
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define SHIFT_PLL	4		/* PLL loop gain (shift) */
#define SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

static struct mtx ntp_lock;
MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);

#define	NTP_LOCK()		mtx_lock_spin(&ntp_lock)
#define	NTP_UNLOCK()		mtx_unlock_spin(&ntp_lock)
#define	NTP_ASSERT_LOCKED()	mtx_assert(&ntp_lock, MA_OWNED)

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define PPS_VALID	120		/* PPS signal watchdog max (s) */
#define PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define PPS_POPCORN	2		/* popcorn spike threshold (shift) */

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static long pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static bool ntp_is_time_error(int tsl);

static bool
ntp_is_time_error(int tsl)
{

	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact the system clock
	 * may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(tsl & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (tsl & STA_PPSFREQ &&
	    tsl & (STA_PPSWANDER | STA_PPSERROR)))
		return (true);

	return (false);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	NTP_ASSERT_LOCKED();

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error(time_status))
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	NTP_LOCK();
	ntp_gettime1(&ntv);
	NTP_UNLOCK();

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval",
    "");

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &pps_freq, 0,
    "Scaled frequency offset (ns/sec)");
SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &time_freq, 0,
    "Frequency offset (ns/sec)");
#endif
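
/*
 * Illustrative, hedged sketch (not part of the original file): a
 * userland daemon would typically drive the discipline below roughly
 * as follows, with error handling omitted and "measured_offset" a
 * hypothetical value obtained from its own clock filter:
 *
 *	struct timex tx = { 0 };
 *	tx.modes = MOD_STATUS | MOD_OFFSET | MOD_TIMECONST;
 *	tx.status = STA_PLL;
 *	tx.offset = measured_offset;	(ns if STA_NANO, else us)
 *	tx.constant = 6;		(poll interval shift)
 *	ntp_adjtime(&tx);		(returns the TIME_* state)
 */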

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int error, retval;

	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or TAI offset are loaded
	 * from the ntv.constant member, depending on the mode bits. If
	 * the STA_PLL bit in the status word is cleared, the state and
	 * status words are reset to the initial values at boot.
	 */
	modes = ntv.modes;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error != 0)
		return (error);
	NTP_LOCK();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv.status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI) {
		if (ntv.constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
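	/*
	 * Illustrative note (not from the original sources): for the
	 * scaling above, a daemon requesting +100 PPM passes
	 * ntv.freq = 100 << 16 = 6553600.  The sanity check computes
	 * freq = (6553600 * 1000) >> 16 = 100000 ns/s, well inside
	 * MAXFREQ, and time_freq becomes 100000 * 2^32 in 32.32
	 * fixed-point form.
	 */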
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime();
	 */
	if (time_status & STA_NANO)
		ntv.offset = L_GINT(time_offset);
	else
		ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
	NTP_UNLOCK();

	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error == 0)
		td->td_retval[0] = retval;
	return (error);
}

/*
 * second_overflow() - called after ntp_tick_adjust()
 *
 * This routine is ordinarily called immediately following the above
 * routine ntp_tick_adjust(). While these two routines are normally
 * combined, they are separated here only for the purposes of
 * simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	NTP_LOCK();

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will insure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}
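
	/*
	 * Illustrative note (not from the original sources): the shift
	 * below makes the offset decay exponentially.  With
	 * SHIFT_PLL = 4 and time_constant = 0, 1/16 of the remaining
	 * time_offset is scheduled for the next second; an initial
	 * 128 ms offset therefore contributes about 8 ms of adjustment
	 * in the first second and progressively less thereafter.
	 */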

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);

	/*
	 * Apply any correction from adjtime(2). If more than one second
	 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500PPM)
	 * until the last second is slewed the final < 500 usecs.
	 */
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */

	NTP_UNLOCK();
}

/*
 * ntp_init() - initialize variables and structures
 *
 * This routine must be called after the kernel variables hz and tick
 * are set or changed and before the next tick interrupt. In this
 * particular implementation, these values are assumed set elsewhere in
 * the kernel. The design allows the clock frequency and tick interval
 * to be changed while the system is running. So, this routine should
 * probably be integrated with the code that does that.
 */
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	L_CLR(time_offset);
	L_CLR(time_freq);
#ifdef PPS_SYNC
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq);
#endif /* PPS_SYNC */
}

SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
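/*
 * Illustrative note (not from the original sources): in PLL mode the
 * frequency is nudged by roughly offset * mtemp / 2^((SHIFT_PLL + 2 +
 * time_constant) * 2).  For example, a 1 ms (1000000 ns) offset seen
 * 16 s after the previous update with time_constant = 0 adds about
 * 1000000 / 2^12 * 16 ~= 3906 ns/s to time_freq, subject to the
 * +-MAXFREQ clamp at the end of the routine.
 */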
static void
hardupdate(long offset)		/* clock offset (ns) */
{
	long mtemp;
	l_fp ftemp;

	NTP_ASSERT_LOCKED();

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 */
void
hardpps(struct timespec *tsp,	/* time at PPS */
    long nsec)			/* hardware counter at PPS */
{
	long u_sec, u_nsec, v_nsec;	/* temps */
	l_fp ftemp;

	NTP_LOCK();

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		goto out;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
	pps_fcount += u_nsec;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		goto out;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}
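
	/*
	 * Illustrative note (not from the original sources): with
	 * filter samples of, say, 400, 600 and 500 ns in pps_tf[0..2],
	 * the code above selects the median 500 ns as v_nsec and the
	 * spread 600 - 400 = 200 ns as u_nsec, the jitter input used
	 * by the popcorn test below.
	 */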

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded; otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
	 * to avoid a nonsensical threshold of zero.
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		goto out;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		goto out;
	}

	/*
	 * Here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
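
	/*
	 * Illustrative note (not from the original sources): pps_shift
	 * starts at PPS_FAVG (a 2^2 = 4 s interval).  Four consecutive
	 * quiet intervals bump it toward pps_shiftmax, doubling the
	 * interval each time (4 s, 8 s, 16 s, ...); four noisy
	 * intervals halve it again, never dropping below PPS_FAVG.
	 */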

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;

out:
	NTP_UNLOCK();
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int64_t ltr, ltw;
	int error;

	if (delta != NULL) {
		error = priv_check(td, PRIV_ADJTIME);
		if (error != 0)
			return (error);
		ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
	}
	NTP_LOCK();
	ltr = time_adjtime;
	if (delta != NULL)
		time_adjtime = ltw;
	NTP_UNLOCK();
	if (olddelta != NULL) {
		atv.tv_sec = ltr / 1000000;
		atv.tv_usec = ltr % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	return (0);
}

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	/*
	 * Read of time_status is lock-less, which is fine since
	 * ntp_is_time_error() operates on the consistent read value.
	 */
	if (!ntp_is_time_error(time_status))
		resettodr();
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	/* Another unlocked read of time_status */
	if (resettodr_period > 0 && !ntp_is_time_error(time_status))
		resettodr();
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);