/*-
 ***********************************************************************
 *                                                                     *
 * Copyright (c) David L. Mills 1993-2001                              *
 *                                                                     *
 * Permission to use, copy, modify, and distribute this software and   *
 * its documentation for any purpose and without fee is hereby         *
 * granted, provided that the above copyright notice appears in all    *
 * copies and that both the copyright notice and this permission       *
 * notice appear in supporting documentation, and that the name        *
 * University of Delaware not be used in advertising or publicity      *
 * pertaining to distribution of the software without specific,        *
 * written prior permission. The University of Delaware makes no       *
 * representations about the suitability this software for any         *
 * purpose. It is provided "as is" without express or implied          *
 * warranty.                                                           *
 *                                                                     *
 **********************************************************************/

/*
 * Adapted from the original sources for FreeBSD and timecounters by:
 *	Poul-Henning Kamp <phk@FreeBSD.org>.
 *
 * The 32bit version of the "LP" macros seems a bit past its "sell by"
 * date so I have retained only the 64bit version and included it directly
 * in this file.
 *
 * Only minor changes done to interface with the timecounters over in
 * sys/kern/kern_clock.c.  Some of the comments below may be (even more)
 * confusing and/or plain wrong in that context.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/timetc.h>
#include <sys/timepps.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

#ifdef PPS_SYNC
FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
#endif

/*
 * Single-precision macros for 64-bit machines
 */
typedef int64_t l_fp;
#define	L_ADD(v, u)	((v) += (u))
#define	L_SUB(v, u)	((v) -= (u))
#define	L_ADDHI(v, a)	((v) += (int64_t)(a) << 32)
#define	L_NEG(v)	((v) = -(v))
#define	L_RSHIFT(v, n) \
	do { \
		if ((v) < 0) \
			(v) = -(-(v) >> (n)); \
		else \
			(v) = (v) >> (n); \
	} while (0)
#define	L_MPY(v, a)	((v) *= (a))
#define	L_CLR(v)	((v) = 0)
#define	L_ISNEG(v)	((v) < 0)
#define	L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define	L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
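
/*
 * Illustrative example of the 64-bit fixed-point (l_fp) encoding used by
 * the macros above (values made up): L_LINT(v, 2) stores 2.0 as 2 << 32,
 * and both L_RSHIFT() and L_GINT() truncate toward zero for negative
 * values, e.g.
 *
 *	l_fp v;
 *	L_LINT(v, -3);		v is now -3 * 2^32 (-3.0)
 *	L_RSHIFT(v, 1);		v is now -3 * 2^31 (-1.5)
 *	L_GINT(v)		evaluates to -1 (truncated toward zero)
 *
 * The fraction half therefore resolves 2^-32 of the unit (ns or ns/s),
 * which is where the ~2.3e-10 resolution figures quoted below come from.
 */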

/*
 * Generic NTP kernel interface
 *
 * These routines constitute the Network Time Protocol (NTP) interfaces
 * for user and daemon application programs. The ntp_gettime() routine
 * provides the time, maximum error (synch distance) and estimated error
 * (dispersion) to client user application programs. The ntp_adjtime()
 * routine is used by the NTP daemon to adjust the system clock to an
 * externally derived time. The time offset and related variables set by
 * this routine are used by other routines in this module to adjust the
 * phase and frequency of the clock discipline loop which controls the
 * system clock.
 *
 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
 * defined), the time at each tick interrupt is derived directly from
 * the kernel time variable. When the kernel time is reckoned in
 * microseconds (NTP_NANO undefined), the time is derived from the
 * kernel time variable together with a variable representing the
 * leftover nanoseconds at the last tick interrupt. In either case, the
 * current nanosecond time is reckoned from these values plus an
 * interpolated value derived by the clock routines in another
 * architecture-specific module. The interpolation can use either a
 * dedicated counter or a processor cycle counter (PCC) implemented in
 * some architectures.
 *
 * Note that all routines must run at priority splclock or higher.
 */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The nanosecond clock discipline uses two variable types, time
 * variables and frequency variables. Both types are represented as 64-
 * bit fixed-point quantities with the decimal point between two 32-bit
 * halves. On a 32-bit machine, each half is represented as a single
 * word and mathematical operations are done using multiple-precision
 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
 * used.
 *
 * A time variable is a signed 64-bit fixed-point number in ns and
 * fraction. It represents the remaining time offset to be amortized
 * over succeeding tick interrupts. The maximum time offset is about
 * 0.5 s and the resolution is about 2.3e-10 ns.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s|                          ns                             |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A frequency variable is a signed 64-bit fixed-point number in ns/s
 * and fraction. It represents the ns and fraction to be added to the
 * kernel time variable at each second. The maximum frequency offset is
 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
 *
 *                      1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |s s s s s s s s s s s s s|              ns/s                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                            fraction                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
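/*
 * For example (illustrative numbers): a frequency offset of +5 ppm is
 * 5000 ns/s and is stored as 5000 * 2^32 in a frequency variable, while
 * a residual time offset of -250 us is stored as -250000 * 2^32 in a
 * time variable. The 0.5 s and +-500000 ns/s bounds quoted above
 * correspond to MAXPHASE and MAXFREQ in <sys/timex.h>.
 */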
/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock.
 */
#define	SHIFT_PLL	4		/* PLL loop gain (shift) */
#define	SHIFT_FLL	2		/* FLL loop gain (shift) */

static int time_state = TIME_OK;	/* clock state */
int time_status = STA_UNSYNC;		/* clock status bits */
static long time_tai;			/* TAI offset (s) */
static long time_monitor;		/* last time offset scaled (ns) */
static long time_constant;		/* poll interval (shift) (s) */
static long time_precision = 1;		/* clock precision (ns) */
static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
long time_esterror = MAXPHASE / 1000;	/* estimated error (us) */
static long time_reftime;		/* uptime at last adjustment (s) */
static l_fp time_offset;		/* time offset (ns) */
static l_fp time_freq;			/* frequency offset (ns/s) */
static l_fp time_adj;			/* tick adjust (ns/s) */

static int64_t time_adjtime;		/* correction from adjtime(2) (usec) */

#ifdef PPS_SYNC
/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available and connected via a modem control lead. They establish
 * the engineering parameters of the clock discipline loop when
 * controlled by the PPS signal.
 */
#define	PPS_FAVG	2		/* min freq avg interval (s) (shift) */
#define	PPS_FAVGDEF	8		/* default freq avg int (s) (shift) */
#define	PPS_FAVGMAX	15		/* max freq avg interval (s) (shift) */
#define	PPS_PAVG	4		/* phase avg interval (s) (shift) */
#define	PPS_VALID	120		/* PPS signal watchdog max (s) */
#define	PPS_MAXWANDER	100000		/* max PPS wander (ns/s) */
#define	PPS_POPCORN	2		/* popcorn spike threshold (shift) */

static struct timespec pps_tf[3];	/* phase median filter */
static l_fp pps_freq;			/* scaled frequency offset (ns/s) */
static long pps_fcount;			/* frequency accumulator */
static long pps_jitter;			/* nominal jitter (ns) */
static long pps_stabil;			/* nominal stability (scaled ns/s) */
static long pps_lastsec;		/* time at last calibration (s) */
static int pps_valid;			/* signal watchdog counter */
static int pps_shift = PPS_FAVG;	/* interval duration (s) (shift) */
static int pps_shiftmax = PPS_FAVGDEF;	/* max interval duration (s) (shift) */
static int pps_intcnt;			/* wander counter */

/*
 * PPS signal quality monitors
 */
static long pps_calcnt;			/* calibration intervals */
static long pps_jitcnt;			/* jitter limit exceeded */
static long pps_stbcnt;			/* stability limit exceeded */
static long pps_errcnt;			/* calibration errors */
#endif /* PPS_SYNC */
/*
 * End of phase/frequency-lock loop (PLL/FLL) definitions
 */

static void ntp_init(void);
static void hardupdate(long offset);
static void ntp_gettime1(struct ntptimeval *ntvp);
static int ntp_is_time_error(void);

static int
ntp_is_time_error(void)
{
	/*
	 * Status word error decode. If any of these conditions occur,
	 * an error is returned, instead of the status word. Most
	 * applications will care only about the fact that the system
	 * clock may not be trusted, not about the details.
	 *
	 * Hardware or software error
	 */
	if ((time_status & (STA_UNSYNC | STA_CLOCKERR)) ||

	/*
	 * PPS signal lost when either time or frequency synchronization
	 * requested
	 */
	    (time_status & (STA_PPSFREQ | STA_PPSTIME) &&
	    !(time_status & STA_PPSSIGNAL)) ||

	/*
	 * PPS jitter exceeded when time synchronization requested
	 */
	    (time_status & STA_PPSTIME &&
	    time_status & STA_PPSJITTER) ||

	/*
	 * PPS wander exceeded or calibration error when frequency
	 * synchronization requested
	 */
	    (time_status & STA_PPSFREQ &&
	    time_status & (STA_PPSWANDER | STA_PPSERROR)))
		return (1);

	return (0);
}

static void
ntp_gettime1(struct ntptimeval *ntvp)
{
	struct timespec atv;	/* nanosecond time */

	GIANT_REQUIRED;

	nanotime(&atv);
	ntvp->time.tv_sec = atv.tv_sec;
	ntvp->time.tv_nsec = atv.tv_nsec;
	ntvp->maxerror = time_maxerror;
	ntvp->esterror = time_esterror;
	ntvp->tai = time_tai;
	ntvp->time_state = time_state;

	if (ntp_is_time_error())
		ntvp->time_state = TIME_ERROR;
}

/*
 * ntp_gettime() - NTP user application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the TAI offset is returned in the ntptimeval.tai structure member.
 */
#ifndef _SYS_SYSPROTO_H_
struct ntp_gettime_args {
	struct ntptimeval *ntvp;
};
#endif
/* ARGSUSED */
int
sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
{
	struct ntptimeval ntv;

	mtx_lock(&Giant);
	ntp_gettime1(&ntv);
	mtx_unlock(&Giant);

	td->td_retval[0] = ntv.time_state;
	return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
}
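
/*
 * Illustrative userland sketch (not part of the kernel build): a
 * monitoring program would typically call the ntp_gettime(2) wrapper and
 * treat any state other than TIME_ERROR as usable, e.g.
 *
 *	struct ntptimeval ntv;
 *	int state = ntp_gettime(&ntv);
 *	if (state == TIME_ERROR)
 *		warnx("clock unsynchronized");
 *	else
 *		printf("%jd.%09ld +/- %ld us\n", (intmax_t)ntv.time.tv_sec,
 *		    ntv.time.tv_nsec, ntv.maxerror);
 *
 * The same structure is also exported read-only through the
 * kern.ntp_pll.gettime sysctl defined below.
 */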

static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;	/* temporary structure */

	ntp_gettime1(&ntv);

	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}

SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW, 0, "");
SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, sizeof(struct ntptimeval), ntp_sysctl, "S,ntptimeval", "");

#ifdef PPS_SYNC
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
    &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
    &pps_shift, 0, "Interval duration (sec) (shift)");
SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
    &time_monitor, 0, "Last time offset scaled (ns)");

SYSCTL_OPAQUE(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD,
    &pps_freq, sizeof(pps_freq), "I", "Scaled frequency offset (ns/sec)");
SYSCTL_OPAQUE(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD,
    &time_freq, sizeof(time_freq), "I", "Frequency offset (ns/sec)");
#endif

/*
 * ntp_adjtime() - NTP daemon application interface
 *
 * See the timex.h header file for synopsis and API description. Note that
 * the timex.constant structure member has a dual purpose to set the time
 * constant and to set the TAI offset.
 */
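/*
 * Illustrative userland sketch (not part of the kernel build): an NTP
 * daemon hands the kernel a phase sample roughly like this (values made
 * up); with MOD_NANO set the offset is interpreted in nanoseconds,
 * otherwise in microseconds:
 *
 *	struct timex tx = { 0 };
 *	tx.modes = MOD_STATUS | MOD_NANO | MOD_OFFSET;
 *	tx.status = STA_PLL;
 *	tx.offset = -250000;		-250 us, expressed in ns
 *	if (ntp_adjtime(&tx) == TIME_ERROR)
 *		warnx("clock not yet synchronized");
 */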
#ifndef _SYS_SYSPROTO_H_
struct ntp_adjtime_args {
	struct timex *tp;
};
#endif

int
sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
{
	struct timex ntv;	/* temporary structure */
	long freq;		/* frequency (ns/s) */
	int modes;		/* mode bits from structure */
	int s;			/* caller priority */
	int error;

	error = copyin((caddr_t)uap->tp, (caddr_t)&ntv, sizeof(ntv));
	if (error)
		return (error);

	/*
	 * Update selected clock variables - only the superuser can
	 * change anything. Note that there is no error checking here on
	 * the assumption the superuser should know what it is doing.
	 * Note that either the time constant or the TAI offset is
	 * loaded from the ntv.constant member, depending on the mode
	 * bits. If the STA_PLL bit in the status word is cleared, the
	 * state and status words are reset to the initial values at
	 * boot.
	 */
	mtx_lock(&Giant);
	modes = ntv.modes;
	if (modes)
		error = priv_check(td, PRIV_NTP_ADJTIME);
	if (error)
		goto done2;
	s = splclock();
	if (modes & MOD_MAXERROR)
		time_maxerror = ntv.maxerror;
	if (modes & MOD_ESTERROR)
		time_esterror = ntv.esterror;
	if (modes & MOD_STATUS) {
		if (time_status & STA_PLL && !(ntv.status & STA_PLL)) {
			time_state = TIME_OK;
			time_status = STA_UNSYNC;
#ifdef PPS_SYNC
			pps_shift = PPS_FAVG;
#endif /* PPS_SYNC */
		}
		time_status &= STA_RONLY;
		time_status |= ntv.status & ~STA_RONLY;
	}
	if (modes & MOD_TIMECONST) {
		if (ntv.constant < 0)
			time_constant = 0;
		else if (ntv.constant > MAXTC)
			time_constant = MAXTC;
		else
			time_constant = ntv.constant;
	}
	if (modes & MOD_TAI) {
		if (ntv.constant > 0)	/* XXX zero & negative numbers ? */
			time_tai = ntv.constant;
	}
#ifdef PPS_SYNC
	if (modes & MOD_PPSMAX) {
		if (ntv.shift < PPS_FAVG)
			pps_shiftmax = PPS_FAVG;
		else if (ntv.shift > PPS_FAVGMAX)
			pps_shiftmax = PPS_FAVGMAX;
		else
			pps_shiftmax = ntv.shift;
	}
#endif /* PPS_SYNC */
	if (modes & MOD_NANO)
		time_status |= STA_NANO;
	if (modes & MOD_MICRO)
		time_status &= ~STA_NANO;
	if (modes & MOD_CLKB)
		time_status |= STA_CLK;
	if (modes & MOD_CLKA)
		time_status &= ~STA_CLK;
	if (modes & MOD_FREQUENCY) {
		freq = (ntv.freq * 1000LL) >> 16;
		if (freq > MAXFREQ)
			L_LINT(time_freq, MAXFREQ);
		else if (freq < -MAXFREQ)
			L_LINT(time_freq, -MAXFREQ);
		else {
			/*
			 * ntv.freq is [PPM * 2^16] = [us/s * 2^16]
			 * time_freq is [ns/s * 2^32]
			 */
			time_freq = ntv.freq * 1000LL * 65536LL;
		}
#ifdef PPS_SYNC
		pps_freq = time_freq;
#endif /* PPS_SYNC */
	}
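
	/*
	 * Worked example of the conversion just above (illustrative
	 * value): a request for +12.5 ppm arrives as ntv.freq =
	 * 12.5 * 65536 = 819200 (us/s scaled by 2^16). Multiplying by
	 * 1000 yields ns/s scaled by 2^16, so the range check sees
	 * 819200 * 1000 >> 16 = 12500 ns/s, and the stored l_fp value
	 * 819200 * 1000 * 65536 equals 12500 * 2^32, i.e. 12500 ns/s.
	 */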
	if (modes & MOD_OFFSET) {
		if (time_status & STA_NANO)
			hardupdate(ntv.offset);
		else
			hardupdate(ntv.offset * 1000);
	}

	/*
	 * Retrieve all clock variables. Note that the TAI offset is
	 * returned only by ntp_gettime().
	 */
	if (time_status & STA_NANO)
		ntv.offset = L_GINT(time_offset);
	else
		ntv.offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
	ntv.freq = L_GINT((time_freq / 1000LL) << 16);
	ntv.maxerror = time_maxerror;
	ntv.esterror = time_esterror;
	ntv.status = time_status;
	ntv.constant = time_constant;
	if (time_status & STA_NANO)
		ntv.precision = time_precision;
	else
		ntv.precision = time_precision / 1000;
	ntv.tolerance = MAXFREQ * SCALE_PPM;
#ifdef PPS_SYNC
	ntv.shift = pps_shift;
	ntv.ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
	if (time_status & STA_NANO)
		ntv.jitter = pps_jitter;
	else
		ntv.jitter = pps_jitter / 1000;
	ntv.stabil = pps_stabil;
	ntv.calcnt = pps_calcnt;
	ntv.errcnt = pps_errcnt;
	ntv.jitcnt = pps_jitcnt;
	ntv.stbcnt = pps_stbcnt;
#endif /* PPS_SYNC */
	splx(s);

	error = copyout((caddr_t)&ntv, (caddr_t)uap->tp, sizeof(ntv));
	if (error)
		goto done2;

	if (ntp_is_time_error())
		td->td_retval[0] = TIME_ERROR;
	else
		td->td_retval[0] = time_state;

done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * ntp_update_second() - called after the tick adjustment
 *
 * This routine (second_overflow() in the original sources) is
 * ordinarily called immediately following ntp_tick_adjust(). While
 * these two routines are normally combined, they are separated here
 * only for the purposes of simulation.
 */
void
ntp_update_second(int64_t *adjustment, time_t *newsec)
{
	int tickrate;
	l_fp ftemp;		/* 32/64-bit temporary */

	/*
	 * On rollover of the second both the nanosecond and microsecond
	 * clocks are updated and the state machine cranked as
	 * necessary. The phase adjustment to be used for the next
	 * second is calculated and the maximum error is increased by
	 * the tolerance.
	 */
	time_maxerror += MAXFREQ / 1000;

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The nano_time() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic.
	 */
	switch (time_state) {

	/*
	 * No warning.
	 */
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	/*
	 * Insert second 23:59:60 following second
	 * 23:59:59.
	 */
	case TIME_INS:
		if (!(time_status & STA_INS))
			time_state = TIME_OK;
		else if ((*newsec) % 86400 == 0) {
			(*newsec)--;
			time_state = TIME_OOP;
			time_tai++;
		}
		break;

	/*
	 * Delete second 23:59:59.
	 */
	case TIME_DEL:
		if (!(time_status & STA_DEL))
			time_state = TIME_OK;
		else if (((*newsec) + 1) % 86400 == 0) {
			(*newsec)++;
			time_tai--;
			time_state = TIME_WAIT;
		}
		break;

	/*
	 * Insert second in progress.
	 */
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	/*
	 * Wait for status bits to clear.
	 */
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the total time adjustment for the next second
	 * in ns. The offset is reduced by a factor depending on
	 * whether the PPS signal is operating. Note that the
	 * value is in effect scaled by the clock frequency,
	 * since the adjustment is added at each tick interrupt.
	 */
	ftemp = time_offset;
#ifdef PPS_SYNC
	/* XXX even if PPS signal dies we should finish adjustment ? */
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		L_RSHIFT(ftemp, pps_shift);
	else
		L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#else
	L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
#endif /* PPS_SYNC */
	time_adj = ftemp;
	L_SUB(time_offset, ftemp);
	L_ADD(time_adj, time_freq);

	/*
	 * Apply any correction from adjtime(2). If more than one second
	 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s
	 * (500 PPM) until the last second is slewed the final < 500
	 * usecs.
	 */
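	/*
	 * For example (illustrative numbers): a 250 ms adjtime(2)
	 * request (time_adjtime = 250000) is slewed at 500 us/s and
	 * completes in about 500 seconds, while a 2.5 s request is
	 * slewed at 5000 us/s until only one second remains and at
	 * 500 us/s from then on.
	 */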
	if (time_adjtime != 0) {
		if (time_adjtime > 1000000)
			tickrate = 5000;
		else if (time_adjtime < -1000000)
			tickrate = -5000;
		else if (time_adjtime > 500)
			tickrate = 500;
		else if (time_adjtime < -500)
			tickrate = -500;
		else
			tickrate = time_adjtime;
		time_adjtime -= tickrate;
		L_LINT(ftemp, tickrate * 1000);
		L_ADD(time_adj, ftemp);
	}
	*adjustment = time_adj;

#ifdef PPS_SYNC
	if (pps_valid > 0)
		pps_valid--;
	else
		time_status &= ~STA_PPSSIGNAL;
#endif /* PPS_SYNC */
}

/*
 * ntp_init() - initialize variables and structures
 *
 * This routine must be called after the kernel variables hz and tick
 * are set or changed and before the next tick interrupt. In this
 * particular implementation, these values are assumed set elsewhere in
 * the kernel. The design allows the clock frequency and tick interval
 * to be changed while the system is running. So, this routine should
 * probably be integrated with the code that does that.
 */
static void
ntp_init(void)
{

	/*
	 * The following variables are initialized only at startup. Only
	 * those structures not cleared by the compiler need to be
	 * initialized, and these only in the simulator. In the actual
	 * kernel, any nonzero values here will quickly evaporate.
	 */
	L_CLR(time_offset);
	L_CLR(time_freq);
#ifdef PPS_SYNC
	pps_tf[0].tv_sec = pps_tf[0].tv_nsec = 0;
	pps_tf[1].tv_sec = pps_tf[1].tv_nsec = 0;
	pps_tf[2].tv_sec = pps_tf[2].tv_nsec = 0;
	pps_fcount = 0;
	L_CLR(pps_freq);
#endif /* PPS_SYNC */
}

SYSINIT(ntpclocks, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, ntp_init, NULL);

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency. The implementation is of an adaptive-parameter,
 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
 * time and frequency offset estimates for each call. If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP). If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For uncompensated quartz crystal oscillators and nominal update
 * intervals less than 256 s, operation should be in phase-lock mode,
 * where the loop is disciplined to phase. For update intervals greater
 * than 1024 s, operation should be in frequency-lock mode, where the
 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
 * is selected by the STA_MODE status bit.
 */
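/*
 * Worked example of the PLL path below (illustrative numbers): with
 * SHIFT_PLL = 4 and time_constant = 0, an offset of 40960 ns applied
 * 16 s after the previous update nudges the frequency by
 * (40960 >> 12) * 16 = 160 ns/s, where 12 = (SHIFT_PLL + 2 +
 * time_constant) << 1, while the remaining phase offset is amortized
 * at 1/2^(SHIFT_PLL + time_constant) = 1/16 per second by
 * ntp_update_second() above.
 */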
static void
hardupdate(long offset)		/* clock offset (ns) */
{
	long mtemp;
	l_fp ftemp;

	/*
	 * Select how the phase is to be controlled and from which
	 * source. If the PPS signal is present and enabled to
	 * discipline the time, the PPS offset is used; otherwise, the
	 * argument offset is used.
	 */
	if (!(time_status & STA_PLL))
		return;
	if (!(time_status & STA_PPSTIME && time_status &
	    STA_PPSSIGNAL)) {
		if (offset > MAXPHASE)
			time_monitor = MAXPHASE;
		else if (offset < -MAXPHASE)
			time_monitor = -MAXPHASE;
		else
			time_monitor = offset;
		L_LINT(time_offset, time_monitor);
	}

	/*
	 * Select how the frequency is to be controlled and in which
	 * mode (PLL or FLL). If the PPS signal is present and enabled
	 * to discipline the frequency, the PPS frequency is used;
	 * otherwise, the argument offset is used to compute it.
	 */
	if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
		time_reftime = time_uptime;
		return;
	}
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time_uptime;
	mtemp = time_uptime - time_reftime;
	L_LINT(ftemp, time_monitor);
	L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
	L_MPY(ftemp, mtemp);
	L_ADD(time_freq, ftemp);
	time_status &= ~STA_MODE;
	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
	    MAXSEC)) {
		L_LINT(ftemp, (time_monitor << 4) / mtemp);
		L_RSHIFT(ftemp, SHIFT_FLL + 4);
		L_ADD(time_freq, ftemp);
		time_status |= STA_MODE;
	}
	time_reftime = time_uptime;
	if (L_GINT(time_freq) > MAXFREQ)
		L_LINT(time_freq, MAXFREQ);
	else if (L_GINT(time_freq) < -MAXFREQ)
		L_LINT(time_freq, -MAXFREQ);
}

#ifdef PPS_SYNC
/*
 * hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. There are two independent
 * first-order feedback loops, one for the phase, the other for the
 * frequency. The phase loop measures and grooms the PPS phase offset
 * and leaves it in a handy spot for the seconds overflow routine. The
 * frequency loop averages successive PPS phase differences and
 * calculates the PPS frequency offset, which is also processed by the
 * seconds overflow routine. The code requires the caller to capture the
 * time and architecture-dependent hardware counter values in
 * nanoseconds at the on-time PPS signal transition.
 *
 * Note that, on some Unix systems this routine runs at an interrupt
 * priority level higher than the timer interrupt routine hardclock().
 * Therefore, the variables used are distinct from the hardclock()
 * variables, except for the actual time and frequency variables, which
 * are determined by this routine and updated atomically.
 */
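/*
 * Illustrative sketch of how this code is usually reached (userland
 * side, not part of the kernel build): a consumer binds the kernel PPS
 * consumer through the RFC 2783 API on a PPS-capable device, e.g.
 *
 *	pps_handle_t ph;
 *	time_pps_create(fd, &ph);
 *	time_pps_kcbind(ph, PPS_KC_HARDPPS, PPS_CAPTUREASSERT,
 *	    PPS_TSFMT_TSPEC);
 *
 * after which the timecounter PPS code calls hardpps() for each
 * captured pulse.
 */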
void
hardpps(struct timespec *tsp,	/* time at PPS */
    long nsec)			/* hardware counter at PPS */
{
	long u_sec, u_nsec, v_nsec; /* temps */
	l_fp ftemp;

	/*
	 * The signal is first processed by a range gate and frequency
	 * discriminator. The range gate rejects noise spikes outside
	 * the range +-500 us. The frequency discriminator rejects input
	 * signals with apparent frequency outside the range 1 +-500
	 * PPM. If two hits occur in the same second, we ignore the
	 * later hit; if not and a hit occurs outside the range gate,
	 * keep the later hit for later comparison, but do not process
	 * it.
	 */
	time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
	time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
	pps_valid = PPS_VALID;
	u_sec = tsp->tv_sec;
	u_nsec = tsp->tv_nsec;
	if (u_nsec >= (NANOSECOND >> 1)) {
		u_nsec -= NANOSECOND;
		u_sec++;
	}
	v_nsec = u_nsec - pps_tf[0].tv_nsec;
	if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
		return;
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0].tv_sec = u_sec;
	pps_tf[0].tv_nsec = u_nsec;

	/*
	 * Compute the difference between the current and previous
	 * counter values. If the difference exceeds 0.5 s, assume it
	 * has wrapped around, so correct 1.0 s. If the result exceeds
	 * the tick interval, the sample point has crossed a tick
	 * boundary during the last second, so correct the tick. Very
	 * intricate.
	 */
	u_nsec = nsec;
	if (u_nsec > (NANOSECOND >> 1))
		u_nsec -= NANOSECOND;
	else if (u_nsec < -(NANOSECOND >> 1))
		u_nsec += NANOSECOND;
	pps_fcount += u_nsec;
	if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
		return;
	time_status &= ~STA_PPSJITTER;

	/*
	 * A three-stage median filter is used to help denoise the PPS
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
		if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 0 1 2 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
		} else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 2 0 1 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 0 2 1 */
			u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
		}
	} else {
		if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
			v_nsec = pps_tf[1].tv_nsec;	/* 2 1 0 */
			u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
		} else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
			v_nsec = pps_tf[0].tv_nsec;	/* 1 0 2 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
		} else {
			v_nsec = pps_tf[2].tv_nsec;	/* 1 2 0 */
			u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
		}
	}
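
	/*
	 * For instance (illustrative numbers): with the three most
	 * recent offsets pps_tf[0..2].tv_nsec = { 400, 250, 380 }, the
	 * branches above select the median 380 as v_nsec and the range
	 * 400 - 250 = 150 as u_nsec, the jitter sample.
	 */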

	/*
	 * Nominal jitter is due to PPS signal noise and interrupt
	 * latency. If it exceeds the popcorn threshold, the sample is
	 * discarded; otherwise, if so enabled, the time offset is
	 * updated. We can tolerate a modest loss of data here without
	 * much degrading time accuracy.
	 *
	 * The measurements being checked here were made with the system
	 * timecounter, so the popcorn threshold is not allowed to fall below
	 * the number of nanoseconds in two ticks of the timecounter. For a
	 * timecounter running faster than 1 GHz the lower bound is 2ns, just
	 * to avoid a nonsensical threshold of zero.
	 */
	if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
	    2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		time_monitor = -v_nsec;
		L_LINT(time_offset, time_monitor);
	}
	pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
	u_sec = pps_tf[0].tv_sec - pps_lastsec;
	if (u_sec < (1 << pps_shift))
		return;

	/*
	 * At the end of the calibration interval the difference between
	 * the first and last counter values becomes the scaled
	 * frequency. It will later be divided by the length of the
	 * interval to determine the frequency update. If the frequency
	 * exceeds a sanity threshold, or if the actual calibration
	 * interval is not equal to the expected length, the data are
	 * discarded. We can tolerate a modest loss of data here without
	 * much degrading frequency accuracy.
	 */
	pps_calcnt++;
	v_nsec = -pps_fcount;
	pps_lastsec = pps_tf[0].tv_sec;
	pps_fcount = 0;
	u_nsec = MAXFREQ << pps_shift;
	if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		return;
	}

	/*
	 * Here the raw frequency offset and wander (stability) are
	 * calculated. If the wander is less than the wander threshold
	 * for four consecutive averaging intervals, the interval is
	 * doubled; if it is greater than the threshold for four
	 * consecutive intervals, the interval is halved. The scaled
	 * frequency offset is converted to frequency offset. The
	 * stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance
	 * monitoring.
	 */
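	/*
	 * Worked example (illustrative numbers): with pps_shift = 4 the
	 * calibration interval is 16 s; if pps_fcount accumulated
	 * -800 ns over that interval, v_nsec = 800 and the averaged
	 * frequency sample is 800 >> 4 = 50 ns/s. The difference
	 * between that sample and the current pps_freq is the wander
	 * checked against PPS_MAXWANDER below.
	 */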
	L_LINT(ftemp, v_nsec);
	L_RSHIFT(ftemp, pps_shift);
	L_SUB(ftemp, pps_freq);
	u_nsec = L_GINT(ftemp);
	if (u_nsec > PPS_MAXWANDER) {
		L_LINT(ftemp, PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else if (u_nsec < -PPS_MAXWANDER) {
		L_LINT(ftemp, -PPS_MAXWANDER);
		pps_intcnt--;
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
	} else {
		pps_intcnt++;
	}
	if (pps_intcnt >= 4) {
		pps_intcnt = 4;
		if (pps_shift < pps_shiftmax) {
			pps_shift++;
			pps_intcnt = 0;
		}
	} else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
		pps_intcnt = -4;
		if (pps_shift > PPS_FAVG) {
			pps_shift--;
			pps_intcnt = 0;
		}
	}
	if (u_nsec < 0)
		u_nsec = -u_nsec;
	pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;

	/*
	 * The PPS frequency is recalculated and clamped to the maximum
	 * MAXFREQ. If enabled, the system clock frequency is updated as
	 * well.
	 */
	L_ADD(pps_freq, ftemp);
	u_nsec = L_GINT(pps_freq);
	if (u_nsec > MAXFREQ)
		L_LINT(pps_freq, MAXFREQ);
	else if (u_nsec < -MAXFREQ)
		L_LINT(pps_freq, -MAXFREQ);
	if (time_status & STA_PPSFREQ)
		time_freq = pps_freq;
}
#endif /* PPS_SYNC */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
sys_adjtime(struct thread *td, struct adjtime_args *uap)
{
	struct timeval delta, olddelta, *deltap;
	int error;

	if (uap->delta) {
		error = copyin(uap->delta, &delta, sizeof(delta));
		if (error)
			return (error);
		deltap = &delta;
	} else
		deltap = NULL;
	error = kern_adjtime(td, deltap, &olddelta);
	if (uap->olddelta && error == 0)
		error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
	return (error);
}

int
kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
{
	struct timeval atv;
	int error;

	mtx_lock(&Giant);
	if (olddelta) {
		atv.tv_sec = time_adjtime / 1000000;
		atv.tv_usec = time_adjtime % 1000000;
		if (atv.tv_usec < 0) {
			atv.tv_usec += 1000000;
			atv.tv_sec--;
		}
		*olddelta = atv;
	}
	if (delta) {
		if ((error = priv_check(td, PRIV_ADJTIME))) {
			mtx_unlock(&Giant);
			return (error);
		}
		time_adjtime = (int64_t)delta->tv_sec * 1000000 +
		    delta->tv_usec;
	}
	mtx_unlock(&Giant);
	return (0);
}

static struct callout resettodr_callout;
static int resettodr_period = 1800;

static void
periodic_resettodr(void *arg __unused)
{

	if (!ntp_is_time_error()) {
		mtx_lock(&Giant);
		resettodr();
		mtx_unlock(&Giant);
	}
	if (resettodr_period > 0)
		callout_schedule(&resettodr_callout, resettodr_period * hz);
}

static void
shutdown_resettodr(void *arg __unused, int howto __unused)
{

	callout_drain(&resettodr_callout);
	if (resettodr_period > 0 && !ntp_is_time_error()) {
		mtx_lock(&Giant);
		resettodr();
		mtx_unlock(&Giant);
	}
}

static int
sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (error || !req->newptr)
		return (error);
	if (cold)
		goto done;
	if (resettodr_period == 0)
		callout_stop(&resettodr_callout);
	else
		callout_reset(&resettodr_callout, resettodr_period * hz,
		    periodic_resettodr, NULL);
done:
	return (0);
}

SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT|CTLFLAG_RWTUN,
    &resettodr_period, 1800, sysctl_resettodr_period, "I",
    "Save system time to RTC with this period (in seconds)");

static void
start_periodic_resettodr(void *arg __unused)
{

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
	    SHUTDOWN_PRI_FIRST);
	callout_init(&resettodr_callout, 1);
	if (resettodr_period == 0)
		return;
	callout_reset(&resettodr_callout, resettodr_period * hz,
	    periodic_resettodr, NULL);
}

SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
    start_periodic_resettodr, NULL);