/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff.  If we meet some day, and you
 * think this stuff is worth it, you can buy me a beer in return.
 * Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Julien Ridoux at the University
 * of Melbourne under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ntp.h"
#include "opt_ffclock.h"

#include <sys/param.h>
#include <sys/kernel.h>
#ifdef FFCLOCK
#include <sys/lock.h>
#include <sys/mutex.h>
#endif
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/timeffc.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>
#include <sys/vdso.h>

/*
 * A large step happens on boot.  This constant detects such steps.
 * It is relatively small so that ntp_update_second gets called enough
 * in the typical 'missed a couple of seconds' case, but doesn't loop
 * forever when the time step is large.
 */
#define LARGE_STEP	200

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
};
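
/*
 * The positional initializer above fills in, in this order, the
 * tc_get_timecount, tc_poll_pps, tc_counter_mask, tc_frequency, tc_name and
 * tc_quality members declared in <sys/timetc.h> (the remaining members
 * default to zero).  The strongly negative quality keeps tc_init() from ever
 * selecting the dummy counter automatically.
 */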

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	uint64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

static struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

int tc_min_ticktock_freq = 1;

time_t time_second = 1;
time_t time_uptime = 1;

struct bintime boottimebin;
struct timeval boottime;
static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
    NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");

static int timestepwarnings;
SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
    &timestepwarnings, 0, "Log time steps");

static void tc_windup(void);
static void cpu_tick_calibrate(int);

void dtrace_getnanotime(struct timespec *tsp);

static int
sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
{
#ifndef __mips__
#ifdef SCTL_MASK32
	int tv[2];

	if (req->flags & SCTL_MASK32) {
		tv[0] = boottime.tv_sec;
		tv[1] = boottime.tv_usec;
		return SYSCTL_OUT(req, tv, sizeof(tv));
	} else
#endif
#endif
		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
}

static int
sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
{
	u_int ncount;
	struct timecounter *tc = arg1;

	ncount = tc->tc_get_timecount(tc);
	return sysctl_handle_int(oidp, &ncount, 0, req);
}

static int
sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
{
	uint64_t freq;
	struct timecounter *tc = arg1;

	freq = tc->tc_frequency;
	return sysctl_handle_64(oidp, &freq, 0, req);
}

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}
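
/*
 * Worked example (illustrative): with a 16-bit counter
 * (tc_counter_mask == 0xffff), a raw read of 0x0002 taken after an offset
 * count of 0xfffd yields (0x0002 - 0xfffd) & 0xffff == 0x0005, so the
 * unsigned subtraction plus mask handles counter wrap-around without any
 * special casing.
 */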

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
 */

#ifdef FFCLOCK
void
fbclock_binuptime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}
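
/*
 * A sketch of the arithmetic above: th_scale holds 2^64 divided by the
 * counter frequency, i.e. the counter period as a 64-bit binary fraction of
 * a second, so th_scale * tc_delta(th) is the time elapsed since the last
 * windup in 2^-64 second units.  For example (numbers assumed): a 1 MHz
 * counter gives th_scale ~= 2^64 / 10^6, and a delta of 500 ticks adds
 * 500 * 2^64 / 10^6 ~= 0.0005 * 2^64, i.e. 500 microseconds, to *bt.
 */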

void
fbclock_nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microuptime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_bintime(struct bintime *bt)
{

	fbclock_binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
fbclock_nanotime(struct timespec *tsp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
fbclock_microtime(struct timeval *tvp)
{
	struct bintime bt;

	fbclock_bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
fbclock_getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getbintime(struct bintime *bt)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
fbclock_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
fbclock_getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	unsigned int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
#else /* !FFCLOCK */
void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}
#endif /* FFCLOCK */

#ifdef FFCLOCK
/*
 * Support for feed-forward synchronization algorithms.  This is heavily
 * inspired by the timehands mechanism but kept independent from it.
 * *_windup() functions have some connection to avoid accessing the
 * timecounter hardware more than necessary.
 */

/* Feed-forward clock estimates kept updated by the synchronization daemon. */
struct ffclock_estimate ffclock_estimate;
struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
uint32_t ffclock_status;		/* Feed-forward clock status. */
int8_t ffclock_updated;			/* New estimates are available. */
struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */

struct fftimehands {
	struct ffclock_estimate	cest;
	struct bintime		tick_time;
	struct bintime		tick_time_lerp;
	ffcounter		tick_ffcount;
	uint64_t		period_lerp;
	volatile uint8_t	gen;
	struct fftimehands	*next;
};

#define	NUM_ELEMENTS(x)	(sizeof(x) / sizeof(*x))

static struct fftimehands ffth[10];
static struct fftimehands *volatile fftimehands = ffth;

static void
ffclock_init(void)
{
	struct fftimehands *cur;
	struct fftimehands *last;

	memset(ffth, 0, sizeof(ffth));

	last = ffth + NUM_ELEMENTS(ffth) - 1;
	for (cur = ffth; cur < last; cur++)
		cur->next = cur + 1;
	last->next = ffth;

	ffclock_updated = 0;
	ffclock_status = FFCLOCK_STA_UNSYNC;
	mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
}
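
/*
 * Note on the period encoding used below: cest.period is the counter period
 * in 2^-64 second units, conceptually 2^64 / tc_frequency.  Since 2^64
 * itself does not fit in a uint64_t, it is computed as
 * ((1ULL << 63) / freq) << 1, trading the lowest bit for overflow safety
 * (the same trick the th_scale computation in tc_windup() uses).
 */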

/*
 * Reset the feed-forward clock estimates.  Called from inittodr() to get
 * things kick-started and uses the timecounter nominal frequency as a first
 * period estimate.  Note: this function may be called several times just
 * after boot.  Note: this is the only function that sets the value of boot
 * time for the monotonic (i.e. uptime) version of the feed-forward clock.
 */
void
ffclock_reset_clock(struct timespec *ts)
{
	struct timecounter *tc;
	struct ffclock_estimate cest;

	tc = timehands->th_counter;
	memset(&cest, 0, sizeof(struct ffclock_estimate));

	timespec2bintime(ts, &ffclock_boottime);
	timespec2bintime(ts, &(cest.update_time));
	ffclock_read_counter(&cest.update_ffcount);
	cest.leapsec_next = 0;
	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest.errb_abs = 0;
	cest.errb_rate = 0;
	cest.status = FFCLOCK_STA_UNSYNC;
	cest.leapsec_total = 0;
	cest.leapsec = 0;

	mtx_lock(&ffclock_mtx);
	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
	ffclock_updated = INT8_MAX;
	mtx_unlock(&ffclock_mtx);

	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
	    (unsigned long)ts->tv_nsec);
}

/*
 * Sub-routine to convert a time interval measured in RAW counter units to
 * time in seconds stored in bintime format.
 * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
 * larger than the max value of u_int (on 32 bit architectures).  Loop to
 * consume extra cycles.
 */
static void
ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
{
	struct bintime bt2;
	ffcounter delta, delta_max;

	delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
	bintime_clear(bt);
	do {
		if (ffdelta > delta_max)
			delta = delta_max;
		else
			delta = ffdelta;
		bt2.sec = 0;
		bt2.frac = period;
		bintime_mul(&bt2, (unsigned int)delta);
		bintime_add(bt, &bt2);
		ffdelta -= delta;
	} while (ffdelta > 0);
}
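
/*
 * Worked example (illustrative): bintime_mul() takes a u_int, so with
 * delta_max == 2^32 - 1 an ffdelta of 6,000,000,000 counter units is
 * consumed in two passes, one of 4,294,967,295 units and one of
 * 1,705,032,705 units, each converted and accumulated into *bt.
 */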

/*
 * Update the fftimehands.
 * Push the tick ffcount and time(s) forward based on the current clock
 * estimate.  The conversion from ffcounter to bintime relies on the
 * difference clock principle, whose accuracy relies on computing small time
 * intervals.  If a new clock estimate has been passed in by the
 * synchronisation daemon, make it current, and compute the linear
 * interpolation for monotonic time if needed.
 */
static void
ffclock_windup(unsigned int delta)
{
	struct ffclock_estimate *cest;
	struct fftimehands *ffth;
	struct bintime bt, gap_lerp;
	ffcounter ffdelta;
	uint64_t frac;
	unsigned int polling;
	uint8_t forward_jump, ogen;

	/*
	 * Pick the next timehand, copy current ffclock estimates and move
	 * tick times and counter forward.
	 */
	forward_jump = 0;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;
	cest = &ffth->cest;
	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
	ffdelta = (ffcounter)delta;
	ffth->period_lerp = fftimehands->period_lerp;

	ffth->tick_time = fftimehands->tick_time;
	ffclock_convert_delta(ffdelta, cest->period, &bt);
	bintime_add(&ffth->tick_time, &bt);

	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
	bintime_add(&ffth->tick_time_lerp, &bt);

	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;

	/*
	 * Assess the status of the clock: if the last update is too old, it
	 * is likely the synchronisation daemon is dead and the clock is free
	 * running.
	 */
	if (ffclock_updated == 0) {
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
			ffclock_status |= FFCLOCK_STA_UNSYNC;
	}

	/*
	 * If available, grab updated clock estimates and make them current.
	 * Recompute time at this tick using the updated estimates.  The clock
	 * estimates passed in by the feed-forward synchronisation daemon may
	 * result in time conversions that are not monotonically increasing
	 * (just after the update).  time_lerp is a particular linear
	 * interpolation over the synchronisation algo polling period that
	 * ensures monotonicity for the clock ids requesting it.
	 */
	if (ffclock_updated > 0) {
		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
		ffth->tick_time = cest->update_time;
		ffclock_convert_delta(ffdelta, cest->period, &bt);
		bintime_add(&ffth->tick_time, &bt);

		/* ffclock_reset sets ffclock_updated to INT8_MAX */
		if (ffclock_updated == INT8_MAX)
			ffth->tick_time_lerp = ffth->tick_time;

		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
			forward_jump = 1;
		else
			forward_jump = 0;

		bintime_clear(&gap_lerp);
		if (forward_jump) {
			gap_lerp = ffth->tick_time;
			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
		} else {
			gap_lerp = ffth->tick_time_lerp;
			bintime_sub(&gap_lerp, &ffth->tick_time);
		}

		/*
		 * The reset from the RTC clock may be far from accurate, and
		 * reducing the gap between real time and interpolated time
		 * could take a very long time if the interpolated clock
		 * insists on strict monotonicity.  The clock is reset under
		 * very strict conditions (kernel time is known to be wrong
		 * and the synchronization daemon has been restarted
		 * recently).  ffclock_boottime absorbs the jump to ensure
		 * boot time is correct and uptime functions stay consistent.
		 */
		if (((ffclock_status & FFCLOCK_STA_UNSYNC) ==
		    FFCLOCK_STA_UNSYNC) &&
		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
		    ((cest->status & FFCLOCK_STA_WARMUP) ==
		    FFCLOCK_STA_WARMUP)) {
			if (forward_jump)
				bintime_add(&ffclock_boottime, &gap_lerp);
			else
				bintime_sub(&ffclock_boottime, &gap_lerp);
			ffth->tick_time_lerp = ffth->tick_time;
			bintime_clear(&gap_lerp);
		}

		ffclock_status = cest->status;
		ffth->period_lerp = cest->period;

		/*
		 * Compute the corrected period used for the linear
		 * interpolation of time.  The rate of linear interpolation
		 * is capped to 5000PPM (5ms/s).
		 */
		if (bintime_isset(&gap_lerp)) {
			ffdelta = cest->update_ffcount;
			ffdelta -= fftimehands->cest.update_ffcount;
			ffclock_convert_delta(ffdelta, cest->period, &bt);
			polling = bt.sec;
			bt.sec = 0;
			bt.frac = 5000000 * (uint64_t)18446744073LL;
			bintime_mul(&bt, polling);
			if (bintime_cmp(&gap_lerp, &bt, >))
				gap_lerp = bt;

			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
			frac = 0;
			if (gap_lerp.sec > 0) {
				frac -= 1;
				frac /= ffdelta / gap_lerp.sec;
			}
			frac += gap_lerp.frac / ffdelta;

			if (forward_jump)
				ffth->period_lerp += frac;
			else
				ffth->period_lerp -= frac;
		}

		ffclock_updated = 0;
	}
	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}
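
/*
 * A sketch of the capping arithmetic above: 18446744073 ~= 2^64 / 10^9, so
 * bt.frac = 5000000 * 18446744073 represents roughly 5 ms as a binary
 * fraction of a second.  Multiplied by the polling interval in seconds,
 * this caps the interpolation gap absorbed per update at 5 ms per second
 * of polling, i.e. the advertised 5000 PPM.
 */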

/*
 * Adjust the fftimehands when the timecounter is changed.  Stating the
 * obvious, the old and new hardware counters cannot be read simultaneously.
 * tc_windup() does read the two counters 'back to back', but a few cycles
 * are effectively lost, and not accumulated in tick_ffcount.  This is a
 * fairly radical operation for a feed-forward synchronization daemon, and
 * it is the daemon's job not to push irrelevant data to the kernel.
 * Because there is no locking here, simply force any pending or next update
 * to be ignored, giving the daemon a chance to realize the counter has
 * changed.
 */
static void
ffclock_change_tc(struct timehands *th)
{
	struct fftimehands *ffth;
	struct ffclock_estimate *cest;
	struct timecounter *tc;
	uint8_t ogen;

	tc = th->th_counter;
	ffth = fftimehands->next;
	ogen = ffth->gen;
	ffth->gen = 0;

	cest = &ffth->cest;
	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
	cest->period = ((1ULL << 63) / tc->tc_frequency) << 1;
	cest->errb_abs = 0;
	cest->errb_rate = 0;
	cest->status |= FFCLOCK_STA_UNSYNC;

	ffth->tick_ffcount = fftimehands->tick_ffcount;
	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
	ffth->tick_time = fftimehands->tick_time;
	ffth->period_lerp = cest->period;

	/* Do not lock but ignore next update from synchronization daemon. */
	ffclock_updated--;

	if (++ogen == 0)
		ogen = 1;
	ffth->gen = ogen;
	fftimehands = ffth;
}

/*
 * Retrieve feed-forward counter and time of last kernel tick.
 */
void
ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/* No locking but check generation has not changed. */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
			*bt = ffth->tick_time_lerp;
		else
			*bt = ffth->tick_time;
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != ffth->gen);
}

/*
 * Absolute clock conversion.  Low level function to convert ffcounter to
 * bintime.  The ffcounter is converted using the current ffclock period
 * estimate or the "interpolated period" to ensure monotonicity.
 * NOTE: this conversion may have been deferred, and the clock updated since
 * the hardware counter has been read.
 */
void
ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
{
	struct fftimehands *ffth;
	struct bintime bt2;
	ffcounter ffdelta;
	uint8_t gen;

	/*
	 * No locking but check generation has not changed.  Also need to
	 * make sure ffdelta is positive, i.e. ffcount > tick_ffcount.
	 */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		if (ffcount > ffth->tick_ffcount)
			ffdelta = ffcount - ffth->tick_ffcount;
		else
			ffdelta = ffth->tick_ffcount - ffcount;

		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
			*bt = ffth->tick_time_lerp;
			ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
		} else {
			*bt = ffth->tick_time;
			ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
		}

		if (ffcount > ffth->tick_ffcount)
			bintime_add(bt, &bt2);
		else
			bintime_sub(bt, &bt2);
	} while (gen == 0 || gen != ffth->gen);
}

/*
 * Difference clock conversion.
 * Low level function to convert a time interval measured in RAW counter
 * units into bintime.  The difference clock allows measuring small intervals
 * much more reliably than the absolute clock.
 */
void
ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
{
	struct fftimehands *ffth;
	uint8_t gen;

	/* No locking but check generation has not changed. */
	do {
		ffth = fftimehands;
		gen = ffth->gen;
		ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
	} while (gen == 0 || gen != ffth->gen);
}

/*
 * Access to current ffcounter value.
 */
void
ffclock_read_counter(ffcounter *ffcount)
{
	struct timehands *th;
	struct fftimehands *ffth;
	unsigned int gen, delta;

	/*
	 * ffclock_windup() is called from tc_windup(), so it is safe to rely
	 * on th->th_generation only, for a correct delta and ffcounter.
	 */
	do {
		th = timehands;
		gen = th->th_generation;
		ffth = fftimehands;
		delta = tc_delta(th);
		*ffcount = ffth->tick_ffcount;
	} while (gen == 0 || gen != th->th_generation);

	*ffcount += delta;
}
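
/*
 * Usage sketch (illustrative, not from this file): grab a cheap raw
 * timestamp in a hot path and convert it to time later, which is exactly
 * the deferred conversion ffclock_convert_abs() allows for:
 *
 *	ffcounter ffc;
 *	struct bintime bt;
 *
 *	ffclock_read_counter(&ffc);
 *	...			time passes, estimates may be updated
 *	ffclock_convert_abs(ffc, &bt, FFCLOCK_LERP);
 */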

void
binuptime(struct bintime *bt)
{

	binuptime_fromclock(bt, sysclock_active);
}

void
nanouptime(struct timespec *tsp)
{

	nanouptime_fromclock(tsp, sysclock_active);
}

void
microuptime(struct timeval *tvp)
{

	microuptime_fromclock(tvp, sysclock_active);
}

void
bintime(struct bintime *bt)
{

	bintime_fromclock(bt, sysclock_active);
}

void
nanotime(struct timespec *tsp)
{

	nanotime_fromclock(tsp, sysclock_active);
}

void
microtime(struct timeval *tvp)
{

	microtime_fromclock(tvp, sysclock_active);
}

void
getbinuptime(struct bintime *bt)
{

	getbinuptime_fromclock(bt, sysclock_active);
}

void
getnanouptime(struct timespec *tsp)
{

	getnanouptime_fromclock(tsp, sysclock_active);
}

void
getmicrouptime(struct timeval *tvp)
{

	getmicrouptime_fromclock(tvp, sysclock_active);
}

void
getbintime(struct bintime *bt)
{

	getbintime_fromclock(bt, sysclock_active);
}

void
getnanotime(struct timespec *tsp)
{

	getnanotime_fromclock(tsp, sysclock_active);
}

void
getmicrotime(struct timeval *tvp)
{

	getmicrotime_fromclock(tvp, sysclock_active);
}

#endif /* FFCLOCK */

/*
 * This is a clone of getnanotime and used for walltimestamps.
 * The dtrace_ prefix prevents fbt from creating probes for
 * it so walltimestamp can be safely used in all fbt probes.
 */
void
dtrace_getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * System clock currently providing time to the system.  Modifiable via
 * sysctl when the FFCLOCK option is defined.
 */
int sysclock_active = SYSCLOCK_FBCK;

/* Internal NTP status and error estimates. */
extern int time_status;
extern long time_esterror;

/*
 * Take a snapshot of sysclock data which can be used to compare system
 * clocks and generate timestamps after the fact.
 */
void
sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
{
	struct fbclock_info *fbi;
	struct timehands *th;
	struct bintime bt;
	unsigned int delta, gen;
#ifdef FFCLOCK
	ffcounter ffcount;
	struct fftimehands *ffth;
	struct ffclock_info *ffi;
	struct ffclock_estimate cest;

	ffi = &clock_snap->ff_info;
#endif

	fbi = &clock_snap->fb_info;
	delta = 0;

	do {
		th = timehands;
		gen = th->th_generation;
		fbi->th_scale = th->th_scale;
		fbi->tick_time = th->th_offset;
#ifdef FFCLOCK
		ffth = fftimehands;
		ffi->tick_time = ffth->tick_time;
		ffi->tick_time_lerp = ffth->tick_time_lerp;
		ffi->period = ffth->cest.period;
		ffi->period_lerp = ffth->period_lerp;
		clock_snap->ffcount = ffth->tick_ffcount;
		cest = ffth->cest;
#endif
		if (!fast)
			delta = tc_delta(th);
	} while (gen == 0 || gen != th->th_generation);

	clock_snap->delta = delta;
	clock_snap->sysclock_active = sysclock_active;

	/* Record feedback clock status and error. */
	clock_snap->fb_info.status = time_status;
	/* XXX: Very crude estimate of feedback clock error. */
	bt.sec = time_esterror / 1000000;
	bt.frac = (time_esterror - bt.sec * 1000000) *
	    (uint64_t)18446744073709ULL;
	clock_snap->fb_info.error = bt;

#ifdef FFCLOCK
	if (!fast)
		clock_snap->ffcount += delta;

	/* Record feed-forward clock leap second adjustment. */
	ffi->leapsec_adjustment = cest.leapsec_total;
	if (clock_snap->ffcount > cest.leapsec_next)
		ffi->leapsec_adjustment -= cest.leapsec;

	/* Record feed-forward clock status and error. */
	clock_snap->ff_info.status = cest.status;
	ffcount = clock_snap->ffcount - cest.update_ffcount;
	ffclock_convert_delta(ffcount, cest.period, &bt);
	/* 18446744073709 = int(2^64/1e12), err_bound_rate in [ps/s]. */
	bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
	/* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
	bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
	clock_snap->ff_info.error = bt;
#endif
}

/*
 * Convert a sysclock snapshot into a struct bintime based on the specified
 * clock source and flags.
 */
int
sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
    int whichclock, uint32_t flags)
{
#ifdef FFCLOCK
	struct bintime bt2;
	uint64_t period;
#endif

	switch (whichclock) {
	case SYSCLOCK_FBCK:
		*bt = cs->fb_info.tick_time;

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0)
			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);

		if ((flags & FBCLOCK_UPTIME) == 0)
			bintime_add(bt, &boottimebin);
		break;
#ifdef FFCLOCK
	case SYSCLOCK_FFWD:
		if (flags & FFCLOCK_LERP) {
			*bt = cs->ff_info.tick_time_lerp;
			period = cs->ff_info.period_lerp;
		} else {
			*bt = cs->ff_info.tick_time;
			period = cs->ff_info.period;
		}

		/* If snapshot was created with !fast, delta will be >0. */
		if (cs->delta > 0) {
			ffclock_convert_delta(cs->delta, period, &bt2);
			bintime_add(bt, &bt2);
		}

		/* Leap second adjustment. */
		if (flags & FFCLOCK_LEAPSEC)
			bt->sec -= cs->ff_info.leapsec_adjustment;

		/* Boot time adjustment, for uptime/monotonic clocks. */
		if (flags & FFCLOCK_UPTIME)
			bintime_sub(bt, &ffclock_boottime);
		break;
#endif
	default:
		return (EINVAL);
	}

	return (0);
}
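
/*
 * Usage sketch (illustrative, not from this file): compare both system
 * clocks from a single counter read:
 *
 *	struct sysclock_snap snap;
 *	struct bintime fb, ff;
 *
 *	sysclock_getsnapshot(&snap, 0);
 *	sysclock_snap2bintime(&snap, &fb, SYSCLOCK_FBCK, 0);
 *	sysclock_snap2bintime(&snap, &ff, SYSCLOCK_FFWD, FFCLOCK_LERP);
 */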

/*
 * Initialize a new timecounter and possibly use it.
 */
void
tc_init(struct timecounter *tc)
{
	u_int u;
	struct sysctl_oid *tc_root;

	u = tc->tc_frequency / tc->tc_counter_mask;
	/* XXX: We need some margin here, 10% is a guess */
	u *= 11;
	u /= 10;
	if (u > hz && tc->tc_quality >= 0) {
		tc->tc_quality = -2000;
		if (bootverbose) {
			printf("Timecounter \"%s\" frequency %ju Hz",
			    tc->tc_name, (uintmax_t)tc->tc_frequency);
			printf(" -- Insufficient hz, needs at least %u\n", u);
		}
	} else if (tc->tc_quality >= 0 || bootverbose) {
		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
		    tc->tc_name, (uintmax_t)tc->tc_frequency,
		    tc->tc_quality);
	}

	tc->tc_next = timecounters;
	timecounters = tc;
	/*
	 * Set up sysctl tree for this counter.
	 */
	tc_root = SYSCTL_ADD_NODE(NULL,
	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO,
	    tc->tc_name, CTLFLAG_RW, 0, "timecounter description");
	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
	    "mask for implemented bits");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
	    sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
	    "goodness of time counter");
	/*
	 * Never automatically use a timecounter with negative quality.
	 * Even though we run on the dummy counter, switching here may be
	 * worse since this timecounter may not be monotonic.
	 */
	if (tc->tc_quality < 0)
		return;
	if (tc->tc_quality < timecounter->tc_quality)
		return;
	if (tc->tc_quality == timecounter->tc_quality &&
	    tc->tc_frequency < timecounter->tc_frequency)
		return;
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}

/* Report the frequency of the current timecounter. */
uint64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
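
/*
 * Worked example (numbers assumed): stepping the clock to ts = 1000.0 s
 * while binuptime() reads 7.5 s sets boottimebin to 992.5 s, so subsequent
 * bintime() calls (uptime plus boot time) return ~1000.0 s while the
 * monotonic uptime clock itself is left untouched.
 */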

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	uint64_t scale;
	u_int delta, ncount, ogen;
	int i;
	time_t t;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
#ifdef FFCLOCK
	ffclock_windup(delta);
#endif
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	while (delta > th->th_counter->tc_frequency) {
		/* Eat complete unadjusted seconds. */
		delta -= th->th_counter->tc_frequency;
		th->th_offset.sec++;
	}
	if ((delta > th->th_counter->tc_frequency / 2) &&
	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
		/* The product th_scale * delta just barely overflows. */
		th->th_offset.sec++;
	}
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which
	 * might have a different rate than the current NTP second.  It
	 * doesn't matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--) {
		t = bt.sec;
		ntp_update_second(&th->th_adjustment, &bt.sec);
		if (bt.sec != t)
			boottimebin.sec += bt.sec - t;
	}
	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
#ifndef __arm__
		if ((timecounter->tc_flags & TC_FLAGS_C3STOP) != 0)
			cpu_disable_deep_sleep++;
		if ((th->th_counter->tc_flags & TC_FLAGS_C3STOP) != 0)
			cpu_disable_deep_sleep--;
#endif
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
#ifdef FFCLOCK
		ffclock_change_tc(th);
#endif
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, that
	 * leaves no suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (uint64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
#ifdef FFCLOCK
	switch (sysclock_active) {
	case SYSCLOCK_FBCK:
#endif
		time_second = th->th_microtime.tv_sec;
		time_uptime = th->th_offset.sec;
#ifdef FFCLOCK
		break;
	case SYSCLOCK_FFWD:
		time_second = fftimehands->tick_time_lerp.sec;
		time_uptime = fftimehands->tick_time_lerp.sec -
		    ffclock_boottime.sec;
		break;
	}
#endif

	timehands = th;
	timekeep_push_vdso();
}
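
/*
 * Worked example for the divide-before-multiply above: the effective
 * multiplier applied to th_adjustment is 2199 / 512 = 4.294921875 (it is
 * 2199 / 1024 before the final doubling of scale), versus the exact
 * 2^32 / 10^9 = 4.294967296.  That is about 10.6 PPM low, the systematic
 * undercompensation the comment quotes as 10PPM, and it never overflows
 * the 64-bit intermediate.
 */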

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		timekeep_push_vdso();
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A",
    "Timecounter hardware selected");

/* Report the available timecounter hardware. */
static int
sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
{
	char buf[32], *spc;
	struct timecounter *tc;
	int error;

	spc = "";
	error = 0;
	for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) {
		sprintf(buf, "%s%s(%d)",
		    spc, tc->tc_name, tc->tc_quality);
		error = SYSCTL_OUT(req, buf, strlen(buf));
		spc = " ";
	}
	return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");

/*
 * RFC 2783 PPS-API implementation.
 */

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef FFCLOCK
	struct pps_fetch_ffc_args *fapi_ffc;
#endif
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
#ifdef FFCLOCK
		/* Ensure only a single clock is selected for ffc timestamp. */
		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
			return (EINVAL);
#endif
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
#ifdef FFCLOCK
	case PPS_IOC_FETCH_FFCOUNTER:
		fapi_ffc = (struct pps_fetch_ffc_args *)data;
		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
		    PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
		/* Overwrite timestamps if feedback clock selected. */
		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
		case PPS_TSCLK_FBCK:
			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
			    pps->ppsinfo.assert_timestamp;
			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
			    pps->ppsinfo.clear_timestamp;
			break;
		case PPS_TSCLK_FFWD:
			break;
		default:
			break;
		}
		return (0);
#endif /* FFCLOCK */
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOIOCTL);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef FFCLOCK
	pps->ppscap |= PPS_TSCLK_MASK;
#endif
}

void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
#ifdef FFCLOCK
	pps->capffth = fftimehands;
#endif
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}
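
/*
 * Typical driver flow (sketch, not from this file; `sc->pps` is a
 * hypothetical softc-embedded struct pps_state set up with pps_init()):
 * capture as close to the hardware event as possible, then do the heavier
 * processing:
 *
 *	pps_capture(&sc->pps);
 *	...
 *	pps_event(&sc->pps, PPS_CAPTUREASSERT);
 */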

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;
#ifdef FFCLOCK
	struct timespec *tsp_ffc;
	pps_seq_t *pseq_ffc;
	ffcounter *ffcount;
#endif

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
#endif
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
#ifdef FFCLOCK
		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
#endif
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}

#ifdef FFCLOCK
	*ffcount = pps->capffth->tick_ffcount + tcount;
	/* ffclock_convert_delta() clears bt before accumulating into it. */
	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
	bintime_add(&bt, &pps->capffth->tick_time);
	bintime2timespec(&bt, &ts);
	(*pseq_ffc)++;
	*tsp_ffc = ts;
#endif

#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
    "Approximate number of hardclock ticks in a millisecond");

void
tc_ticktock(int cnt)
{
	static int count;

	count += cnt;
	if (count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);

#ifdef FFCLOCK
	ffclock_init();
#endif
	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
	tc_windup();
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);

/* Cpu tick handling -------------------------------------------------*/

static int cpu_tick_variable;
static uint64_t cpu_tick_frequency;

static uint64_t
tc_cpu_ticks(void)
{
	static uint64_t base;
	static unsigned last;
	unsigned u;
	struct timecounter *tc;

	tc = timehands->th_counter;
	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
	if (u < last)
		base += (uint64_t)tc->tc_counter_mask + 1;
	last = u;
	return (u + base);
}

void
cpu_tick_calibration(void)
{
	static time_t last_calib;

	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
		cpu_tick_calibrate(0);
		last_calib = time_uptime;
	}
}

/*
 * This function gets called every 16 seconds on only one designated
 * CPU in the system from hardclock() via cpu_tick_calibration().
 *
 * Whenever the real time clock is stepped we get called with reset=1
 * to make sure we handle suspend/resume and similar events correctly.
 */

static void
cpu_tick_calibrate(int reset)
{
	static uint64_t c_last;
	uint64_t c_this, c_delta;
	static struct bintime t_last;
	struct bintime t_this, t_delta;
	uint32_t divi;

	if (reset) {
		/* The clock was stepped, abort & reset */
		t_last.sec = 0;
		return;
	}

	/* we don't calibrate fixed rate cputicks */
	if (!cpu_tick_variable)
		return;

	getbinuptime(&t_this);
	c_this = cpu_ticks();
	if (t_last.sec != 0) {
		c_delta = c_this - c_last;
		t_delta = t_this;
		bintime_sub(&t_delta, &t_last);
		/*
		 * Headroom:
		 * 	2^(64-20) / 16[s] =
		 * 	2^(44) / 16[s] =
		 * 	17.592.186.044.416 / 16 =
		 * 	1.099.511.627.776 [Hz]
		 */
		divi = t_delta.sec << 20;
		divi |= t_delta.frac >> (64 - 20);
		c_delta <<= 20;
		c_delta /= divi;
		if (c_delta > cpu_tick_frequency) {
			if (0 && bootverbose)
				printf("cpu_tick increased to %ju Hz\n",
				    (uintmax_t)c_delta);
			cpu_tick_frequency = c_delta;
		}
	}
	c_last = c_this;
	t_last = t_this;
}

void
set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
{

	if (func == NULL) {
		cpu_ticks = tc_cpu_ticks;
	} else {
		cpu_tick_frequency = freq;
		cpu_tick_variable = var;
		cpu_ticks = func;
	}
}

uint64_t
cpu_tickrate(void)
{

	if (cpu_ticks == tc_cpu_ticks)
		return (tc_getfrequency());
	return (cpu_tick_frequency);
}

/*
 * We need to be slightly careful converting cputicks to microseconds.
 * There is plenty of margin in 64 bits of microseconds (half a million
 * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
 * before divide conversion (to retain precision) we find that the
 * margin shrinks to 1.5 hours (one millionth of 146y).
 * With a three prong approach we never lose significant bits, no
 * matter what the cputick rate and length of timeinterval is.
 */

uint64_t
cputick2usec(uint64_t tick)
{

	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
		return (tick / (cpu_tickrate() / 1000000LL));
	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
	else
		return ((tick * 1000000LL) / cpu_tickrate());
}
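
/*
 * Worked example (numbers assumed): at a 4 GHz tick rate, one hour is
 * 1.44e13 ticks, which still fits a multiply-first conversion
 * (1.44e13 * 10^6 < 2^64), so the most precise third branch is used.
 * Two hours is 2.88e13 ticks, above floor(2^64 / 10^6), so the middle
 * branch multiplies by only 10^3 and divides the rate by 10^3 instead,
 * trading a little precision for intermediate-overflow safety.
 */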
1856 */ 1857 1858 uint64_t 1859 cputick2usec(uint64_t tick) 1860 { 1861 1862 if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */ 1863 return (tick / (cpu_tickrate() / 1000000LL)); 1864 else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */ 1865 return ((tick * 1000LL) / (cpu_tickrate() / 1000LL)); 1866 else 1867 return ((tick * 1000000LL) / cpu_tickrate()); 1868 } 1869 1870 cpu_tick_f *cpu_ticks = tc_cpu_ticks; 1871 1872 static int vdso_th_enable = 1; 1873 static int 1874 sysctl_fast_gettime(SYSCTL_HANDLER_ARGS) 1875 { 1876 int old_vdso_th_enable, error; 1877 1878 old_vdso_th_enable = vdso_th_enable; 1879 error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req); 1880 if (error != 0) 1881 return (error); 1882 vdso_th_enable = old_vdso_th_enable; 1883 timekeep_push_vdso(); 1884 return (0); 1885 } 1886 SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime, 1887 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1888 NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day"); 1889 1890 uint32_t 1891 tc_fill_vdso_timehands(struct vdso_timehands *vdso_th) 1892 { 1893 struct timehands *th; 1894 uint32_t enabled; 1895 1896 th = timehands; 1897 vdso_th->th_algo = VDSO_TH_ALGO_1; 1898 vdso_th->th_scale = th->th_scale; 1899 vdso_th->th_offset_count = th->th_offset_count; 1900 vdso_th->th_counter_mask = th->th_counter->tc_counter_mask; 1901 vdso_th->th_offset = th->th_offset; 1902 vdso_th->th_boottime = boottimebin; 1903 enabled = cpu_fill_vdso_timehands(vdso_th); 1904 if (!vdso_th_enable) 1905 enabled = 0; 1906 return (enabled); 1907 } 1908 1909 #ifdef COMPAT_FREEBSD32 1910 uint32_t 1911 tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32) 1912 { 1913 struct timehands *th; 1914 uint32_t enabled; 1915 1916 th = timehands; 1917 vdso_th32->th_algo = VDSO_TH_ALGO_1; 1918 *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale; 1919 vdso_th32->th_offset_count = th->th_offset_count; 1920 vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask; 1921 vdso_th32->th_offset.sec = th->th_offset.sec; 1922 *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac; 1923 vdso_th32->th_boottime.sec = boottimebin.sec; 1924 *(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac; 1925 enabled = cpu_fill_vdso_timehands32(vdso_th32); 1926 if (!vdso_th_enable) 1927 enabled = 0; 1928 return (enabled); 1929 } 1930 #endif 1931