/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/timetc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/timex.h>
#include <sys/timepps.h>

/*
 * Number of timecounters used to implement stable storage
 */
#ifndef NTIMECOUNTER
#define NTIMECOUNTER	45
#endif

static MALLOC_DEFINE(M_TIMECOUNTER, "timecounter",
    "Timecounter stable storage");

static void tco_setscales __P((struct timecounter *tc));
static __inline unsigned tco_delta __P((struct timecounter *tc));

time_t time_second;

struct timeval boottime;
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

static unsigned nmicrotime;
static unsigned nnanotime;
static unsigned ngetmicrotime;
static unsigned ngetnanotime;
static unsigned nmicrouptime;
static unsigned nnanouptime;
static unsigned ngetmicrouptime;
static unsigned ngetnanouptime;
SYSCTL_INT(_kern_timecounter, OID_AUTO, nmicrotime, CTLFLAG_RD, &nmicrotime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, nnanotime, CTLFLAG_RD, &nnanotime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, nmicrouptime, CTLFLAG_RD, &nmicrouptime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, nnanouptime, CTLFLAG_RD, &nnanouptime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, ngetmicrotime, CTLFLAG_RD, &ngetmicrotime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, ngetnanotime, CTLFLAG_RD, &ngetnanotime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, ngetmicrouptime, CTLFLAG_RD, &ngetmicrouptime, 0, "");
SYSCTL_INT(_kern_timecounter, OID_AUTO, ngetnanouptime, CTLFLAG_RD, &ngetnanouptime, 0, "");

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * timeservices.
 */

static unsigned
dummy_get_timecount(struct timecounter *tc)
{
        static unsigned now;

        return (++now);
}

static struct timecounter dummy_timecounter = {
        dummy_get_timecount,
        0,
        ~0u,
        1000000,
        "dummy"
};

struct timecounter *timecounter = &dummy_timecounter;

static __inline unsigned
tco_delta(struct timecounter *tc)
{

        return ((tc->tc_get_timecount(tc) - tc->tc_offset_count) &
            tc->tc_counter_mask);
}

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 */

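/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * the functions described above to time a short operation.  The "up" variants
 * are preferred for intervals because they are unaffected by wall-clock steps.
 * "TIMECOUNTER_EXAMPLES" is a made-up guard that is never defined, and
 * timespecsub() is assumed to be the two-pointer kernel macro of this vintage
 * (the same style as the timespecadd() use in pps_event() below).
 */
#ifdef TIMECOUNTER_EXAMPLES
static void
example_time_an_operation(void)
{
        struct timespec t0, t1;

        nanouptime(&t0);                /* precise, boot-relative start stamp */
        /* ... the work being measured goes here ... */
        nanouptime(&t1);                /* precise, boot-relative end stamp */
        timespecsub(&t1, &t0);          /* t1 now holds the elapsed time */
        printf("elapsed %ld.%09lds\n", (long)t1.tv_sec, (long)t1.tv_nsec);

        /* getnanouptime() would do for coarse (1/HZ) stamps, at less cost. */
}
#endif /* TIMECOUNTER_EXAMPLES */
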
void
getmicrotime(struct timeval *tvp)
{
        struct timecounter *tc;

        ngetmicrotime++;
        tc = timecounter;
        *tvp = tc->tc_microtime;
}

void
getnanotime(struct timespec *tsp)
{
        struct timecounter *tc;

        ngetnanotime++;
        tc = timecounter;
        *tsp = tc->tc_nanotime;
}

void
microtime(struct timeval *tv)
{
        struct timecounter *tc;

        nmicrotime++;
        tc = timecounter;
        tv->tv_sec = tc->tc_offset_sec;
        tv->tv_usec = tc->tc_offset_micro;
        tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
        tv->tv_usec += boottime.tv_usec;
        tv->tv_sec += boottime.tv_sec;
        while (tv->tv_usec >= 1000000) {
                tv->tv_usec -= 1000000;
                tv->tv_sec++;
        }
}

void
nanotime(struct timespec *ts)
{
        unsigned count;
        u_int64_t delta;
        struct timecounter *tc;

        nnanotime++;
        tc = timecounter;
        ts->tv_sec = tc->tc_offset_sec;
        count = tco_delta(tc);
        delta = tc->tc_offset_nano;
        delta += ((u_int64_t)count * tc->tc_scale_nano_f);
        delta >>= 32;
        delta += ((u_int64_t)count * tc->tc_scale_nano_i);
        delta += boottime.tv_usec * 1000;
        ts->tv_sec += boottime.tv_sec;
        while (delta >= 1000000000) {
                delta -= 1000000000;
                ts->tv_sec++;
        }
        ts->tv_nsec = delta;
}

void
getmicrouptime(struct timeval *tvp)
{
        struct timecounter *tc;

        ngetmicrouptime++;
        tc = timecounter;
        tvp->tv_sec = tc->tc_offset_sec;
        tvp->tv_usec = tc->tc_offset_micro;
}

void
getnanouptime(struct timespec *tsp)
{
        struct timecounter *tc;

        ngetnanouptime++;
        tc = timecounter;
        tsp->tv_sec = tc->tc_offset_sec;
        tsp->tv_nsec = tc->tc_offset_nano >> 32;
}

void
microuptime(struct timeval *tv)
{
        struct timecounter *tc;

        nmicrouptime++;
        tc = timecounter;
        tv->tv_sec = tc->tc_offset_sec;
        tv->tv_usec = tc->tc_offset_micro;
        tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
        if (tv->tv_usec >= 1000000) {
                tv->tv_usec -= 1000000;
                tv->tv_sec++;
        }
}

void
nanouptime(struct timespec *ts)
{
        unsigned count;
        u_int64_t delta;
        struct timecounter *tc;

        nnanouptime++;
        tc = timecounter;
        ts->tv_sec = tc->tc_offset_sec;
        count = tco_delta(tc);
        delta = tc->tc_offset_nano;
        delta += ((u_int64_t)count * tc->tc_scale_nano_f);
        delta >>= 32;
        delta += ((u_int64_t)count * tc->tc_scale_nano_i);
        if (delta >= 1000000000) {
                delta -= 1000000000;
                ts->tv_sec++;
        }
        ts->tv_nsec = delta;
}

static void
tco_setscales(struct timecounter *tc)
{
        u_int64_t scale;

        scale = 1000000000LL << 32;
        scale += tc->tc_adjustment;
        scale /= tc->tc_tweak->tc_frequency;
        tc->tc_scale_micro = scale / 1000;
        tc->tc_scale_nano_f = scale & 0xffffffff;
        tc->tc_scale_nano_i = scale >> 32;
}

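/*
 * Worked example, not part of the original file: what tco_setscales() computes
 * for a hypothetical i8254-style counter at 1193182 Hz, and how the per-tick
 * scales are applied.  "scale" is nanoseconds per tick as a 32.32 fixed-point
 * value; tc_scale_nano_i/_f are its integer and fractional halves, and
 * tc_scale_micro is the same quantity divided by 1000 (microseconds per tick,
 * scaled by 2^32).  The guard macro and function name are made up.
 */
#ifdef TIMECOUNTER_EXAMPLES
static void
example_scales(void)
{
        u_int64_t scale, nsec;
        unsigned scale_nano_i, scale_nano_f, scale_micro, delta;

        scale = (1000000000LL << 32) / 1193182; /* assumes tc_adjustment == 0 */
        scale_nano_i = scale >> 32;             /* whole ns per tick: 838 */
        scale_nano_f = scale & 0xffffffff;      /* fractional ns per tick */
        scale_micro = scale / 1000;             /* us per tick, scaled by 2^32 */

        /* Convert 1000 ticks to units, mirroring nanouptime()/microuptime(). */
        delta = 1000;
        nsec = ((u_int64_t)delta * scale_nano_f) >> 32;
        nsec += (u_int64_t)delta * scale_nano_i;        /* ~838095 ns */
        printf("1000 ticks = %lu ns = %lu us\n", (u_long)nsec,
            (u_long)(((u_int64_t)delta * scale_micro) >> 32));
}
#endif /* TIMECOUNTER_EXAMPLES */
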
void
tc_update(struct timecounter *tc)
{
        tco_setscales(tc);
}

void
tc_init(struct timecounter *tc)
{
        struct timespec ts1;
        struct timecounter *t1, *t2, *t3;
        int i;

        tc->tc_adjustment = 0;
        tc->tc_tweak = tc;
        tco_setscales(tc);
        tc->tc_offset_count = tc->tc_get_timecount(tc);
        if (timecounter == &dummy_timecounter)
                tc->tc_avail = tc;
        else {
                tc->tc_avail = timecounter->tc_tweak->tc_avail;
                timecounter->tc_tweak->tc_avail = tc;
        }
        MALLOC(t1, struct timecounter *, sizeof *t1, M_TIMECOUNTER, M_WAITOK);
        tc->tc_other = t1;
        *t1 = *tc;
        t2 = t1;
        for (i = 1; i < NTIMECOUNTER; i++) {
                MALLOC(t3, struct timecounter *, sizeof *t3,
                    M_TIMECOUNTER, M_WAITOK);
                *t3 = *tc;
                t3->tc_other = t2;
                t2 = t3;
        }
        t1->tc_other = t3;
        tc = t1;

        printf("Timecounter \"%s\" frequency %lu Hz\n",
            tc->tc_name, (u_long)tc->tc_frequency);

        /* XXX: For now always start using the counter. */
        tc->tc_offset_count = tc->tc_get_timecount(tc);
        nanouptime(&ts1);
        tc->tc_offset_nano = (u_int64_t)ts1.tv_nsec << 32;
        tc->tc_offset_micro = ts1.tv_nsec / 1000;
        tc->tc_offset_sec = ts1.tv_sec;
        timecounter = tc;
}

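/*
 * The copies allocated in tc_init() form a ring of NTIMECOUNTER "stable
 * storage" timecounters linked through tc_other.  tc_windup() steps around
 * this ring via sync_other_counter(): it copies the current state into the
 * next element, updates that copy, and only then publishes it by setting the
 * global "timecounter" pointer, so a consumer that dereferenced the previous
 * pointer never sees a half-updated structure.
 */
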
void
tc_setclock(struct timespec *ts)
{
        struct timespec ts2;

        nanouptime(&ts2);
        boottime.tv_sec = ts->tv_sec - ts2.tv_sec;
        boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000;
        if (boottime.tv_usec < 0) {
                boottime.tv_usec += 1000000;
                boottime.tv_sec--;
        }
        /* fiddle all the little crinkly bits around the fiords... */
        tc_windup();
}

static void
switch_timecounter(struct timecounter *newtc)
{
        int s;
        struct timecounter *tc;
        struct timespec ts;

        s = splclock();
        tc = timecounter;
        if (newtc->tc_tweak == tc->tc_tweak) {
                splx(s);
                return;
        }
        newtc = newtc->tc_tweak->tc_other;
        nanouptime(&ts);
        newtc->tc_offset_sec = ts.tv_sec;
        newtc->tc_offset_nano = (u_int64_t)ts.tv_nsec << 32;
        newtc->tc_offset_micro = ts.tv_nsec / 1000;
        newtc->tc_offset_count = newtc->tc_get_timecount(newtc);
        tco_setscales(newtc);
        timecounter = newtc;
        splx(s);
}

static struct timecounter *
sync_other_counter(void)
{
        struct timecounter *tc, *tcn, *tco;
        unsigned delta;

        tco = timecounter;
        tc = tco->tc_other;
        tcn = tc->tc_other;
        *tc = *tco;
        tc->tc_other = tcn;
        delta = tco_delta(tc);
        tc->tc_offset_count += delta;
        tc->tc_offset_count &= tc->tc_counter_mask;
        tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_f;
        tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_i << 32;
        return (tc);
}

void
tc_windup(void)
{
        struct timecounter *tc, *tco;
        struct timeval tvt;

        tco = timecounter;
        tc = sync_other_counter();
        /*
         * We may be inducing a tiny error here, the tc_poll_pps() may
         * process a latched count which happens after the tco_delta()
         * in sync_other_counter(), which would extend the previous
         * counter's parameters into the domain of this new one.
         * Since the timewindow is very small for this, the error is
         * going to be only a few weenieseconds (as Dave Mills would
         * say), so let's just not talk more about it, OK?
         */
        if (tco->tc_poll_pps)
                tco->tc_poll_pps(tco);
        if (timedelta != 0) {
                tvt = boottime;
                tvt.tv_usec += tickdelta;
                if (tvt.tv_usec >= 1000000) {
                        tvt.tv_sec++;
                        tvt.tv_usec -= 1000000;
                } else if (tvt.tv_usec < 0) {
                        tvt.tv_sec--;
                        tvt.tv_usec += 1000000;
                }
                boottime = tvt;
                timedelta -= tickdelta;
        }

        while (tc->tc_offset_nano >= 1000000000ULL << 32) {
                tc->tc_offset_nano -= 1000000000ULL << 32;
                tc->tc_offset_sec++;
                ntp_update_second(tc);  /* XXX only needed if xntpd runs */
                tco_setscales(tc);
        }

        tc->tc_offset_micro = (tc->tc_offset_nano / 1000) >> 32;

        /* Figure out the wall-clock time */
        tc->tc_nanotime.tv_sec = tc->tc_offset_sec + boottime.tv_sec;
        tc->tc_nanotime.tv_nsec =
            (tc->tc_offset_nano >> 32) + boottime.tv_usec * 1000;
        tc->tc_microtime.tv_usec = tc->tc_offset_micro + boottime.tv_usec;
        if (tc->tc_nanotime.tv_nsec >= 1000000000) {
                tc->tc_nanotime.tv_nsec -= 1000000000;
                tc->tc_microtime.tv_usec -= 1000000;
                tc->tc_nanotime.tv_sec++;
        }
        time_second = tc->tc_microtime.tv_sec = tc->tc_nanotime.tv_sec;

        timecounter = tc;
}

static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
        char newname[32];
        struct timecounter *newtc, *tc;
        int error;

        tc = timecounter->tc_tweak;
        strncpy(newname, tc->tc_name, sizeof(newname));
        error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
        if (error == 0 && req->newptr != NULL &&
            strcmp(newname, tc->tc_name) != 0) {
                for (newtc = tc->tc_avail; newtc != tc;
                    newtc = newtc->tc_avail) {
                        if (strcmp(newname, newtc->tc_name) == 0) {
                                /* Warm up new timecounter. */
                                (void)newtc->tc_get_timecount(newtc);

                                switch_timecounter(newtc);
                                return (0);
                        }
                }
                return (EINVAL);
        }
        return (error);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");


int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo.current_mode = pps->ppsparam.mode;
                fapi->pps_info_buf = pps->ppsinfo;
                return (0);
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = kapi->edge;
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOTTY);
        }
}

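/*
 * Sketch, not part of the original file: a driver exporting the PPS API
 * (RFC 2783 style) would normally just forward the PPS_IOC_* ioctls from its
 * d_ioctl entry point to pps_ioctl() above.  "example_pps", "example_ioctl"
 * and the guard macro are made-up names, and the d_ioctl signature shown is
 * assumed to match the cdevsw of this era.
 */
#ifdef TIMECOUNTER_EXAMPLES
static struct pps_state example_pps;    /* hypothetical per-device PPS state */

static int
example_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{

        /* Hand any PPS_IOC_* request to the generic code; others get ENOTTY. */
        return (pps_ioctl(cmd, data, &example_pps));
}
#endif /* TIMECOUNTER_EXAMPLES */
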
void
pps_init(struct pps_state *pps)
{
        pps->ppscap |= PPS_TSFMT_TSPEC;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
        if (pps->ppscap & PPS_CAPTURECLEAR)
                pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, struct timecounter *tc, unsigned count, int event)
{
        struct timespec ts, *tsp, *osp;
        u_int64_t delta;
        unsigned tcount, *pcount;
        int foff, fhard;
        pps_seq_t *pseq;

        /* Things would be easier with arrays... */
        if (event == PPS_CAPTUREASSERT) {
                tsp = &pps->ppsinfo.assert_timestamp;
                osp = &pps->ppsparam.assert_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
                fhard = pps->kcmode & PPS_CAPTUREASSERT;
                pcount = &pps->ppscount[0];
                pseq = &pps->ppsinfo.assert_sequence;
        } else {
                tsp = &pps->ppsinfo.clear_timestamp;
                osp = &pps->ppsparam.clear_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
                fhard = pps->kcmode & PPS_CAPTURECLEAR;
                pcount = &pps->ppscount[1];
                pseq = &pps->ppsinfo.clear_sequence;
        }

        /* The timecounter changed: bail */
        if (!pps->ppstc ||
            pps->ppstc->tc_name != tc->tc_name ||
            tc->tc_name != timecounter->tc_name) {
                pps->ppstc = tc;
                *pcount = count;
                return;
        }

        /* Nothing really happened */
        if (*pcount == count)
                return;

        *pcount = count;

        /* Convert the count to timespec */
        ts.tv_sec = tc->tc_offset_sec;
        tcount = count - tc->tc_offset_count;
        tcount &= tc->tc_counter_mask;
        delta = tc->tc_offset_nano;
        delta += ((u_int64_t)tcount * tc->tc_scale_nano_f);
        delta >>= 32;
        delta += ((u_int64_t)tcount * tc->tc_scale_nano_i);
        delta += boottime.tv_usec * 1000;
        ts.tv_sec += boottime.tv_sec;
        while (delta >= 1000000000) {
                delta -= 1000000000;
                ts.tv_sec++;
        }
        ts.tv_nsec = delta;

        (*pseq)++;
        *tsp = ts;

        if (foff) {
                timespecadd(tsp, osp);
                if (tsp->tv_nsec < 0) {
                        tsp->tv_nsec += 1000000000;
                        tsp->tv_sec -= 1;
                }
        }
#ifdef PPS_SYNC
        if (fhard) {
                /* magic, at its best... */
                tcount = count - pps->ppscount[2];
                pps->ppscount[2] = count;
                tcount &= tc->tc_counter_mask;
                delta = ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_f);
                delta >>= 32;
                delta += ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_i);
                hardpps(tsp, delta);
        }
#endif
}

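/*
 * Sketch, not part of the original file, continuing the example_pps glue from
 * the sketch after pps_ioctl(): at attach time the driver declares what edges
 * it can capture and calls pps_init(); from its interrupt handler it latches a
 * counter value and feeds it to pps_event().  Reading the counter with
 * tc_get_timecount() here is an assumption about a device without hardware
 * latching; the names are made up and the code is compiled out.
 */
#ifdef TIMECOUNTER_EXAMPLES
static void
example_pps_attach(void)
{

        example_pps.ppscap = PPS_CAPTUREASSERT; /* we timestamp assert edges */
        pps_init(&example_pps);                 /* adds the OFFSET/TSPEC bits */
}

static void
example_pps_intr(void)
{
        struct timecounter *tc;
        unsigned count;

        tc = timecounter;
        count = tc->tc_get_timecount(tc);       /* counter value at the PPS edge */
        pps_event(&example_pps, tc, count, PPS_CAPTUREASSERT);
}
#endif /* TIMECOUNTER_EXAMPLES */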