#include <config.h>

#include <event2/util.h>
#include <event2/event.h>

#include "ntp_workimpl.h"
#ifdef WORK_THREAD
# include <event2/thread.h>
#endif

#include "main.h"
#include "ntp_libopts.h"
#include "kod_management.h"
#include "networking.h"
#include "utilities.h"
#include "log.h"
#include "libntp.h"


int shutting_down;
int time_derived;
int time_adjusted;
int n_pending_dns = 0;
int n_pending_ntp = 0;
int ai_fam_pref = AF_UNSPEC;
int ntpver = 4;
double steplimit = -1;
SOCKET sock4 = -1;		/* Socket for IPv4 */
SOCKET sock6 = -1;		/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;		/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;		/* Broadcast Socket for IPv6 */
struct event_base *base;
struct event *ev_sock4;
struct event *ev_sock6;
struct event *ev_worker_timeout;
struct event *ev_xmt_timer;

struct dns_ctx {
	const char *	name;
	int		flags;
#define CTX_BCST	0x0001
#define CTX_UCST	0x0002
#define CTX_xCST	0x0003
#define CTX_CONC	0x0004
#define CTX_unused	0xfffd
	int		key_id;
	struct timeval	timeout;
	struct key *	key;
};

typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;
	struct dns_ctx *	dctx;
	sockaddr_u		addr;
	time_t			stime;
	int			done;
	struct pkt		x_pkt;
};

typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;
	SOCKET			sock;
	time_t			sched;
	sent_pkt *		spkt;
};

struct timeval	gap;
xmt_ctx *	xmt_q;
struct key *	keys = NULL;
int		response_timeout;
struct timeval	response_tv;
struct timeval	start_tv;
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

static union {
	struct pkt pkt;
	char	   buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */

void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);


/*
 * The actual main function.
 */
int
sntp_main (
	int argc,
	char **argv,
	const char *sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;
	argv += optct;


	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
	gap.tv_usec = min(gap.tv_usec, 999999);

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}


	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values on the command line
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	// HMS: Should we use arg-default for this too?
	if (HAVE_OPT(KEYFILE))
		auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Consider employing a variable that prevents functions from doing
	** anything until everything is initialized properly
	**
	** HMS: What exactly does the above mean?
	*/
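	/*
	** libevent setup: route libevent's internal messages through our
	** logger, enable its debug instrumentation when debugging is on,
	** and build the event_base that every callback below runs from.
	*/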
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT(  CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);
	event_base_free(base);

	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}


/*
** open sockets and make them non-blocking
*/
void
open_sockets(
	void
	)
{
	sockaddr_u	name;

	if (-1 == sock4) {
		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
		if (-1 == sock4) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock4);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET;
		SET_ADDR4N(&name, INADDR_ANY);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock4, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
			exit(1);
		}

		/* Register an NTP callback for recv/timeout */
		ev_sock4 = event_new(base, sock4,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock4) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock4) failed!");
		} else {
			event_add(ev_sock4, &wakeup_tv);
		}
	}

	/* We may not always have IPv6... */
	if (-1 == sock6 && ipv6_works) {
		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
		if (-1 == sock6 && ipv6_works) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock6);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET6;
		SET_ADDR6N(&name, in6addr_any);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));
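
		/*
		** As with the IPv4 socket above, bind the wildcard
		** address.  Port 123 is requested only when the
		** reserved-port option is enabled; otherwise an
		** ephemeral port is fine for unicast queries.
		*/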
		if (-1 == bind(sock6, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
			exit(1);
		}
		/* Register an NTP callback for recv/timeout */
		ev_sock6 = event_new(base, sock6,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock6) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock6) failed!");
		} else {
			event_add(ev_sock6, &wakeup_tv);
		}
	}

	return;
}


/*
** handle_lookup
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo	hints;	/* Local copy is OK */
	struct dns_ctx *ctx;
	long		l;
	char *		name_copy;
	size_t		name_sz;
	size_t		octets;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Unless we specify a socktype, we'll get at least two
	** entries for each address: one for TCP and one for
	** UDP. That's not what we want.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	name_sz = 1 + strlen(name);
	octets = sizeof(*ctx) + name_sz;	// Space for a ctx and the name
	ctx = emalloc_zero(octets);		// ctx at ctx[0]
	name_copy = (char *)(ctx + 1);		// Put the name at ctx[1]
	memcpy(name_copy, name, name_sz);	// copy the name to ctx[1]
	ctx->name = name_copy;			// point to it...
	ctx->flags = flags;
	ctx->timeout = response_tv;

	/* The following should arguably be passed in... */
	if (ENABLED_OPT(AUTHENTICATION) &&
	    atoint(OPT_ARG(AUTHENTICATION), &l)) {
		ctx->key_id = l;
		get_key(ctx->key_id, &ctx->key);
	} else {
		ctx->key_id = -1;
		ctx->key = NULL;
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}


/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));
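
		/*
		** Walk every address the resolver returned.  Addresses
		** with a KoD entry on file are skipped; each remaining
		** address is counted as a pending NTP exchange.
		*/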
		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}


/*
** queue_xmt
*/
void
queue_xmt(
	SOCKET			sock,
	struct dns_ctx *	dctx,
	sent_pkt *		spkt,
	u_int			xmt_delay
	)
{
	sockaddr_u *	dest;
	sent_pkt **	pkt_listp;
	sent_pkt *	match;
	xmt_ctx *	xctx;
	struct timeval	start_cb;
	struct timeval	delay;

	dest = &spkt->addr;
	if (IS_IPV6(dest))
		pkt_listp = &v6_pkts_list;
	else
		pkt_listp = &v4_pkts_list;

	/* reject attempts to add address already listed */
	for (match = *pkt_listp; match != NULL; match = match->link) {
		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
			if (strcasecmp(spkt->dctx->name,
				       match->dctx->name))
				printf("%s %s duplicate address from %s ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name,
				       spkt->dctx->name);
			else
				printf("%s %s, duplicate address ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name);
			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
			free(spkt);
			return;
		}
	}

	LINK_SLIST(*pkt_listp, spkt, link);

	xctx = emalloc_zero(sizeof(*xctx));
	xctx->sock = sock;
	xctx->spkt = spkt;
	gettimeofday_cached(base, &start_cb);
	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

	LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
			link, xmt_ctx);
	if (xmt_q == xctx) {
		/*
		 * The new entry is the first scheduled.  The timer is
		 * either not active or is set for the second xmt
		 * context in xmt_q.
		 */
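		/*
		** Create the transmit timer the first time it is needed,
		** then (re)arm it for the new head of the queue.
		*/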
		if (NULL == ev_xmt_timer)
			ev_xmt_timer = event_new(base, INVALID_SOCKET,
						 EV_TIMEOUT,
						 &xmt_timer_cb, NULL);
		if (NULL == ev_xmt_timer) {
			msyslog(LOG_ERR,
				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
			exit(1);
		}
		ZERO(delay);
		if (xctx->sched > start_cb.tv_sec)
			delay.tv_sec = xctx->sched - start_cb.tv_sec;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
			  (u_int)delay.tv_usec));
	}
}


/*
** xmt_timer_cb
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	if (xmt_q->sched <= start_cb.tv_sec) {
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}


/*
** xmt()
*/
void
xmt(
	xmt_ctx *	xctx
	)
{
	SOCKET		sock = xctx->sock;
	struct dns_ctx *dctx = xctx->spkt->dctx;
	sent_pkt *	spkt = xctx->spkt;
	sockaddr_u *	dst = &spkt->addr;
	struct timeval	tv_xmt;
	struct pkt	x_pkt;
	size_t		pkt_len;
	int		sent;

	if (0 != gettimeofday(&tv_xmt, NULL)) {
		msyslog(LOG_ERR,
			"xmt: gettimeofday() failed: %m");
		exit(1);
	}
	tv_xmt.tv_sec += JAN_1970;

	pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
			       dctx->key);

	sent = sendpkt(sock, dst, &x_pkt, pkt_len);
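	/*
	** sendpkt() reports success or failure; when the send fails we
	** drop the pending-response count right away rather than wait
	** for a reply that can never arrive.
	*/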
	if (sent) {
		/* Save the packet we sent... */
		memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
						 pkt_len));
		spkt->stime = tv_xmt.tv_sec - JAN_1970;

		TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
			  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
	} else {
		dec_pending_ntp(dctx->name, dst);
	}

	return;
}


/*
 * timeout_queries() -- give up on unrequited NTP queries
 */
void
timeout_queries(void)
{
	struct timeval	start_cb;
	u_int		idx;
	sent_pkt *	head;
	sent_pkt *	spkt;
	sent_pkt *	spkt_next;
	long		age;
	int		didsomething = 0;

	TRACE(3, ("timeout_queries: called to check %u items\n",
		  (unsigned)COUNTOF(fam_listheads)));

	gettimeofday_cached(base, &start_cb);
	for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
		head = fam_listheads[idx];
		for (spkt = head; spkt != NULL; spkt = spkt_next) {
			char xcst;

			didsomething = 1;
			switch (spkt->dctx->flags & CTX_xCST) {
			case CTX_BCST:
				xcst = 'B';
				break;

			case CTX_UCST:
				xcst = 'U';
				break;

			default:
				INSIST(!"spkt->dctx->flags neither UCST nor BCST");
				break;
			}

			spkt_next = spkt->link;
			if (0 == spkt->stime || spkt->done)
				continue;
			age = start_cb.tv_sec - spkt->stime;
			TRACE(3, ("%s %s %cCST age %ld\n",
				  stoa(&spkt->addr),
				  spkt->dctx->name, xcst, age));
			if (age > response_timeout)
				timeout_query(spkt);
		}
	}
	// Do we care about didsomething?
	TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
		  didsomething, (long)(start_cb.tv_sec - start_tv.tv_sec)));
	if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
		TRACE(3, ("timeout_queries: bail!\n"));
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	}
}


void dec_pending_ntp(
	const char *	name,
	sockaddr_u *	server
	)
{
	if (n_pending_ntp > 0) {
		--n_pending_ntp;
		check_exit_conditions();
	} else {
		INSIST(0 == n_pending_ntp);
		TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
			  hostnameaddr(name, server)));
	}
}


void timeout_query(
	sent_pkt *	spkt
	)
{
	sockaddr_u *	server;
	char		xcst;


	switch (spkt->dctx->flags & CTX_xCST) {
	case CTX_BCST:
		xcst = 'B';
		break;

	case CTX_UCST:
		xcst = 'U';
		break;

	default:
		INSIST(!"spkt->dctx->flags neither UCST nor BCST");
		break;
	}
	spkt->done = TRUE;
	server = &spkt->addr;
	msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
		hostnameaddr(spkt->dctx->name, server), xcst,
		response_timeout);
	dec_pending_ntp(spkt->dctx->name, server);
	return;
}


/*
** check_kod
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	char *hostname;
	struct kod_entry *reason;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	if (search_entry(hostname, &reason)) {
		printf("prior KoD for %s, skipping.\n",
		       hostname);
		free(reason);
		free(hostname);

		return 1;
	}
	free(hostname);

	return 0;
}


/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	if (!(EV_READ & what)) {
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}


	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}


/*
 * check_exit_conditions()
 *
 * If sntp has a reply, ask the event loop to stop after this round of
 * callbacks, unless --wait was used.
 */
void
check_exit_conditions(void)
{
	if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
	    (time_derived && !HAVE_OPT(WAIT))) {
		event_base_loopexit(base, NULL);
		shutting_down = TRUE;
	} else {
		TRACE(2, ("%d NTP and %d name queries pending\n",
			  n_pending_ntp, n_pending_dns));
	}
}


/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;

	if (remove_it) {
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);
}


/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif


/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}


/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
	u_int	seconds		/* 0 cancels */
	)
{
	struct timeval	tv_to;

	if (NULL == ev_worker_timeout) {
		ev_worker_timeout = event_new(base, -1,
					      EV_TIMEOUT | EV_PERSIST,
					      &worker_timeout, NULL);
		DEBUG_INSIST(NULL != ev_worker_timeout);
	} else {
		event_del(ev_worker_timeout);
	}
	if (0 == seconds)
		return;
	tv_to.tv_sec = seconds;
	tv_to.tv_usec = 0;
	event_add(ev_worker_timeout, &tv_to);
}


void
worker_timeout(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);

	DEBUG_REQUIRE(EV_TIMEOUT & what);
	worker_idle_timer_fired();
}


void
sntp_libevent_log_cb(
	int		severity,
	const char *	msg
	)
{
	int	level;

	switch (severity) {

	default:
	case _EVENT_LOG_DEBUG:
		level = LOG_DEBUG;
		break;

	case _EVENT_LOG_MSG:
		level = LOG_NOTICE;
		break;

	case _EVENT_LOG_WARN:
		level = LOG_WARNING;
		break;

	case _EVENT_LOG_ERR:
		level = LOG_ERR;
		break;
	}

	msyslog(level, "%s", msg);
}


int
generate_pkt (
	struct pkt *		x_pkt,
	const struct timeval *	tv_xmt,
	int			key_id,
	struct key *		pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
	if (pkt_key != NULL) {
		x_pkt->exten[0] = htonl(key_id);
		mac_size = 20;	/* max room for MAC */
		mac_size = make_mac(x_pkt, pkt_len, mac_size,
				    pkt_key, (char *)&x_pkt->exten[1]);
		if (mac_size > 0)
			pkt_len += mac_size + 4;
	}
	return pkt_len;
}


int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	const char *	leaptxt;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		switch (PKT_LEAP(rpkt->li_vn_mode)) {
		case LEAP_NOWARNING:
			leaptxt = "no-leap";
			break;
		case LEAP_ADDSECOND:
			leaptxt = "add-leap";
			break;
		case LEAP_DELSECOND:
			leaptxt = "del-leap";
			break;
		case LEAP_NOTINSYNC:
			leaptxt = "unsync";
			break;
		default:
			leaptxt = "LEAP-ERROR";
			break;
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			leaptxt,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}


void
offset_calculation(
	struct pkt *rpkt,
	int rpktl,
	struct timeval *tv_dst,
	double *offset,
	double *precision,
	double *synch_distance
	)
{
	l_fp p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp p_rdly, p_rdsp;
	double t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
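	/*
	** Standard SNTP offset calculation:
	**   t21 = T2 - T1 = rec - org   (server receive - client transmit)
	**   t34 = T3 - T4 = xmt - dst   (server transmit - client receive)
	**   offset = (t21 + t34) / 2    delta = t21 - t34 (round-trip delay)
	*/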
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);
	*offset = (t21 + t34) / 2.;
	delta = t21 - t34;

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
		+ 0.
		+ FPTOD(p_rdsp)
		+ 15e-6
		+ 0.	/* LOGTOD(sys_precision) when we can get it */
		;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp)) / 2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}



/* Compute the 8 bits for li_vn_mode */
void
set_li_vn_mode (
	struct pkt *spkt,
	char leap,
	char version,
	char mode
	)
{
	if (leap > 3) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
		leap = 3;
	}

	if ((unsigned char)version > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
		version = 4;
	}

	if (mode > 7) {
		msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
		mode = 3;
	}

	spkt->li_vn_mode  = leap << 6;
	spkt->li_vn_mode |= version << 3;
	spkt->li_vn_mode |= mode;
}


/*
** set_time applies 'offset' to the local clock.
*/
int
set_time(
	double offset
	)
{
	int rc;

	if (time_adjusted)
		return EX_OK;

	/*
	** If we can step but we cannot slew, then step.
	** If we can step or slew and |offset| > steplimit, then step.
	*/
	if (ENABLED_OPT(STEP) &&
	    (	!ENABLED_OPT(SLEW)
	     || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
	    )) {
		rc = step_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}
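
	/*
	** We did not step: either stepping is disabled, or slewing is
	** enabled and the offset is within steplimit.  Try to slew.
	*/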
	if (ENABLED_OPT(SLEW)) {
		rc = adj_systime(offset);

		/* If there was a problem, can we rely on errno? */
		if (1 == rc)
			time_adjusted = TRUE;
		return (time_adjusted)
			   ? EX_OK
			   : 1;
		/*
		** In case of error, what should we use?
		** EX_UNAVAILABLE?
		** EX_OSERR?
		** EX_NOPERM?
		*/
	}

	return EX_SOFTWARE;
}


int
libevent_version_ok(void)
{
	ev_uint32_t v_compile_maj;
	ev_uint32_t v_run_maj;

	v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
	v_run_maj = event_get_version_number() & 0xffff0000;
	if (v_compile_maj != v_run_maj) {
		fprintf(stderr,
			"Incompatible libevent versions: have %s, built with %s\n",
			event_get_version(),
			LIBEVENT_VERSION);
		return 0;
	}
	return 1;
}

/*
 * gettimeofday_cached()
 *
 * Clones the event_base_gettimeofday_cached() interface but ensures the
 * times are always on the gettimeofday() 1970 scale.  Older libevent 2
 * sometimes used gettimeofday(), sometimes the since-system-start
 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
 *
 * It is not cleanly possible to tell which timescale older libevent is
 * using.
 *
 * The strategy involves 1 hour thresholds chosen to be far longer than
 * the duration of a round of libevent callbacks, which share a cached
 * start-of-round time.  First compare the last cached time with the
 * current gettimeofday() time.  If they are within one hour, libevent
 * is using the proper timescale so leave the offset 0.  Otherwise,
 * compare libevent's cached time and the current time on the monotonic
 * scale.  If they are within an hour, libevent is using the monotonic
 * scale so calculate the offset to add to such times to bring them to
 * gettimeofday()'s scale.
 */
int
gettimeofday_cached(
	struct event_base *	b,
	struct timeval *	caller_tv
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	static struct event_base *	cached_b;
	static struct timeval		cached;
	static struct timeval		adj_cached;
	static struct timeval		offset;
	static int			offset_ready;
	struct timeval	latest;
	struct timeval	systemt;
	struct timespec	ts;
	struct timeval	mono;
	struct timeval	diff;
	int		cgt_rc;
	int		gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	if (!offset_ready) {
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					  "gettimeofday_cached",
					  (long)offset.tv_sec,
					  (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}