#include <config.h>

#include <event2/util.h>
#include <event2/event.h>

#include "ntp_workimpl.h"
#ifdef WORK_THREAD
# include <event2/thread.h>
#endif

#ifdef HAVE_SYSEXITS_H
# include <sysexits.h>
#endif

#include "main.h"
#include "ntp_libopts.h"
#include "kod_management.h"
#include "networking.h"
#include "utilities.h"
#include "log.h"
#include "libntp.h"

extern const char *progname;

int shutting_down;
int time_derived;
int time_adjusted;
int n_pending_dns = 0;
int n_pending_ntp = 0;
int ai_fam_pref = AF_UNSPEC;
int ntpver = 4;
double steplimit = -1;
SOCKET sock4 = -1;    /* Socket for IPv4 */
SOCKET sock6 = -1;    /* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;    /* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;    /* Broadcast Socket for IPv6 */
struct event_base *base;
struct event *ev_sock4;
struct event *ev_sock6;
struct event *ev_worker_timeout;
struct event *ev_xmt_timer;

struct dns_ctx {
    const char *    name;
    int             flags;
#define CTX_BCST    0x0001
#define CTX_UCST    0x0002
#define CTX_xCST    0x0003
#define CTX_CONC    0x0004
#define CTX_unused  0xfffd
    int             key_id;
    struct timeval  timeout;
    struct key *    key;
};

typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
    sent_pkt *          link;
    struct dns_ctx *    dctx;
    sockaddr_u          addr;
    time_t              stime;
    int                 done;
    struct pkt          x_pkt;
};

typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
    xmt_ctx *   link;
    SOCKET      sock;
    time_t      sched;
    sent_pkt *  spkt;
};

struct timeval  gap;
xmt_ctx *       xmt_q;
struct key *    keys = NULL;
int             response_timeout;
struct timeval  response_tv;
struct timeval  start_tv;
/* check the timeout at least once per second */
struct timeval  wakeup_tv = { 0, 888888 };

sent_pkt *      fam_listheads[2];
#define v4_pkts_list    (fam_listheads[0])
#define v6_pkts_list    (fam_listheads[1])

static union {
    struct pkt  pkt;
    char        buf[LEN_PKT_NOMAC + NTP_MAXEXTEN + MAX_MAC_LEN];
} rbuf;

#define r_pkt rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;           /* intres imports these */
int root_dropped;
#endif
u_long current_time;    /* libntp/authkeys.c */

void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
                        const struct addrinfo *,
                        const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
               u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);


/*
 * The actual main function.
 */
int
sntp_main (
    int argc,
    char **argv,
    const char *sntpVersion
    )
{
    int i;
    int exitcode;
    int optct;
    struct event_config *evcfg;

    /* Initialize logging system - sets up progname */
    sntp_init_logging(argv[0]);

    if (!libevent_version_ok())
        exit(EX_SOFTWARE);

    init_lib();
    init_auth();

    optct = ntpOptionProcess(&sntpOptions, argc, argv);
    argc -= optct;
    argv += optct;

    debug = OPT_VALUE_SET_DEBUG_LEVEL;

    TRACE(2, ("init_lib() done, %s%s\n",
              (ipv4_works)
                  ? "ipv4_works "
                  : "",
              (ipv6_works)
                  ? "ipv6_works "
                  : ""));
    ntpver = OPT_VALUE_NTPVERSION;
    steplimit = OPT_VALUE_STEPLIMIT / 1e3;
    gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);
    gap.tv_usec = min(gap.tv_usec, 999999);

    if (HAVE_OPT(LOGFILE))
        open_logfile(OPT_ARG(LOGFILE));

    msyslog(LOG_INFO, "%s", sntpVersion);

    if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
        printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
               progname);
        exit(EX_USAGE);
    }

    /*
    ** Eventually, we probably want:
    ** - separate bcst and ucst timeouts (why?)
    ** - multiple --timeout values on the command line
    */

    response_timeout = OPT_VALUE_TIMEOUT;
    response_tv.tv_sec = response_timeout;
    response_tv.tv_usec = 0;

    /* IPv6 available? */
    if (isc_net_probeipv6() != ISC_R_SUCCESS) {
        ai_fam_pref = AF_INET;
        TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
    } else {
        /* Check for options -4 and -6 */
        if (HAVE_OPT(IPV4))
            ai_fam_pref = AF_INET;
        else if (HAVE_OPT(IPV6))
            ai_fam_pref = AF_INET6;
    }

    /* TODO: Parse config file if declared */

    /*
    ** Init the KOD system.
    ** For embedded systems with no writable filesystem,
    ** -K /dev/null can be used to disable KoD storage.
    */
    kod_init_kod_db(OPT_ARG(KOD), FALSE);

    /* HMS: Check and see what happens if KEYFILE doesn't exist */
    auth_init(OPT_ARG(KEYFILE), &keys);

    /*
    ** Consider employing a variable that prevents functions from doing
    ** anything until everything is initialized properly.
    **
    ** HMS: What exactly does the above mean?
    */
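
    /*
    ** Flow overview (summary of the code in this file): handle_lookup()
    ** starts an asynchronous DNS lookup for each requested server;
    ** sntp_name_resolved() queues one request per resolved address via
    ** queue_xmt(); xmt_timer_cb()/xmt() transmit the requests; sock_cb()
    ** reads replies (and, on its periodic wakeup, runs timeout_queries())
    ** and hands packets to handle_pkt(), which may set the clock via
    ** set_time(); check_exit_conditions() ends the event loop once
    ** nothing is pending or a time has been derived.
    */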
    event_set_log_callback(&sntp_libevent_log_cb);
    if (debug > 0)
        event_enable_debug_mode();
#ifdef WORK_THREAD
    evthread_use_pthreads();
    /* we use libevent from main thread only, locks should be academic */
    if (debug > 0)
        evthread_enable_lock_debuging();
#endif
    evcfg = event_config_new();
    if (NULL == evcfg) {
        printf("%s: event_config_new() failed!\n", progname);
        return -1;
    }
#ifndef HAVE_SOCKETPAIR
    event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
    /* all libevent calls are from main thread */
    /* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
    base = event_base_new_with_config(evcfg);
    event_config_free(evcfg);
    if (NULL == base) {
        printf("%s: event_base_new() failed!\n", progname);
        return -1;
    }

    /* wire into intres resolver */
    worker_per_query = TRUE;
    addremove_io_fd = &sntp_addremove_fd;

    open_sockets();

    if (HAVE_OPT(BROADCAST)) {
        int          cn = STACKCT_OPT(BROADCAST);
        const char **cp = STACKLST_OPT(BROADCAST);

        while (cn-- > 0) {
            handle_lookup(*cp, CTX_BCST);
            cp++;
        }
    }

    if (HAVE_OPT(CONCURRENT)) {
        int          cn = STACKCT_OPT(CONCURRENT);
        const char **cp = STACKLST_OPT(CONCURRENT);

        while (cn-- > 0) {
            handle_lookup(*cp, CTX_UCST | CTX_CONC);
            cp++;
        }
    }

    for (i = 0; i < argc; ++i)
        handle_lookup(argv[i], CTX_UCST);

    gettimeofday_cached(base, &start_tv);
    event_base_dispatch(base);
    event_base_free(base);

    if (!time_adjusted &&
        (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
        exitcode = 1;
    else
        exitcode = 0;

    return exitcode;
}


/*
** open sockets and make them non-blocking
*/
void
open_sockets(
    void
    )
{
    sockaddr_u name;

    if (-1 == sock4) {
        sock4 = socket(PF_INET, SOCK_DGRAM, 0);
        if (-1 == sock4) {
            /* error getting a socket */
            msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
            exit(1);
        }
        /* Make it non-blocking */
        make_socket_nonblocking(sock4);

        /* Let's try using a wildcard... */
        ZERO(name);
        AF(&name) = AF_INET;
        SET_ADDR4N(&name, INADDR_ANY);
        SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

        if (-1 == bind(sock4, &name.sa,
                       SOCKLEN(&name))) {
            msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
            exit(1);
        }

        /* Register an NTP callback for recv/timeout */
        ev_sock4 = event_new(base, sock4,
                             EV_TIMEOUT | EV_READ | EV_PERSIST,
                             &sock_cb, NULL);
        if (NULL == ev_sock4) {
            msyslog(LOG_ERR,
                    "open_sockets: event_new(base, sock4) failed!");
        } else {
            event_add(ev_sock4, &wakeup_tv);
        }
    }

    /* We may not always have IPv6... */
    if (-1 == sock6 && ipv6_works) {
        sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
        if (-1 == sock6 && ipv6_works) {
            /* error getting a socket */
            msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
            exit(1);
        }
        /* Make it non-blocking */
        make_socket_nonblocking(sock6);

        /* Let's try using a wildcard... */
        ZERO(name);
        AF(&name) = AF_INET6;
        SET_ADDR6N(&name, in6addr_any);
        SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

        if (-1 == bind(sock6, &name.sa,
                       SOCKLEN(&name))) {
            msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
            exit(1);
        }
        /* Register an NTP callback for recv/timeout */
        ev_sock6 = event_new(base, sock6,
                             EV_TIMEOUT | EV_READ | EV_PERSIST,
                             &sock_cb, NULL);
        if (NULL == ev_sock6) {
            msyslog(LOG_ERR,
                    "open_sockets: event_new(base, sock6) failed!");
        } else {
            event_add(ev_sock6, &wakeup_tv);
        }
    }

    return;
}


/*
** handle_lookup
*/
void
handle_lookup(
    const char *name,
    int flags
    )
{
    struct addrinfo   hints;    /* Local copy is OK */
    struct dns_ctx *  ctx;
    char *            name_copy;
    size_t            name_sz;
    size_t            octets;

    TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

    ZERO(hints);
    hints.ai_family = ai_fam_pref;
    hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
    /*
    ** Unless we specify a socktype, we'll get at least two
    ** entries for each address: one for TCP and one for
    ** UDP. That's not what we want.
    */
    hints.ai_socktype = SOCK_DGRAM;
    hints.ai_protocol = IPPROTO_UDP;

    name_sz = 1 + strlen(name);
    octets = sizeof(*ctx) + name_sz;     // Space for a ctx and the name
    ctx = emalloc_zero(octets);          // ctx at ctx[0]
    name_copy = (char *)(ctx + 1);       // Put the name at ctx[1]
    memcpy(name_copy, name, name_sz);    // copy the name to ctx[1]
    ctx->name = name_copy;               // point to it...
    ctx->flags = flags;
    ctx->timeout = response_tv;
    ctx->key = NULL;

    /* The following should arguably be passed in... */
    if (ENABLED_OPT(AUTHENTICATION)) {
        ctx->key_id = OPT_VALUE_AUTHENTICATION;
        get_key(ctx->key_id, &ctx->key);
        if (NULL == ctx->key) {
            fprintf(stderr, "%s: Authentication with keyID %d requested, but no matching keyID found in <%s>!\n",
                    progname, ctx->key_id, OPT_ARG(KEYFILE));
            exit(1);
        }
    } else {
        ctx->key_id = -1;
    }

    ++n_pending_dns;
    getaddrinfo_sometime(name, "123", &hints, 0,
                         &sntp_name_resolved, ctx);
}


/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
*/
void
sntp_name_resolved(
    int                     rescode,
    int                     gai_errno,
    void *                  context,
    const char *            name,
    const char *            service,
    const struct addrinfo * hints,
    const struct addrinfo * addr
    )
{
    struct dns_ctx *        dctx;
    sent_pkt *              spkt;
    const struct addrinfo * ai;
    SOCKET                  sock;
    u_int                   xmt_delay_v4;
    u_int                   xmt_delay_v6;
    u_int                   xmt_delay;
    size_t                  octets;

    xmt_delay_v4 = 0;
    xmt_delay_v6 = 0;
    dctx = context;
    if (rescode) {
#ifdef EAI_SYSTEM
        if (EAI_SYSTEM == rescode) {
            errno = gai_errno;
            mfprintf(stderr, "%s lookup error %m\n",
                     dctx->name);
        } else
#endif
            fprintf(stderr, "%s lookup error %s\n",
                    dctx->name, gai_strerror(rescode));
    } else {
        TRACE(3, ("%s [%s]\n", dctx->name,
                  (addr->ai_canonname != NULL)
                      ? addr->ai_canonname
                      : ""));

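        /*
        ** Note: within each address family, unicast requests are
        ** staggered by queue_xmt() at roughly two-second intervals
        ** (xmt_delay is the per-family index of the address), so
        ** multiple servers are not queried simultaneously.
        */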
        for (ai = addr; ai != NULL; ai = ai->ai_next) {

            if (check_kod(ai))
                continue;

            switch (ai->ai_family) {

            case AF_INET:
                sock = sock4;
                xmt_delay = xmt_delay_v4;
                xmt_delay_v4++;
                break;

            case AF_INET6:
                if (!ipv6_works)
                    continue;

                sock = sock6;
                xmt_delay = xmt_delay_v6;
                xmt_delay_v6++;
                break;

            default:
                msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
                        ai->ai_family);
                exit(1);
                break;
            }

            /*
            ** We're waiting for a response for either unicast
            ** or broadcast, so...
            */
            ++n_pending_ntp;

            /* If this is for a unicast IP, queue a request */
            if (dctx->flags & CTX_UCST) {
                spkt = emalloc_zero(sizeof(*spkt));
                spkt->dctx = dctx;
                octets = min(ai->ai_addrlen, sizeof(spkt->addr));
                memcpy(&spkt->addr, ai->ai_addr, octets);
                queue_xmt(sock, dctx, spkt, xmt_delay);
            }
        }
    }
    /* n_pending_dns really should be >0 here... */
    --n_pending_dns;
    check_exit_conditions();
}


/*
** queue_xmt
*/
void
queue_xmt(
    SOCKET            sock,
    struct dns_ctx *  dctx,
    sent_pkt *        spkt,
    u_int             xmt_delay
    )
{
    sockaddr_u *    dest;
    sent_pkt **     pkt_listp;
    sent_pkt *      match;
    xmt_ctx *       xctx;
    struct timeval  start_cb;
    struct timeval  delay;

    dest = &spkt->addr;
    if (IS_IPV6(dest))
        pkt_listp = &v6_pkts_list;
    else
        pkt_listp = &v4_pkts_list;

    /* reject attempts to add address already listed */
    for (match = *pkt_listp; match != NULL; match = match->link) {
        if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
            if (strcasecmp(spkt->dctx->name,
                           match->dctx->name))
                printf("%s %s duplicate address from %s ignored.\n",
                       sptoa(&match->addr),
                       match->dctx->name,
                       spkt->dctx->name);
            else
                printf("%s %s, duplicate address ignored.\n",
                       sptoa(&match->addr),
                       match->dctx->name);
            dec_pending_ntp(spkt->dctx->name, &spkt->addr);
            free(spkt);
            return;
        }
    }

    LINK_SLIST(*pkt_listp, spkt, link);

    xctx = emalloc_zero(sizeof(*xctx));
    xctx->sock = sock;
    xctx->spkt = spkt;
    gettimeofday_cached(base, &start_cb);
    xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

    LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
                    link, xmt_ctx);
    if (xmt_q == xctx) {
        /*
         * The new entry is the first scheduled.  The timer is
         * either not active or is set for the second xmt
         * context in xmt_q.
         */
        if (NULL == ev_xmt_timer)
            ev_xmt_timer = event_new(base, INVALID_SOCKET,
                                     EV_TIMEOUT,
                                     &xmt_timer_cb, NULL);
        if (NULL == ev_xmt_timer) {
            msyslog(LOG_ERR,
                    "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
            exit(1);
        }
        ZERO(delay);
        if (xctx->sched > start_cb.tv_sec)
            delay.tv_sec = xctx->sched - start_cb.tv_sec;
        event_add(ev_xmt_timer, &delay);
        TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
                  (u_int)delay.tv_usec));
    }
}


/*
** xmt_timer_cb
*/
void
xmt_timer_cb(
    evutil_socket_t fd,
    short           what,
    void *          ctx
    )
{
    struct timeval  start_cb;
    struct timeval  delay;
    xmt_ctx *       x;

    UNUSED_ARG(fd);
    UNUSED_ARG(ctx);
    DEBUG_INSIST(EV_TIMEOUT == what);

    if (NULL == xmt_q || shutting_down)
        return;
    gettimeofday_cached(base, &start_cb);
    if (xmt_q->sched <= start_cb.tv_sec) {
        UNLINK_HEAD_SLIST(x, xmt_q, link);
        TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
                  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
        xmt(x);
        free(x);
        if (NULL == xmt_q)
            return;
    }
    if (xmt_q->sched <= start_cb.tv_sec) {
        event_add(ev_xmt_timer, &gap);
        TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
                  (u_int)start_cb.tv_usec,
                  (u_int)gap.tv_usec));
    } else {
        delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
        delay.tv_usec = 0;
        event_add(ev_xmt_timer, &delay);
        TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
                  (u_int)start_cb.tv_usec,
                  (long)delay.tv_sec));
    }
}


/*
** xmt()
*/
void
xmt(
    xmt_ctx *   xctx
    )
{
    SOCKET           sock = xctx->sock;
    struct dns_ctx * dctx = xctx->spkt->dctx;
    sent_pkt *       spkt = xctx->spkt;
    sockaddr_u *     dst = &spkt->addr;
    struct timeval   tv_xmt;
    struct pkt       x_pkt;
    size_t           pkt_len;
    int              sent;

    if (0 != gettimeofday(&tv_xmt, NULL)) {
        msyslog(LOG_ERR,
                "xmt: gettimeofday() failed: %m");
        exit(1);
    }
    tv_xmt.tv_sec += JAN_1970;

    pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
                           dctx->key);

    sent = sendpkt(sock, dst, &x_pkt, pkt_len);
    if (sent) {
        /* Save the packet we sent... */
        memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
                                         pkt_len));
        spkt->stime = tv_xmt.tv_sec - JAN_1970;

        TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
                  (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
    } else {
        dec_pending_ntp(dctx->name, dst);
    }

    return;
}

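
/*
** Note on timeouts: response_timeout (the --timeout option) is applied
** twice in timeout_queries() below -- per query, to give up on an
** individual server that has not answered, and to the run as a whole,
** measured from start_tv, after which the event loop is told to exit.
*/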

/*
 * timeout_queries() -- give up on unrequited NTP queries
 */
void
timeout_queries(void)
{
    struct timeval  start_cb;
    u_int           idx;
    sent_pkt *      head;
    sent_pkt *      spkt;
    sent_pkt *      spkt_next;
    long            age;
    int             didsomething = 0;

    TRACE(3, ("timeout_queries: called to check %u items\n",
              (unsigned)COUNTOF(fam_listheads)));

    gettimeofday_cached(base, &start_cb);
    for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
        head = fam_listheads[idx];
        for (spkt = head; spkt != NULL; spkt = spkt_next) {
            char xcst;

            didsomething = 1;
            switch (spkt->dctx->flags & CTX_xCST) {
            case CTX_BCST:
                xcst = 'B';
                break;

            case CTX_UCST:
                xcst = 'U';
                break;

            default:
                INSIST(!"spkt->dctx->flags neither UCST nor BCST");
                break;
            }

            spkt_next = spkt->link;
            if (0 == spkt->stime || spkt->done)
                continue;
            age = start_cb.tv_sec - spkt->stime;
            TRACE(3, ("%s %s %cCST age %ld\n",
                      stoa(&spkt->addr),
                      spkt->dctx->name, xcst, age));
            if (age > response_timeout)
                timeout_query(spkt);
        }
    }
    // Do we care about didsomething?
    TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
              didsomething, (long)(start_cb.tv_sec - start_tv.tv_sec)));
    if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
        TRACE(3, ("timeout_queries: bail!\n"));
        event_base_loopexit(base, NULL);
        shutting_down = TRUE;
    }
}


void dec_pending_ntp(
    const char *    name,
    sockaddr_u *    server
    )
{
    if (n_pending_ntp > 0) {
        --n_pending_ntp;
        check_exit_conditions();
    } else {
        INSIST(0 == n_pending_ntp);
        TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
                  hostnameaddr(name, server)));
    }
}


void timeout_query(
    sent_pkt *  spkt
    )
{
    sockaddr_u *    server;
    char            xcst;


    switch (spkt->dctx->flags & CTX_xCST) {
    case CTX_BCST:
        xcst = 'B';
        break;

    case CTX_UCST:
        xcst = 'U';
        break;

    default:
        INSIST(!"spkt->dctx->flags neither UCST nor BCST");
        break;
    }
    spkt->done = TRUE;
    server = &spkt->addr;
    msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
            hostnameaddr(spkt->dctx->name, server), xcst,
            response_timeout);
    dec_pending_ntp(spkt->dctx->name, server);
    return;
}


/*
** check_kod
*/
int
check_kod(
    const struct addrinfo * ai
    )
{
    char *hostname;
    struct kod_entry *reason;

    /* Is there a KoD on file for this address? */
    hostname = addrinfo_to_str(ai);
    TRACE(2, ("check_kod: checking <%s>\n", hostname));
    if (search_entry(hostname, &reason)) {
        printf("prior KoD for %s, skipping.\n",
               hostname);
        free(reason);
        free(hostname);

        return 1;
    }
    free(hostname);

    return 0;
}


/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
    evutil_socket_t fd,
    short what,
    void *ptr
    )
{
    sockaddr_u    sender;
    sockaddr_u *  psau;
    sent_pkt **   p_pktlist;
    sent_pkt *    spkt;
    int           rpktl;
    int           rc;

    INSIST(sock4 == fd || sock6 == fd);

    TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
              (fd == sock6)
                  ? "6"
                  : "4",
              (what & EV_TIMEOUT) ? " timeout" : "",
              (what & EV_READ)    ? " read" : "",
              (what & EV_WRITE)   ? " write" : "",
              (what & EV_SIGNAL)  ? " signal" : ""));

    if (!(EV_READ & what)) {
        if (EV_TIMEOUT & what)
            timeout_queries();

        return;
    }

    /* Read in the packet */
    rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
    if (rpktl < 0) {
        msyslog(LOG_DEBUG, "recvfrom error %m");
        return;
    }

    if (sock6 == fd)
        p_pktlist = &v6_pkts_list;
    else
        p_pktlist = &v4_pkts_list;

    for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
        psau = &spkt->addr;
        if (SOCK_EQ(&sender, psau))
            break;
    }
    if (NULL == spkt) {
        msyslog(LOG_WARNING,
                "Packet from unexpected source %s dropped",
                sptoa(&sender));
        return;
    }

    TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
              sptoa(&sender)));

    rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
                        &spkt->x_pkt, "sock_cb");

    TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

    /* If this is a Unicast packet, one down ... */
    if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
        dec_pending_ntp(spkt->dctx->name, &spkt->addr);
        spkt->done = TRUE;
    }

    /* If the packet is good, set the time and we're all done */
    rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
    if (0 != rc)
        TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
    check_exit_conditions();
}


/*
 * check_exit_conditions()
 *
 * If sntp has a reply, ask the event loop to stop after this round of
 * callbacks, unless --wait was used.
 */
void
check_exit_conditions(void)
{
    if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
        (time_derived && !HAVE_OPT(WAIT))) {
        event_base_loopexit(base, NULL);
        shutting_down = TRUE;
    } else {
        TRACE(2, ("%d NTP and %d name queries pending\n",
                  n_pending_ntp, n_pending_dns));
    }
}


/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 */
void sntp_addremove_fd(
    int fd,
    int is_pipe,
    int remove_it
    )
{
    u_int            idx;
    blocking_child * c;
    struct event *   ev;

#ifdef HAVE_SOCKETPAIR
    if (is_pipe) {
        /* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
        msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
        exit(1);
    }
#endif

    c = NULL;
    for (idx = 0; idx < blocking_children_alloc; idx++) {
        c = blocking_children[idx];
        if (NULL == c)
            continue;
        if (fd == c->resp_read_pipe)
            break;
    }
    if (idx == blocking_children_alloc)
        return;

    if (remove_it) {
        ev = c->resp_read_ctx;
        c->resp_read_ctx = NULL;
        event_del(ev);
        event_free(ev);

        return;
    }

    ev = event_new(base, fd, EV_READ | EV_PERSIST,
                   &worker_resp_cb, c);
    if (NULL == ev) {
        msyslog(LOG_ERR,
                "sntp_addremove_fd: event_new(base, fd) failed!");
        return;
    }
    c->resp_read_ctx = ev;
    event_add(ev, NULL);
}


/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
void
kill_asyncio(
    int startfd
    )
{
    if (INVALID_SOCKET != sock4) {
        closesocket(sock4);
        sock4 = INVALID_SOCKET;
    }
    if (INVALID_SOCKET != sock6) {
        closesocket(sock6);
        sock6 = INVALID_SOCKET;
    }
    if (INVALID_SOCKET != bsock4) {
        closesocket(bsock4);
        bsock4 = INVALID_SOCKET;
    }
    if (INVALID_SOCKET != bsock6) {
        closesocket(bsock6);
        bsock6 = INVALID_SOCKET;
    }
}
#endif


/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 */
void
worker_resp_cb(
    evutil_socket_t fd,
    short           what,
    void *          ctx    /* blocking_child * */
    )
{
    blocking_child *c;

    REQUIRE(EV_READ & what);
    c = ctx;
    INSIST(fd == c->resp_read_pipe);
    process_blocking_resp(c);
}


/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
    u_int   seconds    /* 0 cancels */
    )
{
    struct timeval tv_to;

    if (NULL == ev_worker_timeout) {
        ev_worker_timeout = event_new(base, -1,
                                      EV_TIMEOUT | EV_PERSIST,
                                      &worker_timeout, NULL);
        INSIST(NULL != ev_worker_timeout);
    } else {
        event_del(ev_worker_timeout);
    }
    if (0 == seconds)
        return;
    tv_to.tv_sec = seconds;
    tv_to.tv_usec = 0;
    event_add(ev_worker_timeout, &tv_to);
}


void
worker_timeout(
    evutil_socket_t fd,
    short           what,
    void *          ctx
    )
{
    UNUSED_ARG(fd);
    UNUSED_ARG(ctx);

    REQUIRE(EV_TIMEOUT & what);
    worker_idle_timer_fired();
}


void
sntp_libevent_log_cb(
    int             severity,
    const char *    msg
    )
{
    int level;

    switch (severity) {

    default:
    case _EVENT_LOG_DEBUG:
        level = LOG_DEBUG;
        break;

    case _EVENT_LOG_MSG:
        level = LOG_NOTICE;
        break;

    case _EVENT_LOG_WARN:
        level = LOG_WARNING;
        break;

    case _EVENT_LOG_ERR:
        level = LOG_ERR;
        break;
    }

    msyslog(level, "%s", msg);
}


int
generate_pkt (
    struct pkt *            x_pkt,
    const struct timeval *  tv_xmt,
    int                     key_id,
    struct key *            pkt_key
    )
{
    l_fp    xmt_fp;
    int     pkt_len;
    int     mac_size;

    pkt_len = LEN_PKT_NOMAC;
    ZERO(*x_pkt);
    TVTOTS(tv_xmt, &xmt_fp);
    HTONL_FP(&xmt_fp, &x_pkt->xmt);
    x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
    x_pkt->ppoll = 8;
    /* FIXME! Broadcast mode + address check -> broadcast packet */
    set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
    if (debug > 0) {
        printf("generate_pkt: key_id %d, key pointer %p\n", key_id, pkt_key);
    }
    if (pkt_key != NULL) {
        x_pkt->exten[0] = htonl(key_id);
        mac_size = make_mac(x_pkt, pkt_len, pkt_key,
                            (char *)&x_pkt->exten[1], MAX_MDG_LEN);
        if (mac_size > 0)
            pkt_len += mac_size + KEY_MAC_LEN;
#ifdef DEBUG
        if (debug > 0) {
            printf("generate_pkt: mac_size is %d\n", mac_size);
        }
#endif

    }
    return pkt_len;
}


int
handle_pkt(
    int             rpktl,
    struct pkt *    rpkt,
    sockaddr_u *    host,
    const char *    hostname
    )
{
    char            disptxt[32];
    const char *    addrtxt;
    struct timeval  tv_dst;
    int             cnt;
    int             sw_case;
    int             digits;
    int             stratum;
    char *          ref;
    char *          ts_str;
    const char *    leaptxt;
    double          offset;
    double          precision;
    double          synch_distance;
    char *          p_SNTP_PRETEND_TIME;
    time_t          pretend_time;
#if SIZEOF_TIME_T == 8
    long long       ll;
#else
    long            l;
#endif

    ts_str = NULL;

    if (rpktl > 0)
        sw_case = 1;
    else
        sw_case = rpktl;

    switch (sw_case) {

    case SERVER_UNUSEABLE:
        return -1;
        break;

    case PACKET_UNUSEABLE:
        break;

    case SERVER_AUTH_FAIL:
        break;

    case KOD_DEMOBILIZE:
        /* Received a DENY or RESTR KOD packet */
        addrtxt = stoa(host);
        ref = (char *)&rpkt->refid;
        add_entry(addrtxt, ref);
        msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
                ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
        break;

    case KOD_RATE:
        /*
        ** Hmm...
        ** We should probably call add_entry() with an
        ** expiration timestamp of several seconds in the future,
        ** and back off even more if we get more RATE responses.
        */
        break;

    case 1:
        TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
                  rpktl, stoa(host), hostname));

        gettimeofday_cached(base, &tv_dst);

        p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
        if (p_SNTP_PRETEND_TIME) {
            pretend_time = 0;
#if SIZEOF_TIME_T == 4
            if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
                pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
            if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
                pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
            if (0 != pretend_time)
                tv_dst.tv_sec = pretend_time;
        }

        offset_calculation(rpkt, rpktl, &tv_dst, &offset,
                           &precision, &synch_distance);
        time_derived = TRUE;

        for (digits = 0; (precision *= 10.) < 1.; ++digits)
            /* empty */ ;
        if (digits > 6)
            digits = 6;

        ts_str = tv_to_str(&tv_dst);
        stratum = rpkt->stratum;
        if (0 == stratum)
            stratum = 16;

        if (synch_distance > 0.) {
            cnt = snprintf(disptxt, sizeof(disptxt),
                           " +/- %f", synch_distance);
            if ((size_t)cnt >= sizeof(disptxt))
                snprintf(disptxt, sizeof(disptxt),
                         "ERROR %d >= %d", cnt,
                         (int)sizeof(disptxt));
        } else {
            disptxt[0] = '\0';
        }

        switch (PKT_LEAP(rpkt->li_vn_mode)) {
        case LEAP_NOWARNING:
            leaptxt = "no-leap";
            break;
        case LEAP_ADDSECOND:
            leaptxt = "add-leap";
            break;
        case LEAP_DELSECOND:
            leaptxt = "del-leap";
            break;
        case LEAP_NOTINSYNC:
            leaptxt = "unsync";
            break;
        default:
            leaptxt = "LEAP-ERROR";
            break;
        }

        msyslog(LOG_INFO, "%s %+.*f%s %s s%d %s%s", ts_str,
                digits, offset, disptxt,
                hostnameaddr(hostname, host), stratum,
                leaptxt,
                (time_adjusted)
                    ? " [excess]"
                    : "");
        free(ts_str);

        if (p_SNTP_PRETEND_TIME)
            return 0;

        if (!time_adjusted &&
            (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
            return set_time(offset);

        return EX_OK;
    }

    return 1;
}


void
offset_calculation(
    struct pkt *        rpkt,
    int                 rpktl,
    struct timeval *    tv_dst,
    double *            offset,
    double *            precision,
    double *            synch_distance
    )
{
    l_fp    p_rec, p_xmt, p_ref, p_org, tmp, dst;
    u_fp    p_rdly, p_rdsp;
    double  t21, t34, delta;

    /* Convert timestamps from network to host byte order */
    p_rdly = NTOHS_FP(rpkt->rootdelay);
    p_rdsp = NTOHS_FP(rpkt->rootdisp);
    NTOHL_FP(&rpkt->reftime, &p_ref);
    NTOHL_FP(&rpkt->org, &p_org);
    NTOHL_FP(&rpkt->rec, &p_rec);
    NTOHL_FP(&rpkt->xmt, &p_xmt);

    *precision = LOGTOD(rpkt->precision);

    TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

    /* Compute offset etc. */
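    /*
    ** Refresher on the standard NTP on-wire math (RFC 5905 notation),
    ** with T1 = org (client transmit), T2 = rec (server receive),
    ** T3 = xmt (server transmit), T4 = dst (client receive):
    **     t21 = T2 - T1,  t34 = T3 - T4
    **     offset = (t21 + t34) / 2
    **     delta  = t21 - t34    (the round-trip delay)
    ** which is what the statements below compute.
    */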
    tmp = p_rec;
    L_SUB(&tmp, &p_org);
    LFPTOD(&tmp, t21);
    TVTOTS(tv_dst, &dst);
    dst.l_ui += JAN_1970;
    tmp = p_xmt;
    L_SUB(&tmp, &dst);
    LFPTOD(&tmp, t34);
    *offset = (t21 + t34) / 2.;
    delta = t21 - t34;

    // synch_distance is:
    // (peer->delay + peer->rootdelay) / 2 + peer->disp
    // + peer->rootdisp + clock_phi * (current_time - peer->update)
    // + peer->jitter;
    //
    // and peer->delay = fabs(peer->offset - p_offset) * 2;
    // and peer->offset needs history, so we're left with
    // p_offset = (t21 + t34) / 2.;
    // peer->disp = 0; (we have no history to augment this)
    // clock_phi = 15e-6;
    // peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
    // and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
    //
    // so our answer seems to be:
    //
    // (fabs(t21 + t34) + peer->rootdelay) / 3.
    // + 0 (peer->disp)
    // + peer->rootdisp
    // + 15e-6 (clock_phi)
    // + LOGTOD(sys_precision)

    INSIST( FPTOD(p_rdly) >= 0. );
#if 1
    *synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
                    + 0.
                    + FPTOD(p_rdsp)
                    + 15e-6
                    + 0.    /* LOGTOD(sys_precision) when we can get it */
                    ;
    INSIST( *synch_distance >= 0. );
#else
    *synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp)) / 2.0;
#endif

#ifdef DEBUG
    if (debug > 3) {
        printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
        printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
        printf("sntp syncdist: %f\n", *synch_distance);

        pkt_output(rpkt, rpktl, stdout);

        printf("sntp offset_calculation: rpkt->reftime:\n");
        l_fp_output(&p_ref, stdout);
        printf("sntp offset_calculation: rpkt->org:\n");
        l_fp_output(&p_org, stdout);
        printf("sntp offset_calculation: rpkt->rec:\n");
        l_fp_output(&p_rec, stdout);
        printf("sntp offset_calculation: rpkt->xmt:\n");
        l_fp_output(&p_xmt, stdout);
    }
#endif

    TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
              "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
              t21, t34, delta, *offset));

    return;
}



/* Compute the 8 bits for li_vn_mode */
void
set_li_vn_mode (
    struct pkt *spkt,
    char leap,
    char version,
    char mode
    )
{
    if (leap > 3) {
        msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
        leap = 3;
    }

    if ((unsigned char)version > 7) {
        msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
        version = 4;
    }

    if (mode > 7) {
        msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
        mode = 3;
    }

    spkt->li_vn_mode  = leap << 6;
    spkt->li_vn_mode |= version << 3;
    spkt->li_vn_mode |= mode;
}
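
/*
** Worked example of the li_vn_mode packing above: for a client request
** with leap = LEAP_NOTINSYNC (3), version = 4 (the default ntpver) and
** mode = 3 (client), the byte is
**     (3 << 6) | (4 << 3) | 3 = 0xc0 | 0x20 | 0x03 = 0xe3.
*/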

/*
** set_time applies 'offset' to the local clock.
*/
int
set_time(
    double offset
    )
{
    int rc;

    if (time_adjusted)
        return EX_OK;

    /*
    ** If we can step but we cannot slew, then step.
    ** If we can step or slew and |offset| > steplimit, then step.
    */
    if (ENABLED_OPT(STEP) &&
        (   !ENABLED_OPT(SLEW)
         || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
        )) {
        rc = step_systime(offset);

        /* If there was a problem, can we rely on errno? */
        if (1 == rc)
            time_adjusted = TRUE;
        return (time_adjusted)
                   ? EX_OK
                   : 1;
        /*
        ** In case of error, what should we use?
        ** EX_UNAVAILABLE?
        ** EX_OSERR?
        ** EX_NOPERM?
        */
    }

    if (ENABLED_OPT(SLEW)) {
        rc = adj_systime(offset);

        /* If there was a problem, can we rely on errno? */
        if (1 == rc)
            time_adjusted = TRUE;
        return (time_adjusted)
                   ? EX_OK
                   : 1;
        /*
        ** In case of error, what should we use?
        ** EX_UNAVAILABLE?
        ** EX_OSERR?
        ** EX_NOPERM?
        */
    }

    return EX_SOFTWARE;
}


int
libevent_version_ok(void)
{
    ev_uint32_t v_compile_maj;
    ev_uint32_t v_run_maj;

    v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
    v_run_maj = event_get_version_number() & 0xffff0000;
    if (v_compile_maj != v_run_maj) {
        fprintf(stderr,
                "Incompatible libevent versions: have %s, built with %s\n",
                event_get_version(),
                LIBEVENT_VERSION);
        return 0;
    }
    return 1;
}
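
/*
** Note: libevent 2 packs LIBEVENT_VERSION_NUMBER as 0xMMmmpp00
** (e.g. 2.1.8 is 0x02010800), so the 0xffff0000 mask above compares
** only the major.minor pair of the build-time and run-time libraries.
*/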

/*
 * gettimeofday_cached()
 *
 * Clones the event_base_gettimeofday_cached() interface but ensures the
 * times are always on the gettimeofday() 1970 scale.  Older libevent 2
 * sometimes used gettimeofday(), sometimes the since-system-start
 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
 *
 * It is not cleanly possible to tell which timescale older libevent is
 * using.
 *
 * The strategy involves 1 hour thresholds chosen to be far longer than
 * the duration of a round of libevent callbacks, which share a cached
 * start-of-round time.  First compare the last cached time with the
 * current gettimeofday() time.  If they are within one hour, libevent
 * is using the proper timescale so leave the offset 0.  Otherwise,
 * compare libevent's cached time and the current time on the monotonic
 * scale.  If they are within an hour, libevent is using the monotonic
 * scale so calculate the offset to add to such times to bring them to
 * gettimeofday()'s scale.
 */
int
gettimeofday_cached(
    struct event_base * b,
    struct timeval *    caller_tv
    )
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    static struct event_base *  cached_b;
    static struct timeval       cached;
    static struct timeval       adj_cached;
    static struct timeval       offset;
    static int                  offset_ready;
    struct timeval              latest;
    struct timeval              systemt;
    struct timespec             ts;
    struct timeval              mono;
    struct timeval              diff;
    int                         cgt_rc;
    int                         gtod_rc;

    event_base_gettimeofday_cached(b, &latest);
    if (b == cached_b &&
        !memcmp(&latest, &cached, sizeof(latest))) {
        *caller_tv = adj_cached;
        return 0;
    }
    cached = latest;
    cached_b = b;
    if (!offset_ready) {
        cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
        gtod_rc = gettimeofday(&systemt, NULL);
        if (0 != gtod_rc) {
            msyslog(LOG_ERR,
                    "%s: gettimeofday() error %m",
                    progname);
            exit(1);
        }
        diff = sub_tval(systemt, latest);
        if (debug > 1)
            printf("system minus cached %+ld.%06ld\n",
                   (long)diff.tv_sec, (long)diff.tv_usec);
        if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
            /*
             * Either use_monotonic == 0, or this libevent
             * has been repaired.  Leave offset at zero.
             */
        } else {
            mono.tv_sec = ts.tv_sec;
            mono.tv_usec = ts.tv_nsec / 1000;
            diff = sub_tval(latest, mono);
            if (debug > 1)
                printf("cached minus monotonic %+ld.%06ld\n",
                       (long)diff.tv_sec, (long)diff.tv_usec);
            if (labs((long)diff.tv_sec) < 3600) {
                /* older libevent2 using monotonic */
                offset = sub_tval(systemt, mono);
                TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
                          "gettimeofday_cached",
                          (long)offset.tv_sec,
                          (long)offset.tv_usec));
            }
        }
        offset_ready = TRUE;
    }
    adj_cached = add_tval(cached, offset);
    *caller_tv = adj_cached;

    return 0;
#else
    return event_base_gettimeofday_cached(b, caller_tv);
#endif
}

/* Dummy function to satisfy libntp/work_fork.c */
extern int set_user_group_ids(void);
int set_user_group_ids(void)
{
    return 1;
}