/*
 * ntp_proto.c - NTP version 4 protocol machinery
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "ntpd.h"
#include "ntp_stdlib.h"
#include "ntp_unixtime.h"
#include "ntp_control.h"
#include "ntp_string.h"
#include "ntp_leapsec.h"
#include "refidsmear.h"
#include "lib_strbuf.h"

#include <stdio.h>
#ifdef HAVE_LIBSCF_H
#include <libscf.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/*
 * This macro defines the authentication state. If x is 1 authentication
 * is required; otherwise it is optional.
 */
#define	AUTH(x, y)	((x) ? (y) == AUTH_OK \
			     : (y) == AUTH_OK || (y) == AUTH_NONE)

#define	AUTH_NONE	0	/* authentication not required */
#define	AUTH_OK		1	/* authentication OK */
#define	AUTH_ERROR	2	/* authentication error */
#define	AUTH_CRYPTO	3	/* crypto_NAK */

/*
 * Set up Kiss Code values
 */

enum kiss_codes {
	NOKISS,				/* No Kiss Code */
	RATEKISS,			/* Rate limit Kiss Code */
	DENYKISS,			/* Deny Kiss */
	RSTRKISS,			/* Restricted Kiss */
	XKISS,				/* Experimental Kiss */
	UNKNOWNKISS			/* Unknown Kiss Code */
};

/*
 * traffic shaping parameters
 */
#define	NTP_IBURST	6	/* packets in iburst */
#define	RESP_DELAY	1	/* refclock burst delay (s) */

/*
 * pool soliciting restriction duration (s)
 */
#define	POOL_SOLICIT_WINDOW	8

/*
 * peer_select groups statistics for a peer used by clock_select() and
 * clock_cluster().
 */
typedef struct peer_select_tag {
	struct peer *	peer;
	double		synch;	/* sync distance */
	double		error;	/* jitter */
	double		seljit;	/* selection jitter */
} peer_select;

/*
 * System variables are declared here. Unless specified otherwise, all
 * times are in seconds.
 */
u_char	sys_leap;		/* system leap indicator, use set_sys_leap() to change this */
u_char	xmt_leap;		/* leap indicator sent in client requests, set up by set_sys_leap() */
u_char	sys_stratum;		/* system stratum */
s_char	sys_precision;		/* local clock precision (log2 s) */
double	sys_rootdelay;		/* roundtrip delay to primary source */
double	sys_rootdisp;		/* dispersion to primary source */
u_int32	sys_refid;		/* reference id (network byte order) */
l_fp	sys_reftime;		/* last update time */
struct peer *sys_peer;		/* current peer */

#ifdef LEAP_SMEAR
struct leap_smear_info leap_smear;
#endif
int	leap_sec_in_progress;

/*
 * Rate controls. Leaky buckets are used to throttle the packet
 * transmission rates in order to protect busy servers such as at NIST
 * and USNO. There is a counter for each association and another for KoD
 * packets. The association counter decrements each second, but not
 * below zero. Each time a packet is sent the counter is incremented by
 * a configurable value representing the average interval between
 * packets. A packet is delayed as long as the counter is greater than
 * zero. Note this does not affect the time value computations.
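 *
 * (Worked illustration, not part of the original comment: with a
 * configured average interval of 8 s, sending one packet raises the
 * association's counter to 8; the counter then drains at one count per
 * second, so further output toward that association is delayed until
 * it has counted back down to zero.)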
 */
/*
 * Nonspecified system state variables
 */
int	sys_bclient;		/* broadcast client enable */
double	sys_bdelay;		/* broadcast client default delay */
int	sys_authenticate;	/* require authentication for config */
l_fp	sys_authdelay;		/* authentication delay */
double	sys_offset;		/* current local clock offset */
double	sys_mindisp = MINDISPERSE; /* minimum distance (s) */
double	sys_maxdist = MAXDISTANCE; /* selection threshold */
double	sys_jitter;		/* system jitter */
u_long	sys_epoch;		/* last clock update time */
static	double sys_clockhop;	/* clockhop threshold */
static	int leap_vote_ins;	/* leap consensus for insert */
static	int leap_vote_del;	/* leap consensus for delete */
keyid_t	sys_private;		/* private value for session seed */
int	sys_manycastserver;	/* respond to manycast client pkts */
int	ntp_mode7;		/* respond to ntpdc (mode7) */
int	peer_ntpdate;		/* active peers in ntpdate mode */
int	sys_survivors;		/* truest of the truechimers */
char	*sys_ident = NULL;	/* identity scheme */

/*
 * TOS and multicast mapping stuff
 */
int	sys_floor = 0;		/* cluster stratum floor */
int	sys_ceiling = STRATUM_UNSPEC - 1; /* cluster stratum ceiling */
int	sys_minsane = 1;	/* minimum candidates */
int	sys_minclock = NTP_MINCLOCK; /* minimum survivors */
int	sys_maxclock = NTP_MAXCLOCK; /* maximum candidates */
int	sys_cohort = 0;		/* cohort switch */
int	sys_orphan = STRATUM_UNSPEC + 1; /* orphan stratum */
int	sys_orphwait = NTP_ORPHWAIT; /* orphan wait */
int	sys_beacon = BEACON;	/* manycast beacon interval */
int	sys_ttlmax;		/* max ttl mapping vector index */
u_char	sys_ttl[MAX_TTL];	/* ttl mapping vector */

/*
 * Statistics counters - first the good, then the bad
 */
u_long	sys_stattime;		/* elapsed time */
u_long	sys_received;		/* packets received */
u_long	sys_processed;		/* packets for this host */
u_long	sys_newversion;		/* current version */
u_long	sys_oldversion;		/* old version */
u_long	sys_restricted;		/* access denied */
u_long	sys_badlength;		/* bad length or format */
u_long	sys_badauth;		/* bad authentication */
u_long	sys_declined;		/* declined */
u_long	sys_limitrejected;	/* rate exceeded */
u_long	sys_kodsent;		/* KoD sent */

/*
 * Mechanism knobs: how soon do we unpeer()?
 *
 * The default way is "on-receipt". If this was a packet from a
 * well-behaved source, on-receipt will offer the fastest recovery.
 * If this was from a DoS attack, the default way makes it easier
 * for a bad-guy to DoS us. So look and see what bites you harder
 * and choose according to your environment.
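 *
 * (As the initializers just below show, all three unpeer_*_early knobs
 * default to 1, i.e. the "on-receipt" behavior described above.)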
 */
int	unpeer_crypto_early = 1;	/* bad crypto (TEST9) */
int	unpeer_crypto_nak_early = 1;	/* crypto_NAK (TEST5) */
int	unpeer_digest_early = 1;	/* bad digest (TEST5) */

static	int	kiss_code_check(u_char hisleap, u_char hisstratum, u_char hismode, u_int32 refid);
static	double	root_distance	(struct peer *);
static	void	clock_combine	(peer_select *, int, int);
static	void	peer_xmit	(struct peer *);
static	void	fast_xmit	(struct recvbuf *, int, keyid_t, int);
static	void	pool_xmit	(struct peer *);
static	void	clock_update	(struct peer *);
static	void	measure_precision(void);
static	double	measure_tick_fuzz(void);
static	int	local_refid	(struct peer *);
static	int	peer_unfit	(struct peer *);
#ifdef AUTOKEY
static	int	group_test	(char *, char *);
#endif /* AUTOKEY */
#ifdef WORKER
void	pool_name_resolved	(int, int, void *, const char *,
				 const char *, const struct addrinfo *,
				 const struct addrinfo *);
#endif /* WORKER */

const char *	amtoa		(int am);


void
set_sys_leap(
	u_char new_sys_leap
	)
{
	sys_leap = new_sys_leap;
	xmt_leap = sys_leap;

	/*
	 * Under certain conditions we send faked leap bits to clients, so
	 * eventually change xmt_leap below, but never change LEAP_NOTINSYNC.
	 */
	if (xmt_leap != LEAP_NOTINSYNC) {
		if (leap_sec_in_progress) {
			/* always send "not sync" */
			xmt_leap = LEAP_NOTINSYNC;
		}
#ifdef LEAP_SMEAR
		else {
			/*
			 * If leap smear is enabled in general we must
			 * never send a leap second warning to clients,
			 * so make sure we only send "in sync".
			 */
			if (leap_smear.enabled)
				xmt_leap = LEAP_NOWARNING;
		}
#endif	/* LEAP_SMEAR */
	}
}


/*
 * Kiss Code check
 */
int
kiss_code_check(
	u_char hisleap,
	u_char hisstratum,
	u_char hismode,
	u_int32 refid
	)
{

	if (   hismode == MODE_SERVER
	    && hisleap == LEAP_NOTINSYNC
	    && hisstratum == STRATUM_UNSPEC) {
		if (memcmp(&refid, "RATE", 4) == 0) {
			return (RATEKISS);
		} else if (memcmp(&refid, "DENY", 4) == 0) {
			return (DENYKISS);
		} else if (memcmp(&refid, "RSTR", 4) == 0) {
			return (RSTRKISS);
		} else if (memcmp(&refid, "X", 1) == 0) {
			return (XKISS);
		} else {
			return (UNKNOWNKISS);
		}
	} else {
		return (NOKISS);
	}
}


/*
 * transmit - transmit procedure called by poll timeout
 */
void
transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char hpoll;

	/*
	 * The polling state machine. There are two kinds of machines,
	 * those that never expect a reply (broadcast and manycast
	 * server modes) and those that do (all other modes). The dance
	 * is intricate...
	 */
	hpoll = peer->hpoll;

	/*
	 * In broadcast mode the poll interval is never changed from
	 * minpoll.
	 */
	if (peer->cast_flags & (MDF_BCAST | MDF_MCAST)) {
		peer->outdate = current_time;
		if (sys_leap != LEAP_NOTINSYNC)
			peer_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In manycast mode we start with unity ttl. The ttl is
	 * increased by one for each poll until either sys_maxclock
	 * servers have been found or the maximum ttl is reached. When
	 * sys_maxclock servers are found we stop polling until one or
	 * more servers have timed out or until less than sys_minclock
	 * associations turn up.
	 * In this case additional better servers
	 * are dragged in and preempt the existing ones. Once every
	 * sys_beacon seconds we are to transmit unconditionally, but
	 * this code is not quite right -- peer->unreach counts polls
	 * and is being compared with sys_beacon, so the beacons happen
	 * every sys_beacon polls.
	 */
	if (peer->cast_flags & MDF_ACAST) {
		peer->outdate = current_time;
		if (peer->unreach > sys_beacon) {
			peer->unreach = 0;
			peer->ttl = 0;
			peer_xmit(peer);
		} else if (   sys_survivors < sys_minclock
			   || peer_associations < sys_maxclock) {
			if (peer->ttl < (u_int32)sys_ttlmax)
				peer->ttl++;
			peer_xmit(peer);
		}
		peer->unreach++;
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * Pool associations transmit unicast solicitations when there
	 * are less than a hard limit of 2 * sys_maxclock associations,
	 * and either less than sys_minclock survivors or less than
	 * sys_maxclock associations. The hard limit prevents unbounded
	 * growth in associations if the system clock or network quality
	 * result in survivor count dipping below sys_minclock often.
	 * This was observed testing with pool, where sys_maxclock == 12
	 * resulted in 60 associations without the hard limit. A
	 * similar hard limit on manycastclient ephemeral associations
	 * may be appropriate.
	 */
	if (peer->cast_flags & MDF_POOL) {
		peer->outdate = current_time;
		if (   (peer_associations <= 2 * sys_maxclock)
		    && (   peer_associations < sys_maxclock
			|| sys_survivors < sys_minclock))
			pool_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In unicast modes the dance is much more intricate. It is
	 * designed to back off whenever possible to minimize network
	 * traffic.
	 */
	if (peer->burst == 0) {
		u_char oreach;

		/*
		 * Update the reachability status. If not heard for
		 * three consecutive polls, stuff infinity in the clock
		 * filter.
		 */
		oreach = peer->reach;
		peer->outdate = current_time;
		peer->unreach++;
		peer->reach <<= 1;
		if (!peer->reach) {

			/*
			 * Here the peer is unreachable. If it was
			 * previously reachable raise a trap. Send a
			 * burst if enabled.
			 */
			clock_filter(peer, 0., 0., MAXDISPERSE);
			if (oreach) {
				peer_unfit(peer);
				report_event(PEVNT_UNREACH, peer, NULL);
			}
			if (   (peer->flags & FLAG_IBURST)
			    && peer->retry == 0)
				peer->retry = NTP_RETRY;
		} else {

			/*
			 * Here the peer is reachable. Send a burst if
			 * enabled and the peer is fit. Reset unreach
			 * for persistent and ephemeral associations.
			 * Unreach is also reset for survivors in
			 * clock_select().
			 */
			hpoll = sys_poll;
			if (!(peer->flags & FLAG_PREEMPT))
				peer->unreach = 0;
			if (   (peer->flags & FLAG_BURST)
			    && peer->retry == 0
			    && !peer_unfit(peer))
				peer->retry = NTP_RETRY;
		}

		/*
		 * Watch for timeout. If ephemeral, toss the rascal;
		 * otherwise, bump the poll interval. Note the
		 * poll_update() routine will clamp it to maxpoll.
		 * If preemptible and we have more peers than maxclock,
		 * and this peer has the minimum score of preemptibles,
		 * demobilize.
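		 * (In other words: NTP_UNREACH consecutive unanswered
		 * polls triggers the timeout handling below, and a
		 * preemptible association is demobilized only when
		 * score_all() reports it has the minimum score among
		 * the preemptibles.)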
		 */
		if (peer->unreach >= NTP_UNREACH) {
			hpoll++;
			/* ephemeral: no FLAG_CONFIG nor FLAG_PREEMPT */
			if (!(peer->flags & (FLAG_CONFIG | FLAG_PREEMPT))) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
			if (   (peer->flags & FLAG_PREEMPT)
			    && (peer_associations > sys_maxclock)
			    && score_all(peer)) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
		}
	} else {
		peer->burst--;
		if (peer->burst == 0) {

			/*
			 * If ntpdate mode and the clock has not been
			 * set and all peers have completed the burst,
			 * we declare a successful failure.
			 */
			if (mode_ntpdate) {
				peer_ntpdate--;
				if (peer_ntpdate == 0) {
					msyslog(LOG_NOTICE,
					    "ntpd: no servers found");
					if (!msyslog_term)
						printf(
						    "ntpd: no servers found\n");
					exit (0);
				}
			}
		}
	}
	if (peer->retry > 0)
		peer->retry--;

	/*
	 * Do not transmit if in broadcast client mode.
	 */
	if (peer->hmode != MODE_BCLIENT)
		peer_xmit(peer);
	poll_update(peer, hpoll);

	return;
}


const char *
amtoa(
	int am
	)
{
	char *bp;

	switch (am) {
	case AM_ERR:		return "AM_ERR";
	case AM_NOMATCH:	return "AM_NOMATCH";
	case AM_PROCPKT:	return "AM_PROCPKT";
	case AM_BCST:		return "AM_BCST";
	case AM_FXMIT:		return "AM_FXMIT";
	case AM_MANYCAST:	return "AM_MANYCAST";
	case AM_NEWPASS:	return "AM_NEWPASS";
	case AM_NEWBCL:		return "AM_NEWBCL";
	case AM_POSSBCL:	return "AM_POSSBCL";
	default:
		LIB_GETBUF(bp);
		snprintf(bp, LIB_BUFLENGTH, "AM_#%d", am);
		return bp;
	}
}


/*
 * receive - receive procedure called for each packet received
 */
void
receive(
	struct recvbuf *rbufp
	)
{
	register struct peer *peer;	/* peer structure pointer */
	register struct pkt *pkt;	/* receive packet pointer */
	u_char	hisversion;		/* packet version */
	u_char	hisleap;		/* packet leap indicator */
	u_char	hismode;		/* packet mode */
	u_char	hisstratum;		/* packet stratum */
	u_short	restrict_mask;		/* restrict bits */
	const char *hm_str;		/* hismode string */
	const char *am_str;		/* association match string */
	int	kissCode = NOKISS;	/* Kiss Code */
	int	has_mac;		/* length of MAC field */
	int	authlen;		/* offset of MAC field */
	int	is_authentic = 0;	/* cryptosum ok */
	int	retcode = AM_NOMATCH;	/* match code */
	keyid_t	skeyid = 0;		/* key IDs */
	u_int32	opcode = 0;		/* extension field opcode */
	sockaddr_u *dstadr_sin;		/* active runway */
	struct peer *peer2;		/* aux peer structure pointer */
	endpt	*match_ep;		/* newpeer() local address */
	l_fp	p_org;			/* origin timestamp */
	l_fp	p_rec;			/* receive timestamp */
	l_fp	p_xmt;			/* transmit timestamp */
#ifdef AUTOKEY
	char	hostname[NTP_MAXSTRLEN + 1];
	char	*groupname = NULL;
	struct autokey *ap;		/* autokey structure pointer */
	int	rval;			/* cookie snatcher */
	keyid_t	pkeyid = 0, tkeyid = 0;	/* key IDs */
#endif	/* AUTOKEY */
#ifdef HAVE_NTP_SIGND
	static unsigned char zero_key[16];
#endif	/* HAVE_NTP_SIGND */

	/*
	 * Monitor the packet and get restrictions. Note that the packet
	 * length for control and private mode packets must be checked
	 * by the service routines. Some restrictions have to be handled
	 * later in order to generate a kiss-o'-death packet.
	 */
	/*
	 * Bogus port check is before anything, since it probably
	 * reveals a clogging attack.
	 */
	sys_received++;
	if (0 == SRCPORT(&rbufp->recv_srcadr)) {
		sys_badlength++;
		return;				/* bogus port */
	}
	restrict_mask = restrictions(&rbufp->recv_srcadr);
	pkt = &rbufp->recv_pkt;
	DPRINTF(2, ("receive: at %ld %s<-%s flags %x restrict %03x org %#010x.%08x xmt %#010x.%08x\n",
		    current_time, stoa(&rbufp->dstadr->sin),
		    stoa(&rbufp->recv_srcadr), rbufp->dstadr->flags,
		    restrict_mask, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
		    ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
	hisversion = PKT_VERSION(pkt->li_vn_mode);
	hisleap = PKT_LEAP(pkt->li_vn_mode);
	hismode = (int)PKT_MODE(pkt->li_vn_mode);
	hisstratum = PKT_TO_STRATUM(pkt->stratum);
	if (restrict_mask & RES_IGNORE) {
		sys_restricted++;
		return;				/* ignore everything */
	}
	if (hismode == MODE_PRIVATE) {
		if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) {
			sys_restricted++;
			return;			/* no query private */
		}
		process_private(rbufp, ((restrict_mask &
		    RES_NOMODIFY) == 0));
		return;
	}
	if (hismode == MODE_CONTROL) {
		if (restrict_mask & RES_NOQUERY) {
			sys_restricted++;
			return;			/* no query control */
		}
		process_control(rbufp, restrict_mask);
		return;
	}
	if (restrict_mask & RES_DONTSERVE) {
		sys_restricted++;
		return;				/* no time serve */
	}

	/*
	 * This is for testing. If restricted drop ten percent of
	 * surviving packets.
	 */
	if (restrict_mask & RES_FLAKE) {
		if ((double)ntp_random() / 0x7fffffff < .1) {
			sys_restricted++;
			return;			/* no flakeway */
		}
	}

	/*
	 * Version check must be after the query packets, since they
	 * intentionally use an early version.
	 */
	if (hisversion == NTP_VERSION) {
		sys_newversion++;		/* new version */
	} else if (   !(restrict_mask & RES_VERSION)
		   && hisversion >= NTP_OLDVERSION) {
		sys_oldversion++;		/* previous version */
	} else {
		sys_badlength++;
		return;				/* old version */
	}

	/*
	 * Figure out his mode and validate the packet. This has some
	 * legacy raunch that probably should be removed. In very early
	 * NTP versions mode 0 was equivalent to what later versions
	 * would interpret as client mode.
	 */
	if (hismode == MODE_UNSPEC) {
		if (hisversion == NTP_OLDVERSION) {
			hismode = MODE_CLIENT;
		} else {
			sys_badlength++;
			return;			/* invalid mode */
		}
	}

	/*
	 * Parse the extension field if present. We figure out whether
	 * an extension field is present by measuring the MAC size. If
	 * the number of words following the packet header is 0, no MAC
	 * is present and the packet is not authenticated. If 1, the
	 * packet is a crypto-NAK; if 3, the packet is authenticated
	 * with DES; if 5, the packet is authenticated with MD5; if 6,
	 * the packet is authenticated with SHA. If 2 or 4, the packet
	 * is a runt and discarded forthwith. If greater than 6, an
	 * extension field is present, so we subtract the length of the
	 * field and go around again.
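	 * (In octets, assuming the usual 4-octet key ID: a crypto-NAK
	 * is the bare key ID, a DES MAC adds an 8-octet digest, MD5 a
	 * 16-octet digest and SHA a 20-octet digest, which is where the
	 * word counts 1, 3, 5 and 6 above come from.)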
	 */
	authlen = LEN_PKT_NOMAC;
	has_mac = rbufp->recv_length - authlen;
	while (has_mac > 0) {
		u_int32	len;
#ifdef AUTOKEY
		u_int32	hostlen;
		struct exten *ep;
#endif /* AUTOKEY */

		if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) {
			sys_badlength++;
			return;			/* bad length */
		}
		if (has_mac <= (int)MAX_MAC_LEN) {
			skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]);
			break;

		} else {
			opcode = ntohl(((u_int32 *)pkt)[authlen / 4]);
			len = opcode & 0xffff;
			if (   len % 4 != 0
			    || len < 4
			    || (int)len + authlen > rbufp->recv_length) {
				sys_badlength++;
				return;		/* bad length */
			}
#ifdef AUTOKEY
			/*
			 * Extract calling group name for later. If
			 * sys_groupname is non-NULL, there must be
			 * a group name provided to elicit a response.
			 */
			if (   (opcode & 0x3fff0000) == CRYPTO_ASSOC
			    && sys_groupname != NULL) {
				ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4];
				hostlen = ntohl(ep->vallen);
				if (   hostlen >= sizeof(hostname)
				    || hostlen > len -
						offsetof(struct exten, pkt)) {
					sys_badlength++;
					return;	/* bad length */
				}
				memcpy(hostname, &ep->pkt, hostlen);
				hostname[hostlen] = '\0';
				groupname = strchr(hostname, '@');
				if (groupname == NULL) {
					sys_declined++;
					return;
				}
				groupname++;
			}
#endif /* AUTOKEY */
			authlen += len;
			has_mac -= len;
		}
	}

	/*
	 * If has_mac is < 0 we had a malformed packet.
	 */
	if (has_mac < 0) {
		sys_badlength++;
		return;				/* bad length */
	}

	/*
	 * If authentication required, a MAC must be present.
	 */
	if (restrict_mask & RES_DONTTRUST && has_mac == 0) {
		sys_restricted++;
		return;				/* access denied */
	}

	/*
	 * Update the MRU list and finger the cloggers. It can be a
	 * little expensive, so turn it off for production use.
	 * RES_LIMITED and RES_KOD will be cleared in the returned
	 * restrict_mask unless one or both actions are warranted.
	 */
	restrict_mask = ntp_monitor(rbufp, restrict_mask);
	if (restrict_mask & RES_LIMITED) {
		sys_limitrejected++;
		if (   !(restrict_mask & RES_KOD)
		    || MODE_BROADCAST == hismode
		    || MODE_SERVER == hismode) {
			if (MODE_SERVER == hismode)
				DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n",
					    stoa(&rbufp->recv_srcadr)));
			return;			/* rate exceeded */
		}
		if (hismode == MODE_CLIENT)
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		else
			fast_xmit(rbufp, MODE_ACTIVE, skeyid,
			    restrict_mask);
		return;				/* rate exceeded */
	}
	restrict_mask &= ~RES_KOD;

	/*
	 * We have tossed out as many buggy packets as possible early in
	 * the game to reduce the exposure to a clogging attack. Now we
	 * have to burn some cycles to find the association and
	 * authenticate the packet if required. Note that we burn only
	 * digest cycles, again to reduce exposure. There may be no
	 * matching association and that's okay.
	 *
	 * More on the autokey mambo. Normally the local interface is
	 * found when the association was mobilized with respect to a
	 * designated remote address. We assume packets arriving from
	 * the remote address arrive via this interface and the local
	 * address used to construct the autokey is the unicast address
	 * of the interface. However, if the sender is a broadcaster,
	 * the interface broadcast address is used instead.
	 * Notwithstanding this technobabble, if the sender is a
	 * multicaster, the broadcast address is null, so we use the
	 * unicast address anyway. Don't ask.
	 */
	peer = findpeer(rbufp, hismode, &retcode);
	dstadr_sin = &rbufp->dstadr->sin;
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);
	hm_str = modetoa(hismode);
	am_str = amtoa(retcode);

	/*
	 * Authentication is conditioned by three switches:
	 *
	 * NOPEER  (RES_NOPEER) do not mobilize an association unless
	 *         authenticated
	 * NOTRUST (RES_DONTTRUST) do not allow access unless
	 *         authenticated (implies NOPEER)
	 * enable  (sys_authenticate) master NOPEER switch, by default
	 *         on
	 *
	 * The NOPEER and NOTRUST can be specified on a per-client basis
	 * using the restrict command. The enable switch if on implies
	 * NOPEER for all clients. There are four outcomes:
	 *
	 * NONE    the packet has no MAC
	 * OK      the packet has a MAC and authentication succeeds
	 * ERROR   the packet has a MAC and authentication fails
	 * CRYPTO  crypto-NAK. The MAC has four octets only.
	 *
	 * Note: The AUTH(x, y) macro is used to filter outcomes. If x
	 * is zero, acceptable outcomes of y are NONE and OK. If x is
	 * one, the only acceptable outcome of y is OK.
	 */

	if (has_mac == 0) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_NONE;	/* not required */
		DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s len %d org %#010x.%08x xmt %#010x.%08x NOMAC\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
			    authlen,
			    ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
			    ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
	} else if (has_mac == 4) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_CRYPTO;	/* crypto-NAK */
		DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x MAC4\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
			    skeyid, authlen + has_mac, is_authentic,
			    ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
			    ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));

#ifdef HAVE_NTP_SIGND
		/*
		 * If the signature is 20 bytes long, the last 16 of
		 * which are zero, then this is a Microsoft client
		 * wanting AD-style authentication of the server's
		 * reply.
		 *
		 * This is described in Microsoft's WSPP docs, in MS-SNTP:
		 * http://msdn.microsoft.com/en-us/library/cc212930.aspx
		 */
	} else if (   has_mac == MAX_MD5_LEN
		   && (restrict_mask & RES_MSSNTP)
		   && (retcode == AM_FXMIT || retcode == AM_NEWPASS)
		   && (memcmp(zero_key, (char *)pkt + authlen + 4,
			      MAX_MD5_LEN - 4) == 0)) {
		is_authentic = AUTH_NONE;
#endif /* HAVE_NTP_SIGND */

	} else {
		restrict_mask &= ~RES_MSSNTP;
#ifdef AUTOKEY
		/*
		 * For autokey modes, generate the session key
		 * and install in the key cache. Use the socket
		 * broadcast or unicast address as appropriate.
		 */
		if (crypto_flags && skeyid > NTP_MAXKEY) {

			/*
			 * More on the autokey dance (AKD). A cookie is
			 * constructed from public and private values.
			 * For broadcast packets, the cookie is public
			 * (zero). For packets that match no
			 * association, the cookie is hashed from the
			 * addresses and private value. For server
			 * packets, the cookie was previously obtained
			 * from the server.
			 * For symmetric modes, the cookie was
			 * previously constructed using an agreement
			 * protocol; however, should PKI be
			 * unavailable, we construct a fake agreement
			 * as the EXOR of the peer and host cookies.
			 *
			 * hismode	ephemeral	persistent
			 * =======================================
			 * active	0		cookie#
			 * passive	0%		cookie#
			 * client	sys cookie	0%
			 * server	0%		sys cookie
			 * broadcast	0		0
			 *
			 * # if unsync, 0
			 * % can't happen
			 */
			if (has_mac < (int)MAX_MD5_LEN) {
				sys_badauth++;
				return;
			}
			if (hismode == MODE_BROADCAST) {

				/*
				 * For broadcaster, use the interface
				 * broadcast address when available;
				 * otherwise, use the unicast address
				 * found when the association was
				 * mobilized. However, if this is from
				 * the wildcard interface, game over.
				 */
				if (   crypto_flags
				    && rbufp->dstadr ==
				       ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) {
					sys_restricted++;
					return;	/* no wildcard */
				}
				pkeyid = 0;
				if (!SOCK_UNSPEC(&rbufp->dstadr->bcast))
					dstadr_sin =
					    &rbufp->dstadr->bcast;
			} else if (peer == NULL) {
				pkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin, 0,
				    sys_private, 0);
			} else {
				pkeyid = peer->pcookie;
			}

			/*
			 * The session key includes both the public
			 * values and cookie. In case of an extension
			 * field, the cookie used for authentication
			 * purposes is zero. Note the hash is saved for
			 * use later in the autokey mambo.
			 */
			if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) {
				session_key(&rbufp->recv_srcadr,
				    dstadr_sin, skeyid, 0, 2);
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 0);
			} else {
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 2);
			}

		}
#endif /* AUTOKEY */

		/*
		 * Compute the cryptosum. Note a clogging attack may
		 * succeed in bloating the key cache. If an autokey,
		 * purge it immediately, since we won't be needing it
		 * again. If the packet is authentic, it can mobilize an
		 * association. Note that there is no key zero.
		 */
		if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen,
		    has_mac))
			is_authentic = AUTH_ERROR;
		else
			is_authentic = AUTH_OK;
#ifdef AUTOKEY
		if (crypto_flags && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
#endif /* AUTOKEY */
		DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
			    skeyid, authlen + has_mac, is_authentic,
			    ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
			    ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
	}

	/*
	 * The association matching rules are implemented by a set of
	 * routines and an association table. A packet matching an
	 * association is processed by the peer process for that
	 * association. If there are no errors, an ephemeral association
	 * is mobilized: a broadcast packet mobilizes a broadcast client
	 * association; a manycast server packet mobilizes a manycast
	 * client association; a symmetric active packet mobilizes a
	 * symmetric passive association.
	 */
	switch (retcode) {

	/*
	 * This is a client mode packet not matching any association. If
	 * an ordinary client, simply toss a server mode packet back
	 * over the fence. If a manycast client, we have to work a
	 * little harder.
	 */
	case AM_FXMIT:

		/*
		 * If authentication OK, send a server reply; otherwise,
		 * send a crypto-NAK.
		 */
		if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) {
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_SERVER, skeyid,
				    restrict_mask);
			} else if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_SERVER, 0,
				    restrict_mask);
				sys_badauth++;
			} else {
				sys_restricted++;
			}
			return;			/* hooray */
		}

		/*
		 * This must be manycast. Do not respond if not
		 * configured as a manycast server.
		 */
		if (!sys_manycastserver) {
			sys_restricted++;
			return;			/* not enabled */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */

		/*
		 * Do not respond if we are not synchronized or our
		 * stratum is greater than the manycaster or the
		 * manycaster has already synchronized to us.
		 */
		if (   sys_leap == LEAP_NOTINSYNC
		    || sys_stratum >= hisstratum
		    || (!sys_cohort && sys_stratum == hisstratum + 1)
		    || rbufp->dstadr->addr_refid == pkt->refid) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * Respond only if authentication succeeds. Don't do a
		 * crypto-NAK, as that would not be useful.
		 */
		if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic))
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		return;				/* hooray */

	/*
	 * This is a server mode packet returned in response to a client
	 * mode packet sent to a multicast group address (for
	 * manycastclient) or to a unicast address (for pool). The
	 * origin timestamp is a good nonce to reliably associate the
	 * reply with what was sent. If there is no match, that's
	 * curious and could be an intruder attempting to clog, so we
	 * just ignore it.
	 *
	 * If the packet is authentic and the manycastclient or pool
	 * association is found, we mobilize a client association and
	 * copy pertinent variables from the manycastclient or pool
	 * association to the new client association. If not, just
	 * ignore the packet.
	 *
	 * There is an implosion hazard at the manycast client, since
	 * the manycast servers send the server packet immediately. If
	 * the guy is already here, don't fire up a duplicate.
	 */
	case AM_MANYCAST:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if ((peer2 = findmanycastpeer(rbufp)) == NULL) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH(   (!(peer2->cast_flags & MDF_POOL)
			   && sys_authenticate)
			  || (restrict_mask & (RES_NOPEER |
			      RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
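		 * (With the defaults declared above, sys_floor 0 and
		 * sys_ceiling STRATUM_UNSPEC - 1, this mainly screens
		 * out unsynchronized and unspecified-stratum sources.)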
		 */
		if (   hisleap == LEAP_NOTINSYNC
		    || hisstratum < sys_floor
		    || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}
		peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr,
		    MODE_CLIENT, hisversion, peer2->minpoll,
		    peer2->maxpoll, FLAG_PREEMPT |
		    (FLAG_IBURST & peer2->flags), MDF_UCAST |
		    MDF_UCLNT, 0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_declined++;
			return;			/* ignore duplicate */
		}

		/*
		 * After each ephemeral pool association is spun,
		 * accelerate the next poll for the pool solicitor so
		 * the pool will fill promptly.
		 */
		if (peer2->cast_flags & MDF_POOL)
			peer2->nextdate = current_time + 1;

		/*
		 * Further processing of the solicitation response would
		 * simply detect its origin timestamp as bogus for the
		 * brand-new association (it matches the prototype
		 * association) and tinker with peer->nextdate delaying
		 * first sync.
		 */
		return;		/* solicitation response handled */

	/*
	 * This is the first packet received from a broadcast server. If
	 * the packet is authentic and we are enabled as broadcast
	 * client, mobilize a broadcast client association. We don't
	 * kiss any frogs here.
	 */
	case AM_NEWBCL:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (sys_bclient == 0) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
		 */
		if (   hisleap == LEAP_NOTINSYNC
		    || hisstratum < sys_floor
		    || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if Autokey and the opcode is not a
		 * CRYPTO_ASSOC response with association ID.
		 */
		if (   crypto_flags && skeyid > NTP_MAXKEY
		    && (opcode & 0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) {
			sys_declined++;
			return;			/* protocol error */
		}
#endif /* AUTOKEY */

		/*
		 * Broadcasts received via a multicast address may
		 * arrive after a unicast volley has begun
		 * with the same remote address. newpeer() will not
		 * find duplicate associations on other local endpoints
		 * if a non-NULL endpoint is supplied. multicastclient
		 * ephemeral associations are unique across all local
		 * endpoints.
		 */
		if (!(INT_MCASTOPEN & rbufp->dstadr->flags))
			match_ep = rbufp->dstadr;
		else
			match_ep = NULL;

		/*
		 * Determine whether to execute the initial volley.
		 */
		if (sys_bdelay != 0) {
#ifdef AUTOKEY
			/*
			 * If a two-way exchange is not possible,
			 * neither is Autokey.
			 */
			if (crypto_flags && skeyid > NTP_MAXKEY) {
				sys_restricted++;
				return;		/* no autokey */
			}
#endif /* AUTOKEY */

			/*
			 * Do not execute the volley. Start out in
			 * broadcast client mode.
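			 * (The configured broadcast delay, sys_bdelay,
			 * substitutes for a measured propagation delay,
			 * so the calibration volley below is skipped
			 * and the configured value is used directly.)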
			 */
			peer = newpeer(&rbufp->recv_srcadr, NULL,
			    match_ep, MODE_BCLIENT, hisversion,
			    pkt->ppoll, pkt->ppoll, FLAG_PREEMPT,
			    MDF_BCLNT, 0, skeyid, sys_ident);
			if (NULL == peer) {
				sys_restricted++;
				return;		/* ignore duplicate */

			} else {
				peer->delay = sys_bdelay;
				peer->bxmt = p_xmt;
			}
			break;
		}

		/*
		 * Execute the initial volley in order to calibrate the
		 * propagation delay and run the Autokey protocol.
		 *
		 * Note that the minpoll is taken from the broadcast
		 * packet, normally 6 (64 s) and that the poll interval
		 * is fixed at this value.
		 */
		peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep,
		    MODE_CLIENT, hisversion, pkt->ppoll, pkt->ppoll,
		    FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT,
		    0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_restricted++;
			return;			/* ignore duplicate */
		}
		peer->bxmt = p_xmt;
#ifdef AUTOKEY
		if (skeyid > NTP_MAXKEY)
			crypto_recv(peer, rbufp);
#endif /* AUTOKEY */

		return;				/* hooray */

	/*
	 * This is the first packet received from a symmetric active
	 * peer. If the packet is authentic and the first he sent,
	 * mobilize a passive association. If not, kiss the frog.
	 */
	case AM_NEWPASS:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {

			/*
			 * If authenticated but cannot mobilize an
			 * association, send a symmetric passive
			 * response without mobilizing an association.
			 * This is for drat broken Windows clients. See
			 * Microsoft KB 875424 for preferred workaround.
			 */
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_PASSIVE, skeyid,
				    restrict_mask);
				return;		/* hooray */
			}
			if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_ACTIVE, 0,
				    restrict_mask);
				sys_restricted++;
				return;
			}
			/* [Bug 2941]
			 * If we got here, the packet isn't part of an
			 * existing association, it isn't correctly
			 * authenticated, and it didn't meet either of
			 * the previous two special cases so we should
			 * just drop it on the floor. For example,
			 * crypto-NAKs (is_authentic == AUTH_CRYPTO)
			 * will make it this far. This is just
			 * debug-printed and not logged to avoid log
			 * flooding.
			 */
			DPRINTF(2, ("receive: at %ld refusing to mobilize passive association"
				    " with unknown peer %s mode %d/%s:%s keyid %08x len %d auth %d\n",
				    current_time, stoa(&rbufp->recv_srcadr),
				    hismode, hm_str, am_str, skeyid,
				    (authlen + has_mac), is_authentic));
			sys_declined++;
			return;
		}

		/*
		 * Do not respond if synchronized and if stratum is
		 * below the floor or at or above the ceiling. Note,
		 * this allows an unsynchronized peer to synchronize to
		 * us. It would be very strange if he did and then was
		 * nipped, but that could only happen if we were
		 * operating at the top end of the range. It also means
		 * we will spin an ephemeral association in response to
		 * MODE_ACTIVE KoDs, which will time out eventually.
		 */
		if (   hisleap != LEAP_NOTINSYNC
		    && (hisstratum < sys_floor || hisstratum >= sys_ceiling)) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * The message is correctly authenticated and allowed.
		 * Mobilize a symmetric passive association.
		 */
		if ((peer = newpeer(&rbufp->recv_srcadr, NULL,
		    rbufp->dstadr, MODE_PASSIVE, hisversion, pkt->ppoll,
		    NTP_MAXDPOLL, 0, MDF_UCAST, 0, skeyid,
		    sys_ident)) == NULL) {
			sys_declined++;
			return;			/* ignore duplicate */
		}
		break;


	/*
	 * Process regular packet. Nothing special.
	 */
	case AM_PROCPKT:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, peer->ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */

		if (MODE_BROADCAST == hismode) {
			u_char poll;
			int bail = 0;
			l_fp tdiff;

			DPRINTF(2, ("receive: PROCPKT/BROADCAST: prev pkt %ld seconds ago, ppoll: %d, %d secs\n",
				    (current_time - peer->timelastrec),
				    peer->ppoll, (1 << peer->ppoll)
				    ));
			/* Things we can check:
			 *
			 * Did the poll interval change?
			 * Is the poll interval in the packet in-range?
			 * Did this packet arrive too soon?
			 * Is the timestamp in this packet monotonic
			 *  with respect to the previous packet?
			 */

			/* This is noteworthy, not error-worthy */
			if (pkt->ppoll != peer->ppoll) {
				msyslog(LOG_INFO, "receive: broadcast poll from %s changed from %u to %u",
					stoa(&rbufp->recv_srcadr),
					peer->ppoll, pkt->ppoll);
			}

			poll = min(peer->maxpoll,
				   max(peer->minpoll, pkt->ppoll));

			/* This is error-worthy */
			if (pkt->ppoll != poll) {
				msyslog(LOG_INFO, "receive: broadcast poll of %u from %s is out-of-range (%d to %d)!",
					pkt->ppoll, stoa(&rbufp->recv_srcadr),
					peer->minpoll, peer->maxpoll);
				++bail;
			}

			if (   (current_time - peer->timelastrec)
			    < (1 << pkt->ppoll)) {
				msyslog(LOG_INFO, "receive: broadcast packet from %s arrived after %ld, not %d seconds!",
					stoa(&rbufp->recv_srcadr),
					(current_time - peer->timelastrec),
					(1 << pkt->ppoll)
					);
				++bail;
			}

			tdiff = p_xmt;
			L_SUB(&tdiff, &peer->bxmt);
			if (tdiff.l_i < 0) {
				msyslog(LOG_INFO, "receive: broadcast packet from %s contains non-monotonic timestamp: %#010x.%08x -> %#010x.%08x",
					stoa(&rbufp->recv_srcadr),
					peer->bxmt.l_ui, peer->bxmt.l_uf,
					p_xmt.l_ui, p_xmt.l_uf
					);
				++bail;
			}

			peer->bxmt = p_xmt;

			if (bail) {
				peer->timelastrec = current_time;
				sys_declined++;
				return;
			}
		}

		break;

	/*
	 * A passive packet matches a passive association. This is
	 * usually the result of reconfiguring a client on the fly. As
	 * this association might be legitimate and this packet an
	 * attempt to deny service, just ignore it.
	 */
	case AM_ERR:
		sys_declined++;
		return;

	/*
	 * For everything else there is the bit bucket.
	 */
	default:
		sys_declined++;
		return;
	}

#ifdef AUTOKEY
	/*
	 * If the association is configured for Autokey, the packet must
	 * have a public key ID; if not, the packet must have a
	 * symmetric key ID.
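	 * (Symmetric key IDs occupy the range up to NTP_MAXKEY;
	 * Autokey session key IDs are always numerically larger, which
	 * is what the comparison below relies on.)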
	 */
	if (   is_authentic != AUTH_CRYPTO
	    && (   ((peer->flags & FLAG_SKEY) && skeyid <= NTP_MAXKEY)
		|| (!(peer->flags & FLAG_SKEY) && skeyid > NTP_MAXKEY))) {
		sys_badauth++;
		return;
	}
#endif /* AUTOKEY */
	peer->received++;
	peer->flash &= ~PKT_TEST_MASK;
	if (peer->flags & FLAG_XBOGUS) {
		peer->flags &= ~FLAG_XBOGUS;
		peer->flash |= TEST3;
	}

	/*
	 * Next comes a rigorous schedule of timestamp checking. If the
	 * transmit timestamp is zero, the server has not initialized in
	 * interleaved modes or is horribly broken.
	 */
	if (L_ISZERO(&p_xmt)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * If the transmit timestamp duplicates a previous one, the
	 * packet is a replay. This prevents the bad guys from replaying
	 * the most recent packet, authenticated or not.
	 */
	} else if (L_ISEQU(&peer->xmt, &p_xmt)) {
		peer->flash |= TEST1;		/* duplicate */
		peer->oldpkt++;
		return;

	/*
	 * If this is a broadcast mode packet, skip further checking. If
	 * an initial volley, bail out now and let the client do its
	 * stuff. If the origin timestamp is nonzero, this is an
	 * interleaved broadcast, so restart the protocol.
	 */
	} else if (hismode == MODE_BROADCAST) {
		if (!L_ISZERO(&p_org) && !(peer->flags & FLAG_XB)) {
			peer->flags |= FLAG_XB;
			peer->aorg = p_xmt;
			peer->borg = rbufp->recv_time;
			report_event(PEVNT_XLEAVE, peer, NULL);
			return;
		}

	/*
	 * Basic mode checks:
	 *
	 * If there is no origin timestamp, it's either an initial packet
	 * or we've already received a response to our query. Of course,
	 * should 'aorg' be all-zero because this really was the original
	 * transmit timestamp, we'll drop the reply. There is a window of
	 * one nanosecond once every 136 years' time where this is possible.
	 * We currently ignore this situation.
	 *
	 * Otherwise, check for bogus packet in basic mode.
	 * If it is bogus, switch to interleaved mode and resynchronize,
	 * but only after confirming the packet is not bogus in
	 * symmetric interleaved mode.
	 *
	 * This could also mean somebody is forging packets claiming to
	 * be from us, attempting to cause our server to KoD us.
	 */
	} else if (peer->flip == 0) {
		if (0 < hisstratum && L_ISZERO(&p_org)) {
			L_CLR(&peer->aorg);
		} else if (   L_ISZERO(&peer->aorg)
			   || !L_ISEQU(&p_org, &peer->aorg)) {
			peer->bogusorg++;
			peer->flash |= TEST2;	/* bogus */
			msyslog(LOG_INFO,
				"receive: Unexpected origin timestamp %#010x.%08x from %s xmt %#010x.%08x",
				ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
				ntoa(&peer->srcadr),
				ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf));
			if (   !L_ISZERO(&peer->dst)
			    && L_ISEQU(&p_org, &peer->dst)) {
				/* Might be the start of an interleave */
				peer->flip = 1;
				report_event(PEVNT_XLEAVE, peer, NULL);
			}
			return;	/* Bogus or possible interleave packet */
		} else {
			L_CLR(&peer->aorg);
		}

	/*
	 * Check for valid nonzero timestamp fields.
	 */
	} else if (L_ISZERO(&p_org) || L_ISZERO(&p_rec) ||
		   L_ISZERO(&peer->dst)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * Check for bogus packet in interleaved symmetric mode. This
	 * can happen if a packet is lost, duplicated or crossed. If
	 * found, flip and resynchronize.
	 */
	} else if (   !L_ISZERO(&peer->dst)
		   && !L_ISEQU(&p_org, &peer->dst)) {
		peer->bogusorg++;
		peer->flags |= FLAG_XBOGUS;
		peer->flash |= TEST2;		/* bogus */
		return;		/* Bogus packet, we are done */
	}

	/*
	 * If this is a crypto_NAK, the server cannot authenticate a
	 * client packet. The server might have just changed keys. Clear
	 * the association and restart the protocol.
	 */
	if (is_authentic == AUTH_CRYPTO) {
		report_event(PEVNT_AUTH, peer, "crypto_NAK");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (peer->flags & FLAG_PREEMPT) {
			if (unpeer_crypto_nak_early) {
				unpeer(peer);
			}
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif /* AUTOKEY */
		return;

	/*
	 * If the digest fails or it's missing for authenticated
	 * associations, the client cannot authenticate a server
	 * reply to a client packet previously sent. The loopback check
	 * is designed to avoid a bait-and-switch attack, which was
	 * possible in past versions. If symmetric modes, return a
	 * crypto-NAK. The peer should restart the protocol.
	 */
	} else if (!AUTH(peer->keyid || has_mac ||
			 (restrict_mask & RES_DONTTRUST), is_authentic)) {
		report_event(PEVNT_AUTH, peer, "digest");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (   has_mac
		    && (hismode == MODE_ACTIVE || hismode == MODE_PASSIVE))
			fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask);
		if (peer->flags & FLAG_PREEMPT) {
			if (unpeer_digest_early) {
				unpeer(peer);
			}
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif /* AUTOKEY */
		return;
	}

	/*
	 * Update the state variables.
	 */
	if (peer->flip == 0) {
		if (hismode != MODE_BROADCAST)
			peer->rec = p_xmt;
		peer->dst = rbufp->recv_time;
	}
	peer->xmt = p_xmt;

	/*
	 * Set the peer ppoll to the maximum of the packet ppoll and the
	 * peer minpoll. If a kiss-o'-death, set the peer minpoll to
	 * this maximum and advance the headway to give the sender some
	 * headroom. Very intricate.
	 */

	/*
	 * Check for any kiss codes. Note this is only used when a server
	 * responds to a packet request.
	 */

	kissCode = kiss_code_check(hisleap, hisstratum, hismode, pkt->refid);

	/*
	 * Check to see if this is a RATE Kiss Code.
	 * Currently this kiss code will accept whatever poll
	 * rate that the server sends.
	 */
	peer->ppoll = max(peer->minpoll, pkt->ppoll);
	if (kissCode == RATEKISS) {
		peer->selbroken++;	/* Increment the KoD count */
		report_event(PEVNT_RATE, peer, NULL);
		if (pkt->ppoll > peer->minpoll)
			peer->minpoll = peer->ppoll;
		peer->burst = peer->retry = 0;
		peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll);
		poll_update(peer, pkt->ppoll);
		return;				/* kiss-o'-death */
	}
	if (kissCode != NOKISS) {
		peer->selbroken++;	/* Increment the KoD count */
		return;		/* Drop any other kiss code packets */
	}

	/*
	 * If:
	 *  - this is a *cast (uni-, broad-, or m-) server packet
	 *  - and it's authenticated
	 * then see if the sender's IP is trusted for this keyid.
	 * If it is, great - nothing special to do here.
	 * Otherwise, we should report and bail.
	 */

	switch (hismode) {
	case MODE_SERVER:		/* server mode */
	case MODE_BROADCAST:		/* broadcast mode */
	case MODE_ACTIVE:		/* symmetric active mode */
		if (   is_authentic == AUTH_OK
		    && !authistrustedip(skeyid, &peer->srcadr)) {
			report_event(PEVNT_AUTH, peer, "authIP");
			peer->badauth++;
			return;
		}
		break;

	case MODE_UNSPEC:		/* unspecified (old version) */
	case MODE_PASSIVE:		/* symmetric passive mode */
	case MODE_CLIENT:		/* client mode */
#if 0	/* At this point, MODE_CONTROL is overloaded by MODE_BCLIENT */
	case MODE_CONTROL:		/* control mode */
#endif
	case MODE_PRIVATE:		/* private mode */
	case MODE_BCLIENT:		/* broadcast client mode */
		break;
	default:
		break;
	}


	/*
	 * That was hard and I am sweaty, but the packet is squeaky
	 * clean. Get on with real work.
	 */
	peer->timereceived = current_time;
	peer->timelastrec = current_time;
	if (is_authentic == AUTH_OK)
		peer->flags |= FLAG_AUTHENTIC;
	else
		peer->flags &= ~FLAG_AUTHENTIC;

#ifdef AUTOKEY
	/*
	 * More autokey dance. The rules of the cha-cha are as follows:
	 *
	 * 1. If there is no key or the key is not auto, do nothing.
	 *
	 * 2. If this packet is in response to the one just previously
	 *    sent or from a broadcast server, do the extension fields.
	 *    Otherwise, assume bogosity and bail out.
	 *
	 * 3. If an extension field contains a verified signature, it is
	 *    self-authenticated and we sit the dance.
	 *
	 * 4. If this is a server reply, check only to see that the
	 *    transmitted key ID matches the received key ID.
	 *
	 * 5. Check to see that one or more hashes of the current key ID
	 *    matches the previous key ID or ultimate original key ID
	 *    obtained from the broadcaster or symmetric peer. If no
	 *    match, sit the dance and call for new autokey values.
	 *
	 * In case of crypto error, fire the orchestra, stop dancing and
	 * restart the protocol.
	 */
	if (peer->flags & FLAG_SKEY) {
		/*
		 * Decrement remaining autokey hashes. This isn't
		 * perfect if a packet is lost, but results in no harm.
		 */
		ap = (struct autokey *)peer->recval.ptr;
		if (ap != NULL) {
			if (ap->seq > 0)
				ap->seq--;
		}
		peer->flash |= TEST8;
		rval = crypto_recv(peer, rbufp);
		if (rval == XEVNT_OK) {
			peer->unreach = 0;
		} else {
			if (rval == XEVNT_ERR) {
				report_event(PEVNT_RESTART, peer,
				    "crypto error");
				peer_clear(peer, "CRYP");
				peer->flash |= TEST9;	/* bad crypt */
				if (peer->flags & FLAG_PREEMPT) {
					if (unpeer_crypto_early) {
						unpeer(peer);
					}
				}
			}
			return;
		}

		/*
		 * If server mode, verify the receive key ID matches
		 * the transmit key ID.
		 */
		if (hismode == MODE_SERVER) {
			if (skeyid == peer->keyid)
				peer->flash &= ~TEST8;

		/*
		 * If an extension field is present, verify only that it
		 * has been correctly signed. We don't need a sequence
		 * check here, but the sequence continues.
		 */
		} else if (!(peer->flash & TEST8)) {
			peer->pkeyid = skeyid;

		/*
		 * Now the fun part. Here, skeyid is the current ID in
		 * the packet, pkeyid is the ID in the last packet and
		 * tkeyid is the hash of skeyid. If the autokey values
		 * have not been received, this is an automatic error.
		 * If so, check that the tkeyid matches pkeyid. If not,
		 * hash tkeyid and try again. If the number of hashes
		 * exceeds the number remaining in the sequence, declare
		 * a successful failure and refresh the autokey values.
		 */
		} else if (ap != NULL) {
			int i;

			for (i = 0; ; i++) {
				if (   tkeyid == peer->pkeyid
				    || tkeyid == ap->key) {
					peer->flash &= ~TEST8;
					peer->pkeyid = skeyid;
					ap->seq -= i;
					break;
				}
				if (i > ap->seq) {
					peer->crypto &=
					    ~CRYPTO_FLAG_AUTO;
					break;
				}
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    tkeyid, pkeyid, 0);
			}
			if (peer->flash & TEST8)
				report_event(PEVNT_AUTH, peer, "keylist");
		}
		if (!(peer->crypto & CRYPTO_FLAG_PROV))	/* test 9 */
			peer->flash |= TEST8;	/* bad autokey */

		/*
		 * The maximum lifetime of the protocol is about one
		 * week before restarting the Autokey protocol to
		 * refresh certificates and leapseconds values.
		 */
		if (current_time > peer->refresh) {
			report_event(PEVNT_RESTART, peer,
			    "crypto refresh");
			peer_clear(peer, "TIME");
			return;
		}
	}
#endif /* AUTOKEY */

	/*
	 * The dance is complete and the flash bits have been lit. Toss
	 * the packet over the fence for processing, which may light up
	 * more flashers.
	 */
	process_packet(peer, pkt, rbufp->recv_length);

	/*
	 * In interleaved mode update the state variables. Also adjust the
	 * transmit phase to avoid crossover.
	 */
	if (peer->flip != 0) {
		peer->rec = p_rec;
		peer->dst = rbufp->recv_time;
		if (peer->nextdate - current_time < (1U << min(peer->ppoll,
		    peer->hpoll)) / 2)
			peer->nextdate++;
		else
			peer->nextdate--;
	}
}


/*
 * process_packet - Packet Procedure, a la Section 3.4.4 of the
 * specification. Or almost, at least. If we're in here we have a
 * reasonable expectation that we will be having a long term
 * relationship with this host.
 */
void
process_packet(
	register struct peer *peer,
	register struct pkt *pkt,
	u_int	len
	)
{
	double	t34, t21;
	double	p_offset, p_del, p_disp;
	l_fp	p_rec, p_xmt, p_org, p_reftime, ci;
	u_char	pmode, pleap, pversion, pstratum;
	char	statstr[NTP_MAXSTRLEN];
#ifdef ASSYM
	int	itemp;
	double	etemp, ftemp, td;
#endif /* ASSYM */

	sys_processed++;
	peer->processed++;
	p_del = FPTOD(NTOHS_FP(pkt->rootdelay));
	p_offset = 0;
	p_disp = FPTOD(NTOHS_FP(pkt->rootdisp));
	NTOHL_FP(&pkt->reftime, &p_reftime);
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);
	pmode = PKT_MODE(pkt->li_vn_mode);
	pleap = PKT_LEAP(pkt->li_vn_mode);
	pversion = PKT_VERSION(pkt->li_vn_mode);
	pstratum = PKT_TO_STRATUM(pkt->stratum);

	/*
	 * Capture the header values in the client/peer association.
	 */
	record_raw_stats(&peer->srcadr, peer->dstadr ?
	    &peer->dstadr->sin : NULL,
	    &p_org, &p_rec, &p_xmt, &peer->dst,
	    pleap, pversion, pmode, pstratum, pkt->ppoll, pkt->precision,
	    p_del, p_disp, pkt->refid);
	peer->leap = pleap;
	peer->stratum = min(pstratum, STRATUM_UNSPEC);
	peer->pmode = pmode;
	peer->precision = pkt->precision;
	peer->rootdelay = p_del;
	peer->rootdisp = p_disp;
	peer->refid = pkt->refid;		/* network byte order */
	peer->reftime = p_reftime;

	/*
	 * First, if either burst mode is armed, enable the burst.
	 * Compute the headway for the next packet and delay if
	 * necessary to avoid exceeding the threshold.
	 */
	if (peer->retry > 0) {
		peer->retry = 0;
		if (peer->reach)
			peer->burst = min(1 << (peer->hpoll -
			    peer->minpoll), NTP_SHIFT) - 1;
		else
			peer->burst = NTP_IBURST - 1;
		if (peer->burst > 0)
			peer->nextdate = current_time;
	}
	poll_update(peer, peer->hpoll);

	/*
	 * Verify the server is synchronized; that is, the leap bits,
	 * stratum and root distance are valid.
	 */
	if (   pleap == LEAP_NOTINSYNC		/* test 6 */
	    || pstratum < sys_floor || pstratum >= sys_ceiling)
		peer->flash |= TEST6;		/* bad synch or strat */
	if (p_del / 2 + p_disp >= MAXDISPERSE)	/* test 7 */
		peer->flash |= TEST7;		/* bad header */

	/*
	 * If any tests fail at this point, the packet is discarded.
	 * Note that some flashers may have already been set in the
	 * receive() routine.
	 */
	if (peer->flash & PKT_TEST_MASK) {
		peer->seldisptoolarge++;
		DPRINTF(1, ("packet: flash header %04x\n",
			    peer->flash));
		return;
	}

	/*
	 * If the peer was previously unreachable, raise a trap. In any
	 * case, mark it reachable.
	 */
	if (!peer->reach) {
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach |= 1;

	/*
	 * For a client/server association, calculate the clock offset,
	 * roundtrip delay and dispersion. The equations are reordered
	 * from the spec for more efficient use of temporaries. For a
	 * broadcast association, offset the last measurement by the
	 * computed delay during the client/server volley. Note the
	 * computation of dispersion includes the system precision plus
	 * that due to the frequency error since the origin time.
	 *
	 * It is very important to respect the hazards of overflow. The
	 * only permitted operation on raw timestamps is subtraction,
	 * where the result is a signed quantity spanning from 68 years
	 * in the past to 68 years in the future. To avoid loss of
	 * precision, these calculations are done using 64-bit integer
	 * arithmetic. However, the offset and delay calculations are
	 * sums and differences of these first-order differences, which
	 * if done using 64-bit integer arithmetic, would be valid over
	 * only half that span. Since the typical first-order
	 * differences are usually very small, they are converted to 64-
	 * bit doubles and all remaining calculations done in floating-
	 * double arithmetic. This preserves the accuracy while
	 * retaining the 68-year span.
	 *
	 * There are three interleaving schemes, basic, interleaved
	 * symmetric and interleaved broadcast. The timestamps are
	 * idiosyncratically different. See the onwire briefing/white
	 * paper at www.eecis.udel.edu/~mills for details.
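	 *
	 * (For reference, in basic mode the code below forms
	 * t21 = t2 - t1 and t34 = t3 - t4, so that
	 *
	 *	offset = (t21 + t34) / 2 = ((t2 - t1) + (t3 - t4)) / 2
	 *	delay  = t21 - t34       = (t4 - t1) - (t3 - t2)
	 *
	 * (the code takes fabs() of the delay), with t1/t2/t3/t4 the
	 * origin, receive, transmit and destination timestamps.)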
1918 * 1919 * Interleaved symmetric mode 1920 * t1 = peer->aorg/borg, t2 = peer->rec, t3 = p_xmt, 1921 * t4 = peer->dst 1922 */ 1923 if (peer->flip != 0) { 1924 ci = p_xmt; /* t3 - t4 */ 1925 L_SUB(&ci, &peer->dst); 1926 LFPTOD(&ci, t34); 1927 ci = p_rec; /* t2 - t1 */ 1928 if (peer->flip > 0) 1929 L_SUB(&ci, &peer->borg); 1930 else 1931 L_SUB(&ci, &peer->aorg); 1932 LFPTOD(&ci, t21); 1933 p_del = t21 - t34; 1934 p_offset = (t21 + t34) / 2.; 1935 if (p_del < 0 || p_del > 1.) { 1936 snprintf(statstr, sizeof(statstr), 1937 "t21 %.6f t34 %.6f", t21, t34); 1938 report_event(PEVNT_XERR, peer, statstr); 1939 return; 1940 } 1941 1942 /* 1943 * Broadcast modes 1944 */ 1945 } else if (peer->pmode == MODE_BROADCAST) { 1946 1947 /* 1948 * Interleaved broadcast mode. Use interleaved timestamps. 1949 * t1 = peer->borg, t2 = p_org, t3 = p_org, t4 = aorg 1950 */ 1951 if (peer->flags & FLAG_XB) { 1952 ci = p_org; /* delay */ 1953 L_SUB(&ci, &peer->aorg); 1954 LFPTOD(&ci, t34); 1955 ci = p_org; /* t2 - t1 */ 1956 L_SUB(&ci, &peer->borg); 1957 LFPTOD(&ci, t21); 1958 peer->aorg = p_xmt; 1959 peer->borg = peer->dst; 1960 if (t34 < 0 || t34 > 1.) { 1961 snprintf(statstr, sizeof(statstr), 1962 "offset %.6f delay %.6f", t21, t34); 1963 report_event(PEVNT_XERR, peer, statstr); 1964 return; 1965 } 1966 p_offset = t21; 1967 peer->xleave = t34; 1968 1969 /* 1970 * Basic broadcast - use direct timestamps. 1971 * t3 = p_xmt, t4 = peer->dst 1972 */ 1973 } else { 1974 ci = p_xmt; /* t3 - t4 */ 1975 L_SUB(&ci, &peer->dst); 1976 LFPTOD(&ci, t34); 1977 p_offset = t34; 1978 } 1979 1980 /* 1981 * When calibration is complete and the clock is 1982 * synchronized, the bias is calculated as the difference 1983 * between the unicast timestamp and the broadcast 1984 * timestamp. This works for both basic and interleaved 1985 * modes. 1986 */ 1987 if (FLAG_BC_VOL & peer->flags) { 1988 peer->flags &= ~FLAG_BC_VOL; 1989 peer->delay = fabs(peer->offset - p_offset) * 2; 1990 } 1991 p_del = peer->delay; 1992 p_offset += p_del / 2; 1993 1994 1995 /* 1996 * Basic mode, otherwise known as the old fashioned way. 1997 * 1998 * t1 = p_org, t2 = p_rec, t3 = p_xmt, t4 = peer->dst 1999 */ 2000 } else { 2001 ci = p_xmt; /* t3 - t4 */ 2002 L_SUB(&ci, &peer->dst); 2003 LFPTOD(&ci, t34); 2004 ci = p_rec; /* t2 - t1 */ 2005 L_SUB(&ci, &p_org); 2006 LFPTOD(&ci, t21); 2007 p_del = fabs(t21 - t34); 2008 p_offset = (t21 + t34) / 2.; 2009 } 2010 p_del = max(p_del, LOGTOD(sys_precision)); 2011 p_disp = LOGTOD(sys_precision) + LOGTOD(peer->precision) + 2012 clock_phi * p_del; 2013 2014 #if ASSYM 2015 /* 2016 * This code calculates the outbound and inbound data rates by 2017 * measuring the differences between timestamps at different 2018 * packet lengths. This is helpful in cases of large asymmetric 2019 * delays commonly experienced on deep space communication 2020 * links. 2021 */ 2022 if (peer->t21_last > 0 && peer->t34_bytes > 0) { 2023 itemp = peer->t21_bytes - peer->t21_last; 2024 if (itemp > 25) { 2025 etemp = t21 - peer->t21; 2026 if (fabs(etemp) > 1e-6) { 2027 ftemp = itemp / etemp; 2028 if (ftemp > 1000.) 2029 peer->r21 = ftemp; 2030 } 2031 } 2032 itemp = len - peer->t34_bytes; 2033 if (itemp > 25) { 2034 etemp = -t34 - peer->t34; 2035 if (fabs(etemp) > 1e-6) { 2036 ftemp = itemp / etemp; 2037 if (ftemp > 1000.) 2038 peer->r34 = ftemp; 2039 } 2040 } 2041 } 2042 2043 /* 2044 * The following section compensates for different data rates on 2045 * the outbound (d21) and inbound (t34) directions. 
To do this, 2046 * it finds t such that r21 * t - r34 * (d - t) = 0, where d is 2047 * the roundtrip delay. Then it calculates the correction as a 2048 * fraction of d. 2049 */ 2050 peer->t21 = t21; 2051 peer->t21_last = peer->t21_bytes; 2052 peer->t34 = -t34; 2053 peer->t34_bytes = len; 2054 DPRINTF(2, ("packet: t21 %.9lf %d t34 %.9lf %d\n", peer->t21, 2055 peer->t21_bytes, peer->t34, peer->t34_bytes)); 2056 if (peer->r21 > 0 && peer->r34 > 0 && p_del > 0) { 2057 if (peer->pmode != MODE_BROADCAST) 2058 td = (peer->r34 / (peer->r21 + peer->r34) - 2059 .5) * p_del; 2060 else 2061 td = 0; 2062 2063 /* 2064 * Unfortunately, in many cases the errors are 2065 * unacceptable, so for the present the rates are not 2066 * used. In future, we might find conditions where the 2067 * calculations are useful, so this should be considered 2068 * a work in progress. 2069 */ 2070 t21 -= td; 2071 t34 -= td; 2072 DPRINTF(2, ("packet: del %.6lf r21 %.1lf r34 %.1lf %.6lf\n", 2073 p_del, peer->r21 / 1e3, peer->r34 / 1e3, 2074 td)); 2075 } 2076 #endif /* ASSYM */ 2077 2078 /* 2079 * That was awesome. Now hand off to the clock filter. 2080 */ 2081 clock_filter(peer, p_offset + peer->bias, p_del, p_disp); 2082 2083 /* 2084 * If we are in broadcast calibrate mode, return to broadcast 2085 * client mode when the client is fit and the autokey dance is 2086 * complete. 2087 */ 2088 if ( (FLAG_BC_VOL & peer->flags) 2089 && MODE_CLIENT == peer->hmode 2090 && !(TEST11 & peer_unfit(peer))) { /* distance exceeded */ 2091 #ifdef AUTOKEY 2092 if (peer->flags & FLAG_SKEY) { 2093 if (!(~peer->crypto & CRYPTO_FLAG_ALL)) 2094 peer->hmode = MODE_BCLIENT; 2095 } else { 2096 peer->hmode = MODE_BCLIENT; 2097 } 2098 #else /* !AUTOKEY follows */ 2099 peer->hmode = MODE_BCLIENT; 2100 #endif /* !AUTOKEY */ 2101 } 2102 } 2103 2104 2105 /* 2106 * clock_update - Called at system process update intervals. 2107 */ 2108 static void 2109 clock_update( 2110 struct peer *peer /* peer structure pointer */ 2111 ) 2112 { 2113 double dtemp; 2114 l_fp now; 2115 #ifdef HAVE_LIBSCF_H 2116 char *fmri; 2117 #endif /* HAVE_LIBSCF_H */ 2118 2119 /* 2120 * Update the system state variables. We do this very carefully, 2121 * as the poll interval might need to be clamped differently. 2122 */ 2123 sys_peer = peer; 2124 sys_epoch = peer->epoch; 2125 if (sys_poll < peer->minpoll) 2126 sys_poll = peer->minpoll; 2127 if (sys_poll > peer->maxpoll) 2128 sys_poll = peer->maxpoll; 2129 poll_update(peer, sys_poll); 2130 sys_stratum = min(peer->stratum + 1, STRATUM_UNSPEC); 2131 if ( peer->stratum == STRATUM_REFCLOCK 2132 || peer->stratum == STRATUM_UNSPEC) 2133 sys_refid = peer->refid; 2134 else 2135 sys_refid = addr2refid(&peer->srcadr); 2136 /* 2137 * Root Dispersion (E) is defined (in RFC 5905) as: 2138 * 2139 * E = p.epsilon_r + p.epsilon + p.psi + PHI*(s.t - p.t) + |THETA| 2140 * 2141 * where: 2142 * p.epsilon_r is the PollProc's root dispersion 2143 * p.epsilon is the PollProc's dispersion 2144 * p.psi is the PollProc's jitter 2145 * THETA is the combined offset 2146 * 2147 * NB: Think Hard about where these numbers come from and 2148 * what they mean. When did peer->update happen? Has anything 2149 * interesting happened since then? What values are the most 2150 * defensible? Why? 2151 * 2152 * DLM thinks this equation is probably the best of all worse choices. 
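 *
 * As a rough illustration with assumed values: rootdisp 10 ms,
 * disp 5 ms, jitter 2 ms, PHI 15e-6 s/s, 64 s since the last
 * update and |offset| 1 ms give
 *
 *	E = .010 + .005 + .002 + 15e-6 * 64 + .001 = .01896 s
 *
 * which exceeds sys_mindisp (.001 by default) and so is used as
 * sys_rootdisp unchanged.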
2153 */ 2154 dtemp = peer->rootdisp 2155 + peer->disp 2156 + sys_jitter 2157 + clock_phi * (current_time - peer->update) 2158 + fabs(sys_offset); 2159 2160 if (dtemp > sys_mindisp) 2161 sys_rootdisp = dtemp; 2162 else 2163 sys_rootdisp = sys_mindisp; 2164 sys_rootdelay = peer->delay + peer->rootdelay; 2165 sys_reftime = peer->dst; 2166 2167 DPRINTF(1, ("clock_update: at %lu sample %lu associd %d\n", 2168 current_time, peer->epoch, peer->associd)); 2169 2170 /* 2171 * Comes now the moment of truth. Crank the clock discipline and 2172 * see what comes out. 2173 */ 2174 switch (local_clock(peer, sys_offset)) { 2175 2176 /* 2177 * Clock exceeds panic threshold. Life as we know it ends. 2178 */ 2179 case -1: 2180 #ifdef HAVE_LIBSCF_H 2181 /* 2182 * For Solaris enter the maintenance mode. 2183 */ 2184 if ((fmri = getenv("SMF_FMRI")) != NULL) { 2185 if (smf_maintain_instance(fmri, 0) < 0) { 2186 printf("smf_maintain_instance: %s\n", 2187 scf_strerror(scf_error())); 2188 exit(1); 2189 } 2190 /* 2191 * Sleep until SMF kills us. 2192 */ 2193 for (;;) 2194 pause(); 2195 } 2196 #endif /* HAVE_LIBSCF_H */ 2197 exit (-1); 2198 /* not reached */ 2199 2200 /* 2201 * Clock was stepped. Flush all time values of all peers. 2202 */ 2203 case 2: 2204 clear_all(); 2205 set_sys_leap(LEAP_NOTINSYNC); 2206 sys_stratum = STRATUM_UNSPEC; 2207 memcpy(&sys_refid, "STEP", 4); 2208 sys_rootdelay = 0; 2209 sys_rootdisp = 0; 2210 L_CLR(&sys_reftime); 2211 sys_jitter = LOGTOD(sys_precision); 2212 leapsec_reset_frame(); 2213 break; 2214 2215 /* 2216 * Clock was slewed. Handle the leapsecond stuff. 2217 */ 2218 case 1: 2219 2220 /* 2221 * If this is the first time the clock is set, reset the 2222 * leap bits. If crypto, the timer will goose the setup 2223 * process. 2224 */ 2225 if (sys_leap == LEAP_NOTINSYNC) { 2226 set_sys_leap(LEAP_NOWARNING); 2227 #ifdef AUTOKEY 2228 if (crypto_flags) 2229 crypto_update(); 2230 #endif /* AUTOKEY */ 2231 /* 2232 * If our parent process is waiting for the 2233 * first clock sync, send them home satisfied. 2234 */ 2235 #ifdef HAVE_WORKING_FORK 2236 if (waitsync_fd_to_close != -1) { 2237 close(waitsync_fd_to_close); 2238 waitsync_fd_to_close = -1; 2239 DPRINTF(1, ("notified parent --wait-sync is done\n")); 2240 } 2241 #endif /* HAVE_WORKING_FORK */ 2242 2243 } 2244 2245 /* 2246 * If there is no leap second pending and the number of 2247 * survivor leap bits is greater than half the number of 2248 * survivors, try to schedule a leap for the end of the 2249 * current month. (This only works if no leap second for 2250 * that range is in the table, so doing this more than 2251 * once is mostly harmless.) 2252 */ 2253 if (leapsec == LSPROX_NOWARN) { 2254 if ( leap_vote_ins > leap_vote_del 2255 && leap_vote_ins > sys_survivors / 2) { 2256 get_systime(&now); 2257 leapsec_add_dyn(TRUE, now.l_ui, NULL); 2258 } 2259 if ( leap_vote_del > leap_vote_ins 2260 && leap_vote_del > sys_survivors / 2) { 2261 get_systime(&now); 2262 leapsec_add_dyn(FALSE, now.l_ui, NULL); 2263 } 2264 } 2265 break; 2266 2267 /* 2268 * Popcorn spike or step threshold exceeded. Pretend it never 2269 * happened. 2270 */ 2271 default: 2272 break; 2273 } 2274 } 2275 2276 2277 /* 2278 * poll_update - update peer poll interval 2279 */ 2280 void 2281 poll_update( 2282 struct peer *peer, /* peer structure pointer */ 2283 u_char mpoll 2284 ) 2285 { 2286 u_long next, utemp; 2287 u_char hpoll; 2288 2289 /* 2290 * This routine figures out when the next poll should be sent. 2291 * That turns out to be wickedly complicated. 
One problem is 2292 * that sometimes the time for the next poll is in the past when 2293 * the poll interval is reduced. We watch out for races here 2294 * between the receive process and the poll process. 2295 * 2296 * Clamp the poll interval between minpoll and maxpoll. 2297 */ 2298 hpoll = max(min(peer->maxpoll, mpoll), peer->minpoll); 2299 2300 #ifdef AUTOKEY 2301 /* 2302 * If during the crypto protocol the poll interval has changed, 2303 * the lifetimes in the key list are probably bogus. Purge the 2304 * key list and regenerate it later. 2305 */ 2306 if ((peer->flags & FLAG_SKEY) && hpoll != peer->hpoll) 2307 key_expire(peer); 2308 #endif /* AUTOKEY */ 2309 peer->hpoll = hpoll; 2310 2311 /* 2312 * There are three variables important for poll scheduling, the 2313 * current time (current_time), next scheduled time (nextdate) 2314 * and the earliest time (utemp). The earliest time is 2 2315 * seconds, but could be more due to rate management. When 2316 * sending in a burst, use the earliest time. When not in a 2317 * burst but with a reply pending, send at the earliest time 2318 * unless the next scheduled time has not advanced. This can 2319 * only happen if multiple replies are pending in the same 2320 * response interval. Otherwise, send at the later of the next 2321 * scheduled time and the earliest time. 2322 * 2323 * Now we figure out if there is an override. If a burst is in 2324 * progress and we get called from the receive process, just 2325 * slink away. If called from the poll process, delay 1 s for a 2326 * reference clock, otherwise 2 s. 2327 */ 2328 utemp = current_time + max(peer->throttle - (NTP_SHIFT - 1) * 2329 (1 << peer->minpoll), ntp_minpkt); 2330 if (peer->burst > 0) { 2331 if (peer->nextdate > current_time) 2332 return; 2333 #ifdef REFCLOCK 2334 else if (peer->flags & FLAG_REFCLOCK) 2335 peer->nextdate = current_time + RESP_DELAY; 2336 #endif /* REFCLOCK */ 2337 else 2338 peer->nextdate = utemp; 2339 2340 #ifdef AUTOKEY 2341 /* 2342 * If a burst is not in progress and a crypto response message 2343 * is pending, delay 2 s, but only if this is a new interval. 2344 */ 2345 } else if (peer->cmmd != NULL) { 2346 if (peer->nextdate > current_time) { 2347 if (peer->nextdate + ntp_minpkt != utemp) 2348 peer->nextdate = utemp; 2349 } else { 2350 peer->nextdate = utemp; 2351 } 2352 #endif /* AUTOKEY */ 2353 2354 /* 2355 * The ordinary case. If a retry, use minpoll; if unreachable, 2356 * use host poll; otherwise, use the minimum of host and peer 2357 * polls. In other words, oversampling is okay but 2358 * undersampling is evil. Use the maximum of this value and the 2359 * headway. If the average headway is greater than the headway 2360 * threshold, increase the headway by the minimum interval.
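 *
 * For example, the randomized interval computed below,
 * ((0x1000 | (ntp_random() & 0x0ff)) << hpoll) >> 12, spreads the
 * next poll over roughly [1, 1.06] * 2^hpoll seconds; with
 * hpoll = 6 that is 64 to 67 s counted from the last transmit
 * epoch (peer->outdate).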
2361 */ 2362 } else { 2363 if (peer->retry > 0) 2364 hpoll = peer->minpoll; 2365 else if (!(peer->reach)) 2366 hpoll = peer->hpoll; 2367 else 2368 hpoll = min(peer->ppoll, peer->hpoll); 2369 #ifdef REFCLOCK 2370 if (peer->flags & FLAG_REFCLOCK) 2371 next = 1 << hpoll; 2372 else 2373 #endif /* REFCLOCK */ 2374 next = ((0x1000UL | (ntp_random() & 0x0ff)) << 2375 hpoll) >> 12; 2376 next += peer->outdate; 2377 if (next > utemp) 2378 peer->nextdate = next; 2379 else 2380 peer->nextdate = utemp; 2381 if (peer->throttle > (1 << peer->minpoll)) 2382 peer->nextdate += ntp_minpkt; 2383 } 2384 DPRINTF(2, ("poll_update: at %lu %s poll %d burst %d retry %d head %d early %lu next %lu\n", 2385 current_time, ntoa(&peer->srcadr), peer->hpoll, 2386 peer->burst, peer->retry, peer->throttle, 2387 utemp - current_time, peer->nextdate - 2388 current_time)); 2389 } 2390 2391 2392 /* 2393 * peer_clear - clear peer filter registers. See Section 3.4.8 of the 2394 * spec. 2395 */ 2396 void 2397 peer_clear( 2398 struct peer *peer, /* peer structure */ 2399 const char *ident /* tally lights */ 2400 ) 2401 { 2402 u_char u; 2403 2404 #ifdef AUTOKEY 2405 /* 2406 * If cryptographic credentials have been acquired, toss them to 2407 * Valhalla. Note that autokeys are ephemeral, in that they are 2408 * tossed immediately upon use. Therefore, the keylist can be 2409 * purged anytime without needing to preserve random keys. Note 2410 * that, if the peer is purged, the cryptographic variables are 2411 * purged, too. This makes it much harder to sneak in some 2412 * unauthenticated data in the clock filter. 2413 */ 2414 key_expire(peer); 2415 if (peer->iffval != NULL) 2416 BN_free(peer->iffval); 2417 value_free(&peer->cookval); 2418 value_free(&peer->recval); 2419 value_free(&peer->encrypt); 2420 value_free(&peer->sndval); 2421 if (peer->cmmd != NULL) 2422 free(peer->cmmd); 2423 if (peer->subject != NULL) 2424 free(peer->subject); 2425 if (peer->issuer != NULL) 2426 free(peer->issuer); 2427 #endif /* AUTOKEY */ 2428 2429 /* 2430 * Clear all values, including the optional crypto values above. 2431 */ 2432 memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO(peer)); 2433 peer->ppoll = peer->maxpoll; 2434 peer->hpoll = peer->minpoll; 2435 peer->disp = MAXDISPERSE; 2436 peer->flash = peer_unfit(peer); 2437 peer->jitter = LOGTOD(sys_precision); 2438 2439 /* 2440 * If interleave mode, initialize the alternate origin switch. 2441 */ 2442 if (peer->flags & FLAG_XLEAVE) 2443 peer->flip = 1; 2444 for (u = 0; u < NTP_SHIFT; u++) { 2445 peer->filter_order[u] = u; 2446 peer->filter_disp[u] = MAXDISPERSE; 2447 } 2448 #ifdef REFCLOCK 2449 if (!(peer->flags & FLAG_REFCLOCK)) { 2450 #endif 2451 peer->leap = LEAP_NOTINSYNC; 2452 peer->stratum = STRATUM_UNSPEC; 2453 memcpy(&peer->refid, ident, 4); 2454 #ifdef REFCLOCK 2455 } 2456 #endif 2457 2458 /* 2459 * During initialization use the association count to spread out 2460 * the polls at one-second intervals. Passive associations' 2461 * first poll is delayed by the "discard minimum" to avoid rate 2462 * limiting. Other post-startup new or cleared associations 2463 * randomize the first poll over the minimum poll interval to 2464 * avoid implosion. 
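 *
 * With the defaults that works out as follows: the n-th
 * association mobilized at startup polls about n seconds in, a
 * passive association waits the discard minimum (ntp_minpkt,
 * 2 s by default), and any other new or cleared association adds
 * ntp_random() % minpoll, i.e. 0 to 5 s for the default minpoll
 * of 6.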
2465 */ 2466 peer->nextdate = peer->update = peer->outdate = current_time; 2467 if (initializing) { 2468 peer->nextdate += peer_associations; 2469 } else if (MODE_PASSIVE == peer->hmode) { 2470 peer->nextdate += ntp_minpkt; 2471 } else { 2472 peer->nextdate += ntp_random() % peer->minpoll; 2473 } 2474 #ifdef AUTOKEY 2475 peer->refresh = current_time + (1 << NTP_REFRESH); 2476 #endif /* AUTOKEY */ 2477 DPRINTF(1, ("peer_clear: at %ld next %ld associd %d refid %s\n", 2478 current_time, peer->nextdate, peer->associd, 2479 ident)); 2480 } 2481 2482 2483 /* 2484 * clock_filter - add incoming clock sample to filter register and run 2485 * the filter procedure to find the best sample. 2486 */ 2487 void 2488 clock_filter( 2489 struct peer *peer, /* peer structure pointer */ 2490 double sample_offset, /* clock offset */ 2491 double sample_delay, /* roundtrip delay */ 2492 double sample_disp /* dispersion */ 2493 ) 2494 { 2495 double dst[NTP_SHIFT]; /* distance vector */ 2496 int ord[NTP_SHIFT]; /* index vector */ 2497 int i, j, k, m; 2498 double dtemp, etemp; 2499 char tbuf[80]; 2500 2501 /* 2502 * A sample consists of the offset, delay, dispersion and epoch 2503 * of arrival. The offset and delay are determined by the on- 2504 * wire protocol. The dispersion grows from the last outbound 2505 * packet to the arrival of this one increased by the sum of the 2506 * peer precision and the system precision as required by the 2507 * error budget. First, shift the new arrival into the shift 2508 * register discarding the oldest one. 2509 */ 2510 j = peer->filter_nextpt; 2511 peer->filter_offset[j] = sample_offset; 2512 peer->filter_delay[j] = sample_delay; 2513 peer->filter_disp[j] = sample_disp; 2514 peer->filter_epoch[j] = current_time; 2515 j = (j + 1) % NTP_SHIFT; 2516 peer->filter_nextpt = j; 2517 2518 /* 2519 * Update dispersions since the last update and at the same 2520 * time initialize the distance and index lists. Since samples 2521 * become increasingly uncorrelated beyond the Allan intercept, 2522 * only under exceptional cases will an older sample be used. 2523 * Therefore, the distance list uses a compound metric. If the 2524 * dispersion is greater than the maximum dispersion, clamp the 2525 * distance at that value. If the time since the last update is 2526 * less than the Allan intercept use the delay; otherwise, use 2527 * the sum of the delay and dispersion. 2528 */ 2529 dtemp = clock_phi * (current_time - peer->update); 2530 peer->update = current_time; 2531 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2532 if (i != 0) 2533 peer->filter_disp[j] += dtemp; 2534 if (peer->filter_disp[j] >= MAXDISPERSE) { 2535 peer->filter_disp[j] = MAXDISPERSE; 2536 dst[i] = MAXDISPERSE; 2537 } else if (peer->update - peer->filter_epoch[j] > 2538 (u_long)ULOGTOD(allan_xpt)) { 2539 dst[i] = peer->filter_delay[j] + 2540 peer->filter_disp[j]; 2541 } else { 2542 dst[i] = peer->filter_delay[j]; 2543 } 2544 ord[i] = j; 2545 j = (j + 1) % NTP_SHIFT; 2546 } 2547 2548 /* 2549 * If the clock has stabilized, sort the samples by distance. 2550 */ 2551 if (freq_cnt == 0) { 2552 for (i = 1; i < NTP_SHIFT; i++) { 2553 for (j = 0; j < i; j++) { 2554 if (dst[j] > dst[i]) { 2555 k = ord[j]; 2556 ord[j] = ord[i]; 2557 ord[i] = k; 2558 etemp = dst[j]; 2559 dst[j] = dst[i]; 2560 dst[i] = etemp; 2561 } 2562 } 2563 } 2564 } 2565 2566 /* 2567 * Copy the index list to the association structure so ntpq 2568 * can see it later. 
Prune the distance list to leave only 2569 * samples less than the maximum dispersion, which disfavors 2570 * uncorrelated samples older than the Allan intercept. To 2571 * further improve the jitter estimate, of the remainder leave 2572 * only samples less than the maximum distance, but keep at 2573 * least two samples for jitter calculation. 2574 */ 2575 m = 0; 2576 for (i = 0; i < NTP_SHIFT; i++) { 2577 peer->filter_order[i] = (u_char) ord[i]; 2578 if ( dst[i] >= MAXDISPERSE 2579 || (m >= 2 && dst[i] >= sys_maxdist)) 2580 continue; 2581 m++; 2582 } 2583 2584 /* 2585 * Compute the dispersion and jitter. The dispersion is weighted 2586 * exponentially by NTP_FWEIGHT (0.5) so it is normalized close 2587 * to 1.0. The jitter is the RMS differences relative to the 2588 * lowest delay sample. 2589 */ 2590 peer->disp = peer->jitter = 0; 2591 k = ord[0]; 2592 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2593 j = ord[i]; 2594 peer->disp = NTP_FWEIGHT * (peer->disp + 2595 peer->filter_disp[j]); 2596 if (i < m) 2597 peer->jitter += DIFF(peer->filter_offset[j], 2598 peer->filter_offset[k]); 2599 } 2600 2601 /* 2602 * If no acceptable samples remain in the shift register, 2603 * quietly tiptoe home leaving only the dispersion. Otherwise, 2604 * save the offset, delay and jitter. Note the jitter must not 2605 * be less than the precision. 2606 */ 2607 if (m == 0) { 2608 clock_select(); 2609 return; 2610 } 2611 etemp = fabs(peer->offset - peer->filter_offset[k]); 2612 peer->offset = peer->filter_offset[k]; 2613 peer->delay = peer->filter_delay[k]; 2614 if (m > 1) 2615 peer->jitter /= m - 1; 2616 peer->jitter = max(SQRT(peer->jitter), LOGTOD(sys_precision)); 2617 2618 /* 2619 * If the the new sample and the current sample are both valid 2620 * and the difference between their offsets exceeds CLOCK_SGATE 2621 * (3) times the jitter and the interval between them is less 2622 * than twice the host poll interval, consider the new sample 2623 * a popcorn spike and ignore it. 2624 */ 2625 if ( peer->disp < sys_maxdist 2626 && peer->filter_disp[k] < sys_maxdist 2627 && etemp > CLOCK_SGATE * peer->jitter 2628 && peer->filter_epoch[k] - peer->epoch 2629 < 2. * ULOGTOD(peer->hpoll)) { 2630 snprintf(tbuf, sizeof(tbuf), "%.6f s", etemp); 2631 report_event(PEVNT_POPCORN, peer, tbuf); 2632 return; 2633 } 2634 2635 /* 2636 * A new minimum sample is useful only if it is later than the 2637 * last one used. In this design the maximum lifetime of any 2638 * sample is not greater than eight times the poll interval, so 2639 * the maximum interval between minimum samples is eight 2640 * packets. 2641 */ 2642 if (peer->filter_epoch[k] <= peer->epoch) { 2643 DPRINTF(2, ("clock_filter: old sample %lu\n", current_time - 2644 peer->filter_epoch[k])); 2645 return; 2646 } 2647 peer->epoch = peer->filter_epoch[k]; 2648 2649 /* 2650 * The mitigated sample statistics are saved for later 2651 * processing. If not synchronized or not in a burst, tickle the 2652 * clock select algorithm. 
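 *
 * For reference, the statistics recorded below were computed
 * above as, in effect,
 *
 *	disp   = sum(filter_disp[ord[i]] / 2^(i+1)), i = 0 ... 7
 *	jitter = sqrt(sum((offset[ord[i]] - offset[ord[0]])^2) / (m - 1))
 *
 * over the m accepted samples, with the jitter floored at the
 * system precision.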
2653 */ 2654 record_peer_stats(&peer->srcadr, ctlpeerstatus(peer), 2655 peer->offset, peer->delay, peer->disp, peer->jitter); 2656 DPRINTF(1, ("clock_filter: n %d off %.6f del %.6f dsp %.6f jit %.6f\n", 2657 m, peer->offset, peer->delay, peer->disp, 2658 peer->jitter)); 2659 if (peer->burst == 0 || sys_leap == LEAP_NOTINSYNC) 2660 clock_select(); 2661 } 2662 2663 2664 /* 2665 * clock_select - find the pick-of-the-litter clock 2666 * 2667 * LOCKCLOCK: (1) If the local clock is the prefer peer, it will always 2668 * be enabled, even if declared falseticker, (2) only the prefer peer 2669 * can be selected as the system peer, (3) if the external source is 2670 * down, the system leap bits are set to 11 and the stratum set to 2671 * infinity. 2672 */ 2673 void 2674 clock_select(void) 2675 { 2676 struct peer *peer; 2677 int i, j, k, n; 2678 int nlist, nl2; 2679 int allow; 2680 int speer; 2681 double d, e, f, g; 2682 double high, low; 2683 double speermet; 2684 double orphmet = 2.0 * U_INT32_MAX; /* 2x is greater than */ 2685 struct endpoint endp; 2686 struct peer *osys_peer; 2687 struct peer *sys_prefer = NULL; /* prefer peer */ 2688 struct peer *typesystem = NULL; 2689 struct peer *typeorphan = NULL; 2690 #ifdef REFCLOCK 2691 struct peer *typeacts = NULL; 2692 struct peer *typelocal = NULL; 2693 struct peer *typepps = NULL; 2694 #endif /* REFCLOCK */ 2695 static struct endpoint *endpoint = NULL; 2696 static int *indx = NULL; 2697 static peer_select *peers = NULL; 2698 static u_int endpoint_size = 0; 2699 static u_int peers_size = 0; 2700 static u_int indx_size = 0; 2701 size_t octets; 2702 2703 /* 2704 * Initialize and create endpoint, index and peer lists big 2705 * enough to handle all associations. 2706 */ 2707 osys_peer = sys_peer; 2708 sys_survivors = 0; 2709 #ifdef LOCKCLOCK 2710 set_sys_leap(LEAP_NOTINSYNC); 2711 sys_stratum = STRATUM_UNSPEC; 2712 memcpy(&sys_refid, "DOWN", 4); 2713 #endif /* LOCKCLOCK */ 2714 2715 /* 2716 * Allocate dynamic space depending on the number of 2717 * associations. 2718 */ 2719 nlist = 1; 2720 for (peer = peer_list; peer != NULL; peer = peer->p_link) 2721 nlist++; 2722 endpoint_size = ALIGNED_SIZE(nlist * 2 * sizeof(*endpoint)); 2723 peers_size = ALIGNED_SIZE(nlist * sizeof(*peers)); 2724 indx_size = ALIGNED_SIZE(nlist * 2 * sizeof(*indx)); 2725 octets = endpoint_size + peers_size + indx_size; 2726 endpoint = erealloc(endpoint, octets); 2727 peers = INC_ALIGNED_PTR(endpoint, endpoint_size); 2728 indx = INC_ALIGNED_PTR(peers, peers_size); 2729 2730 /* 2731 * Initially, we populate the island with all the rifraff peers 2732 * that happen to be lying around. Those with seriously 2733 * defective clocks are immediately booted off the island. Then, 2734 * the falsetickers are culled and put to sea. The truechimers 2735 * remaining are subject to repeated rounds where the most 2736 * unpopular at each round is kicked off. When the population 2737 * has dwindled to sys_minclock, the survivors split a million 2738 * bucks and collectively crank the chimes. 2739 */ 2740 nlist = nl2 = 0; /* none yet */ 2741 for (peer = peer_list; peer != NULL; peer = peer->p_link) { 2742 peer->new_status = CTL_PST_SEL_REJECT; 2743 2744 /* 2745 * Leave the island immediately if the peer is 2746 * unfit to synchronize. 2747 */ 2748 if (peer_unfit(peer)) 2749 continue; 2750 2751 /* 2752 * If this peer is an orphan parent, elect the 2753 * one with the lowest metric defined as the 2754 * IPv4 address or the first 64 bits of the 2755 * hashed IPv6 address. 
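 * (For IPv4, for example, the metric is simply the address in
 * host byte order, so 10.0.0.1 wins over 192.168.1.1.)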
To ensure convergence 2756 * on the same selected orphan, consider as 2757 * well that this system may have the lowest 2758 * metric and be the orphan parent. If this 2759 * system wins, sys_peer will be NULL to trigger 2760 * orphan mode in timer(). 2761 */ 2762 if (peer->stratum == sys_orphan) { 2763 u_int32 localmet; 2764 u_int32 peermet; 2765 2766 if (peer->dstadr != NULL) 2767 localmet = ntohl(peer->dstadr->addr_refid); 2768 else 2769 localmet = U_INT32_MAX; 2770 peermet = ntohl(addr2refid(&peer->srcadr)); 2771 if (peermet < localmet && peermet < orphmet) { 2772 typeorphan = peer; 2773 orphmet = peermet; 2774 } 2775 continue; 2776 } 2777 2778 /* 2779 * If this peer could have the orphan parent 2780 * as a synchronization ancestor, exclude it 2781 * from selection to avoid forming a 2782 * synchronization loop within the orphan mesh, 2783 * triggering stratum climb to infinity 2784 * instability. Peers at stratum higher than 2785 * the orphan stratum could have the orphan 2786 * parent in ancestry so are excluded. 2787 * See http://bugs.ntp.org/2050 2788 */ 2789 if (peer->stratum > sys_orphan) 2790 continue; 2791 #ifdef REFCLOCK 2792 /* 2793 * The following are special cases. We deal 2794 * with them later. 2795 */ 2796 if (!(peer->flags & FLAG_PREFER)) { 2797 switch (peer->refclktype) { 2798 case REFCLK_LOCALCLOCK: 2799 if ( current_time > orphwait 2800 && typelocal == NULL) 2801 typelocal = peer; 2802 continue; 2803 2804 case REFCLK_ACTS: 2805 if ( current_time > orphwait 2806 && typeacts == NULL) 2807 typeacts = peer; 2808 continue; 2809 } 2810 } 2811 #endif /* REFCLOCK */ 2812 2813 /* 2814 * If we get this far, the peer can stay on the 2815 * island, but does not yet have the immunity 2816 * idol. 2817 */ 2818 peer->new_status = CTL_PST_SEL_SANE; 2819 f = root_distance(peer); 2820 peers[nlist].peer = peer; 2821 peers[nlist].error = peer->jitter; 2822 peers[nlist].synch = f; 2823 nlist++; 2824 2825 /* 2826 * Insert each interval endpoint on the unsorted 2827 * endpoint[] list. 2828 */ 2829 e = peer->offset; 2830 endpoint[nl2].type = -1; /* lower end */ 2831 endpoint[nl2].val = e - f; 2832 nl2++; 2833 endpoint[nl2].type = 1; /* upper end */ 2834 endpoint[nl2].val = e + f; 2835 nl2++; 2836 } 2837 /* 2838 * Construct sorted indx[] of endpoint[] indexes ordered by 2839 * offset. 2840 */ 2841 for (i = 0; i < nl2; i++) 2842 indx[i] = i; 2843 for (i = 0; i < nl2; i++) { 2844 endp = endpoint[indx[i]]; 2845 e = endp.val; 2846 k = i; 2847 for (j = i + 1; j < nl2; j++) { 2848 endp = endpoint[indx[j]]; 2849 if (endp.val < e) { 2850 e = endp.val; 2851 k = j; 2852 } 2853 } 2854 if (k != i) { 2855 j = indx[k]; 2856 indx[k] = indx[i]; 2857 indx[i] = j; 2858 } 2859 } 2860 for (i = 0; i < nl2; i++) 2861 DPRINTF(3, ("select: endpoint %2d %.6f\n", 2862 endpoint[indx[i]].type, endpoint[indx[i]].val)); 2863 2864 /* 2865 * This is the actual algorithm that cleaves the truechimers 2866 * from the falsetickers. The original algorithm was described 2867 * in Keith Marzullo's dissertation, but has been modified for 2868 * better accuracy. 2869 * 2870 * Briefly put, we first assume there are no falsetickers, then 2871 * scan the candidate list first from the low end upwards and 2872 * then from the high end downwards. The scans stop when the 2873 * number of intersections equals the number of candidates less 2874 * the number of falsetickers. If this doesn't happen for a 2875 * given number of falsetickers, we bump the number of 2876 * falsetickers and try again. 
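 * For a small example with assumed values: three candidates with
 * correctness intervals 10 +/- 50 ms, 15 +/- 60 ms and
 * 500 +/- 20 ms share no point covered by all three, so the first
 * pass fails; allowing one falseticker the scans stop at
 * low = -40 ms and high = 60 ms, and the third candidate, whose
 * interval lies entirely above high, is cast off as a
 * falseticker.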
If the number of falsetickers 2877 * becomes equal to or greater than half the number of 2878 * candidates, the Albanians have won the Byzantine wars and 2879 * correct synchronization is not possible. 2880 * 2881 * Here, nlist is the number of candidates and allow is the 2882 * number of falsetickers. Upon exit, the truechimers are the 2883 * survivors with offsets not less than low and not greater than 2884 * high. There may be none of them. 2885 */ 2886 low = 1e9; 2887 high = -1e9; 2888 for (allow = 0; 2 * allow < nlist; allow++) { 2889 2890 /* 2891 * Bound the interval (low, high) as the smallest 2892 * interval containing points from the most sources. 2893 */ 2894 n = 0; 2895 for (i = 0; i < nl2; i++) { 2896 low = endpoint[indx[i]].val; 2897 n -= endpoint[indx[i]].type; 2898 if (n >= nlist - allow) 2899 break; 2900 } 2901 n = 0; 2902 for (j = nl2 - 1; j >= 0; j--) { 2903 high = endpoint[indx[j]].val; 2904 n += endpoint[indx[j]].type; 2905 if (n >= nlist - allow) 2906 break; 2907 } 2908 2909 /* 2910 * If an interval containing truechimers is found, stop. 2911 * If not, increase the number of falsetickers and go 2912 * around again. 2913 */ 2914 if (high > low) 2915 break; 2916 } 2917 2918 /* 2919 * Clustering algorithm. Whittle candidate list of falsetickers, 2920 * who leave the island immediately. The TRUE peer is always a 2921 * truechimer. We must leave at least one peer to collect the 2922 * million bucks. 2923 * 2924 * We assert the correct time is contained in the interval, but 2925 * the best offset estimate for the interval might not be 2926 * contained in the interval. For this purpose, a truechimer is 2927 * defined as the midpoint of an interval that overlaps the 2928 * intersection interval. 2929 */ 2930 j = 0; 2931 for (i = 0; i < nlist; i++) { 2932 double h; 2933 2934 peer = peers[i].peer; 2935 h = peers[i].synch; 2936 if (( high <= low 2937 || peer->offset + h < low 2938 || peer->offset - h > high 2939 ) && !(peer->flags & FLAG_TRUE)) 2940 continue; 2941 2942 #ifdef REFCLOCK 2943 /* 2944 * Eligible PPS peers must survive the intersection 2945 * algorithm. Use the first one found, but don't 2946 * include any of them in the cluster population. 2947 */ 2948 if (peer->flags & FLAG_PPS) { 2949 if (typepps == NULL) 2950 typepps = peer; 2951 if (!(peer->flags & FLAG_TSTAMP_PPS)) 2952 continue; 2953 } 2954 #endif /* REFCLOCK */ 2955 2956 if (j != i) 2957 peers[j] = peers[i]; 2958 j++; 2959 } 2960 nlist = j; 2961 2962 /* 2963 * If no survivors remain at this point, check if the modem 2964 * driver, local driver or orphan parent in that order. If so, 2965 * nominate the first one found as the only survivor. 2966 * Otherwise, give up and leave the island to the rats. 2967 */ 2968 if (nlist == 0) { 2969 peers[0].error = 0; 2970 peers[0].synch = sys_mindisp; 2971 #ifdef REFCLOCK 2972 if (typeacts != NULL) { 2973 peers[0].peer = typeacts; 2974 nlist = 1; 2975 } else if (typelocal != NULL) { 2976 peers[0].peer = typelocal; 2977 nlist = 1; 2978 } else 2979 #endif /* REFCLOCK */ 2980 if (typeorphan != NULL) { 2981 peers[0].peer = typeorphan; 2982 nlist = 1; 2983 } 2984 } 2985 2986 /* 2987 * Mark the candidates at this point as truechimers. 2988 */ 2989 for (i = 0; i < nlist; i++) { 2990 peers[i].peer->new_status = CTL_PST_SEL_SELCAND; 2991 DPRINTF(2, ("select: survivor %s %f\n", 2992 stoa(&peers[i].peer->srcadr), peers[i].synch)); 2993 } 2994 2995 /* 2996 * Now, vote outliers off the island by select jitter weighted 2997 * by root distance. 
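 * (A survivor's select jitter is computed below as the RMS of its
 * offset differences from the other survivors,
 * sqrt(sum((theta_j - theta_i)^2) / (n - 1)), and each round the
 * candidate to go is the survivor with the largest product of
 * select jitter and root distance.)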
Continue voting as long as there are more 2998 * than sys_minclock survivors and the select jitter of the peer 2999 * with the worst metric is greater than the minimum peer 3000 * jitter. Stop if we are about to discard a TRUE or PREFER 3001 * peer, who of course have the immunity idol. 3002 */ 3003 while (1) { 3004 d = 1e9; 3005 e = -1e9; 3006 g = 0; 3007 k = 0; 3008 for (i = 0; i < nlist; i++) { 3009 if (peers[i].error < d) 3010 d = peers[i].error; 3011 peers[i].seljit = 0; 3012 if (nlist > 1) { 3013 f = 0; 3014 for (j = 0; j < nlist; j++) 3015 f += DIFF(peers[j].peer->offset, 3016 peers[i].peer->offset); 3017 peers[i].seljit = SQRT(f / (nlist - 1)); 3018 } 3019 if (peers[i].seljit * peers[i].synch > e) { 3020 g = peers[i].seljit; 3021 e = peers[i].seljit * peers[i].synch; 3022 k = i; 3023 } 3024 } 3025 g = max(g, LOGTOD(sys_precision)); 3026 if ( nlist <= max(1, sys_minclock) 3027 || g <= d 3028 || ((FLAG_TRUE | FLAG_PREFER) & peers[k].peer->flags)) 3029 break; 3030 3031 DPRINTF(3, ("select: drop %s seljit %.6f jit %.6f\n", 3032 ntoa(&peers[k].peer->srcadr), g, d)); 3033 if (nlist > sys_maxclock) 3034 peers[k].peer->new_status = CTL_PST_SEL_EXCESS; 3035 for (j = k + 1; j < nlist; j++) 3036 peers[j - 1] = peers[j]; 3037 nlist--; 3038 } 3039 3040 /* 3041 * What remains is a list usually not greater than sys_minclock 3042 * peers. Note that unsynchronized peers cannot survive this 3043 * far. Count and mark these survivors. 3044 * 3045 * While at it, count the number of leap warning bits found. 3046 * This will be used later to vote the system leap warning bit. 3047 * If a leap warning bit is found on a reference clock, the vote 3048 * is always won. 3049 * 3050 * Choose the system peer using a hybrid metric composed of the 3051 * selection jitter scaled by the root distance augmented by 3052 * stratum scaled by sys_mindisp (.001 by default). The goal of 3053 * the small stratum factor is to avoid clockhop between a 3054 * reference clock and a network peer which has a refclock and 3055 * is using an older ntpd, which does not floor sys_rootdisp at 3056 * sys_mindisp. 3057 * 3058 * In contrast, ntpd 4.2.6 and earlier used stratum primarily 3059 * in selecting the system peer, using a weight of 1 second of 3060 * additional root distance per stratum. This heavy bias is no 3061 * longer appropriate, as the scaled root distance provides a 3062 * more rational metric carrying the cumulative error budget. 3063 */ 3064 e = 1e9; 3065 speer = 0; 3066 leap_vote_ins = 0; 3067 leap_vote_del = 0; 3068 for (i = 0; i < nlist; i++) { 3069 peer = peers[i].peer; 3070 peer->unreach = 0; 3071 peer->new_status = CTL_PST_SEL_SYNCCAND; 3072 sys_survivors++; 3073 if (peer->leap == LEAP_ADDSECOND) { 3074 if (peer->flags & FLAG_REFCLOCK) 3075 leap_vote_ins = nlist; 3076 else if (leap_vote_ins < nlist) 3077 leap_vote_ins++; 3078 } 3079 if (peer->leap == LEAP_DELSECOND) { 3080 if (peer->flags & FLAG_REFCLOCK) 3081 leap_vote_del = nlist; 3082 else if (leap_vote_del < nlist) 3083 leap_vote_del++; 3084 } 3085 if (peer->flags & FLAG_PREFER) 3086 sys_prefer = peer; 3087 speermet = peers[i].seljit * peers[i].synch + 3088 peer->stratum * sys_mindisp; 3089 if (speermet < e) { 3090 e = speermet; 3091 speer = i; 3092 } 3093 } 3094 3095 /* 3096 * Unless there are at least sys_misane survivors, leave the 3097 * building dark. Otherwise, do a clockhop dance. Ordinarily, 3098 * use the selected survivor speer. 
However, if the current 3099 * system peer is not speer, stay with the current system peer 3100 * as long as it doesn't get too old or too ugly. 3101 */ 3102 if (nlist > 0 && nlist >= sys_minsane) { 3103 double x; 3104 3105 typesystem = peers[speer].peer; 3106 if (osys_peer == NULL || osys_peer == typesystem) { 3107 sys_clockhop = 0; 3108 } else if ((x = fabs(typesystem->offset - 3109 osys_peer->offset)) < sys_mindisp) { 3110 if (sys_clockhop == 0) 3111 sys_clockhop = sys_mindisp; 3112 else 3113 sys_clockhop *= .5; 3114 DPRINTF(1, ("select: clockhop %d %.6f %.6f\n", 3115 j, x, sys_clockhop)); 3116 if (fabs(x) < sys_clockhop) 3117 typesystem = osys_peer; 3118 else 3119 sys_clockhop = 0; 3120 } else { 3121 sys_clockhop = 0; 3122 } 3123 } 3124 3125 /* 3126 * Mitigation rules of the game. We have the pick of the 3127 * litter in typesystem if any survivors are left. If 3128 * there is a prefer peer, use its offset and jitter. 3129 * Otherwise, use the combined offset and jitter of all kitters. 3130 */ 3131 if (typesystem != NULL) { 3132 if (sys_prefer == NULL) { 3133 typesystem->new_status = CTL_PST_SEL_SYSPEER; 3134 clock_combine(peers, sys_survivors, speer); 3135 } else { 3136 typesystem = sys_prefer; 3137 sys_clockhop = 0; 3138 typesystem->new_status = CTL_PST_SEL_SYSPEER; 3139 sys_offset = typesystem->offset; 3140 sys_jitter = typesystem->jitter; 3141 } 3142 DPRINTF(1, ("select: combine offset %.9f jitter %.9f\n", 3143 sys_offset, sys_jitter)); 3144 } 3145 #ifdef REFCLOCK 3146 /* 3147 * If a PPS driver is lit and the combined offset is less than 3148 * 0.4 s, select the driver as the PPS peer and use its offset 3149 * and jitter. However, if this is the atom driver, use it only 3150 * if there is a prefer peer or there are no survivors and none 3151 * are required. 3152 */ 3153 if ( typepps != NULL 3154 && fabs(sys_offset) < 0.4 3155 && ( typepps->refclktype != REFCLK_ATOM_PPS 3156 || ( typepps->refclktype == REFCLK_ATOM_PPS 3157 && ( sys_prefer != NULL 3158 || (typesystem == NULL && sys_minsane == 0))))) { 3159 typesystem = typepps; 3160 sys_clockhop = 0; 3161 typesystem->new_status = CTL_PST_SEL_PPS; 3162 sys_offset = typesystem->offset; 3163 sys_jitter = typesystem->jitter; 3164 DPRINTF(1, ("select: pps offset %.9f jitter %.9f\n", 3165 sys_offset, sys_jitter)); 3166 } 3167 #endif /* REFCLOCK */ 3168 3169 /* 3170 * If there are no survivors at this point, there is no 3171 * system peer. If so and this is an old update, keep the 3172 * current statistics, but do not update the clock. 3173 */ 3174 if (typesystem == NULL) { 3175 if (osys_peer != NULL) { 3176 if (sys_orphwait > 0) 3177 orphwait = current_time + sys_orphwait; 3178 report_event(EVNT_NOPEER, NULL, NULL); 3179 } 3180 sys_peer = NULL; 3181 for (peer = peer_list; peer != NULL; peer = peer->p_link) 3182 peer->status = peer->new_status; 3183 return; 3184 } 3185 3186 /* 3187 * Do not use old data, as this may mess up the clock discipline 3188 * stability. 3189 */ 3190 if (typesystem->epoch <= sys_epoch) 3191 return; 3192 3193 /* 3194 * We have found the alpha male. Wind the clock. 
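 *
 * For reference, absent a prefer or PPS override the combined
 * values above come from clock_combine() below, which weights
 * each survivor by the reciprocal of its root distance:
 *
 *	sys_offset = sum(theta_i / lambda_i) / sum(1 / lambda_i)
 *	sys_jitter = sqrt(sum((theta_i - theta_s)^2 / lambda_i) /
 *		     sum(1 / lambda_i) + seljit_s^2)
 *
 * where theta_s and seljit_s belong to the selected system peer.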
3195 */ 3196 if (osys_peer != typesystem) 3197 report_event(PEVNT_NEWPEER, typesystem, NULL); 3198 for (peer = peer_list; peer != NULL; peer = peer->p_link) 3199 peer->status = peer->new_status; 3200 clock_update(typesystem); 3201 } 3202 3203 3204 static void 3205 clock_combine( 3206 peer_select * peers, /* survivor list */ 3207 int npeers, /* number of survivors */ 3208 int syspeer /* index of sys.peer */ 3209 ) 3210 { 3211 int i; 3212 double x, y, z, w; 3213 3214 y = z = w = 0; 3215 for (i = 0; i < npeers; i++) { 3216 x = 1. / peers[i].synch; 3217 y += x; 3218 z += x * peers[i].peer->offset; 3219 w += x * DIFF(peers[i].peer->offset, 3220 peers[syspeer].peer->offset); 3221 } 3222 sys_offset = z / y; 3223 sys_jitter = SQRT(w / y + SQUARE(peers[syspeer].seljit)); 3224 } 3225 3226 3227 /* 3228 * root_distance - compute synchronization distance from peer to root 3229 */ 3230 static double 3231 root_distance( 3232 struct peer *peer /* peer structure pointer */ 3233 ) 3234 { 3235 double dtemp; 3236 3237 /* 3238 * Root Distance (LAMBDA) is defined as: 3239 * (delta + DELTA)/2 + epsilon + EPSILON + phi 3240 * 3241 * where: 3242 * delta is the round-trip delay 3243 * DELTA is the root delay 3244 * epsilon is the remote server precision + local precision 3245 * + (15 usec each second) 3246 * EPSILON is the root dispersion 3247 * phi is the peer jitter statistic 3248 * 3249 * NB: Think hard about why we are using these values, and what 3250 * the alternatives are, and the various pros/cons. 3251 * 3252 * DLM thinks these are probably the best choices from any of the 3253 * other worse choices. 3254 */ 3255 dtemp = (peer->delay + peer->rootdelay) / 2 3256 + LOGTOD(peer->precision) 3257 + LOGTOD(sys_precision) 3258 + clock_phi * (current_time - peer->update) 3259 + peer->rootdisp 3260 + peer->jitter; 3261 /* 3262 * Careful squeak here. The value returned must be greater than 3263 * the minimum root dispersion in order to avoid clockhop with 3264 * highly precise reference clocks. Note that the root distance 3265 * cannot exceed the sys_maxdist, as this is the cutoff by the 3266 * selection algorithm. 3267 */ 3268 if (dtemp < sys_mindisp) 3269 dtemp = sys_mindisp; 3270 return (dtemp); 3271 } 3272 3273 3274 /* 3275 * peer_xmit - send packet for persistent association. 3276 */ 3277 static void 3278 peer_xmit( 3279 struct peer *peer /* peer structure pointer */ 3280 ) 3281 { 3282 struct pkt xpkt; /* transmit packet */ 3283 size_t sendlen, authlen; 3284 keyid_t xkeyid = 0; /* transmit key ID */ 3285 l_fp xmt_tx, xmt_ty; 3286 3287 if (!peer->dstadr) /* drop peers without interface */ 3288 return; 3289 3290 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, peer->version, 3291 peer->hmode); 3292 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3293 xpkt.ppoll = peer->hpoll; 3294 xpkt.precision = sys_precision; 3295 xpkt.refid = sys_refid; 3296 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3297 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3298 HTONL_FP(&sys_reftime, &xpkt.reftime); 3299 HTONL_FP(&peer->rec, &xpkt.org); 3300 HTONL_FP(&peer->dst, &xpkt.rec); 3301 3302 /* 3303 * If the received packet contains a MAC, the transmitted packet 3304 * is authenticated and contains a MAC. If not, the transmitted 3305 * packet is not authenticated. 3306 * 3307 * It is most important when autokey is in use that the local 3308 * interface IP address be known before the first packet is 3309 * sent. Otherwise, it is not possible to compute a correct MAC 3310 * the recipient will accept. 
Thus, the I/O semantics have to do 3311 * a little more work. In particular, the wildcard interface 3312 * might not be usable. 3313 */ 3314 sendlen = LEN_PKT_NOMAC; 3315 if ( 3316 #ifdef AUTOKEY 3317 !(peer->flags & FLAG_SKEY) && 3318 #endif /* !AUTOKEY */ 3319 peer->keyid == 0) { 3320 3321 /* 3322 * Transmit a-priori timestamps 3323 */ 3324 get_systime(&xmt_tx); 3325 if (peer->flip == 0) { /* basic mode */ 3326 peer->aorg = xmt_tx; 3327 HTONL_FP(&xmt_tx, &xpkt.xmt); 3328 } else { /* interleaved modes */ 3329 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3330 HTONL_FP(&xmt_tx, &xpkt.xmt); 3331 if (peer->flip > 0) 3332 HTONL_FP(&peer->borg, 3333 &xpkt.org); 3334 else 3335 HTONL_FP(&peer->aorg, 3336 &xpkt.org); 3337 } else { /* symmetric */ 3338 if (peer->flip > 0) 3339 HTONL_FP(&peer->borg, 3340 &xpkt.xmt); 3341 else 3342 HTONL_FP(&peer->aorg, 3343 &xpkt.xmt); 3344 } 3345 } 3346 peer->t21_bytes = sendlen; 3347 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], 3348 &xpkt, sendlen); 3349 peer->sent++; 3350 peer->throttle += (1 << peer->minpoll) - 2; 3351 3352 /* 3353 * Capture a-posteriori timestamps 3354 */ 3355 get_systime(&xmt_ty); 3356 if (peer->flip != 0) { /* interleaved modes */ 3357 if (peer->flip > 0) 3358 peer->aorg = xmt_ty; 3359 else 3360 peer->borg = xmt_ty; 3361 peer->flip = -peer->flip; 3362 } 3363 L_SUB(&xmt_ty, &xmt_tx); 3364 LFPTOD(&xmt_ty, peer->xleave); 3365 DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d len %zu xmt %#010x.%08x\n", 3366 current_time, 3367 peer->dstadr ? stoa(&peer->dstadr->sin) : "-", 3368 stoa(&peer->srcadr), peer->hmode, sendlen, 3369 xmt_tx.l_ui, xmt_tx.l_uf)); 3370 return; 3371 } 3372 3373 /* 3374 * Authentication is enabled, so the transmitted packet must be 3375 * authenticated. If autokey is enabled, fuss with the various 3376 * modes; otherwise, symmetric key cryptography is used. 3377 */ 3378 #ifdef AUTOKEY 3379 if (peer->flags & FLAG_SKEY) { 3380 struct exten *exten; /* extension field */ 3381 3382 /* 3383 * The Public Key Dance (PKD): Cryptographic credentials 3384 * are contained in extension fields, each including a 3385 * 4-octet length/code word followed by a 4-octet 3386 * association ID and optional additional data. Optional 3387 * data includes a 4-octet data length field followed by 3388 * the data itself. Request messages are sent from a 3389 * configured association; response messages can be sent 3390 * from a configured association or can take the fast 3391 * path without ever matching an association. Response 3392 * messages have the same code as the request, but have 3393 * a response bit and possibly an error bit set. In this 3394 * implementation, a message may contain no more than 3395 * one command and one or more responses. 3396 * 3397 * Cryptographic session keys include both a public and 3398 * a private componet. Request and response messages 3399 * using extension fields are always sent with the 3400 * private component set to zero. Packets without 3401 * extension fields indlude the private component when 3402 * the session key is generated. 3403 */ 3404 while (1) { 3405 3406 /* 3407 * Allocate and initialize a keylist if not 3408 * already done. Then, use the list in inverse 3409 * order, discarding keys once used. Keep the 3410 * latest key around until the next one, so 3411 * clients can use client/server packets to 3412 * compute propagation delay. 3413 * 3414 * Note that once a key is used from the list, 3415 * it is retained in the key cache until the 3416 * next key is used. 
This is to allow a client 3417 * to retrieve the encrypted session key 3418 * identifier to verify authenticity. 3419 * 3420 * If for some reason a key is no longer in the 3421 * key cache, a birthday has happened or the key 3422 * has expired, so the pseudo-random sequence is 3423 * broken. In that case, purge the keylist and 3424 * regenerate it. 3425 */ 3426 if (peer->keynumber == 0) 3427 make_keylist(peer, peer->dstadr); 3428 else 3429 peer->keynumber--; 3430 xkeyid = peer->keylist[peer->keynumber]; 3431 if (authistrusted(xkeyid)) 3432 break; 3433 else 3434 key_expire(peer); 3435 } 3436 peer->keyid = xkeyid; 3437 exten = NULL; 3438 switch (peer->hmode) { 3439 3440 /* 3441 * In broadcast server mode the autokey values are 3442 * required by the broadcast clients. Push them when a 3443 * new keylist is generated; otherwise, push the 3444 * association message so the client can request them at 3445 * other times. 3446 */ 3447 case MODE_BROADCAST: 3448 if (peer->flags & FLAG_ASSOC) 3449 exten = crypto_args(peer, CRYPTO_AUTO | 3450 CRYPTO_RESP, peer->associd, NULL); 3451 else 3452 exten = crypto_args(peer, CRYPTO_ASSOC | 3453 CRYPTO_RESP, peer->associd, NULL); 3454 break; 3455 3456 /* 3457 * In symmetric modes the parameter, certificate, 3458 * identity, cookie and autokey exchanges are 3459 * required. The leapsecond exchange is optional. But, a 3460 * peer will not believe the other peer until the other 3461 * peer has synchronized, so the certificate exchange 3462 * might loop until then. If a peer finds a broken 3463 * autokey sequence, it uses the autokey exchange to 3464 * retrieve the autokey values. In any case, if a new 3465 * keylist is generated, the autokey values are pushed. 3466 */ 3467 case MODE_ACTIVE: 3468 case MODE_PASSIVE: 3469 3470 /* 3471 * Parameter, certificate and identity. 3472 */ 3473 if (!peer->crypto) 3474 exten = crypto_args(peer, CRYPTO_ASSOC, 3475 peer->associd, hostval.ptr); 3476 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3477 exten = crypto_args(peer, CRYPTO_CERT, 3478 peer->associd, peer->issuer); 3479 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3480 exten = crypto_args(peer, 3481 crypto_ident(peer), peer->associd, 3482 NULL); 3483 3484 /* 3485 * Cookie and autokey. We request the cookie 3486 * only when the this peer and the other peer 3487 * are synchronized. But, this peer needs the 3488 * autokey values when the cookie is zero. Any 3489 * time we regenerate the key list, we offer the 3490 * autokey values without being asked. If for 3491 * some reason either peer finds a broken 3492 * autokey sequence, the autokey exchange is 3493 * used to retrieve the autokey values. 3494 */ 3495 else if ( sys_leap != LEAP_NOTINSYNC 3496 && peer->leap != LEAP_NOTINSYNC 3497 && !(peer->crypto & CRYPTO_FLAG_COOK)) 3498 exten = crypto_args(peer, CRYPTO_COOK, 3499 peer->associd, NULL); 3500 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3501 exten = crypto_args(peer, CRYPTO_AUTO, 3502 peer->associd, NULL); 3503 else if ( peer->flags & FLAG_ASSOC 3504 && peer->crypto & CRYPTO_FLAG_SIGN) 3505 exten = crypto_args(peer, CRYPTO_AUTO | 3506 CRYPTO_RESP, peer->assoc, NULL); 3507 3508 /* 3509 * Wait for clock sync, then sign the 3510 * certificate and retrieve the leapsecond 3511 * values. 
3512 */ 3513 else if (sys_leap == LEAP_NOTINSYNC) 3514 break; 3515 3516 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3517 exten = crypto_args(peer, CRYPTO_SIGN, 3518 peer->associd, hostval.ptr); 3519 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3520 exten = crypto_args(peer, CRYPTO_LEAP, 3521 peer->associd, NULL); 3522 break; 3523 3524 /* 3525 * In client mode the parameter, certificate, identity, 3526 * cookie and sign exchanges are required. The 3527 * leapsecond exchange is optional. If broadcast client 3528 * mode the same exchanges are required, except that the 3529 * autokey exchange is substitutes for the cookie 3530 * exchange, since the cookie is always zero. If the 3531 * broadcast client finds a broken autokey sequence, it 3532 * uses the autokey exchange to retrieve the autokey 3533 * values. 3534 */ 3535 case MODE_CLIENT: 3536 3537 /* 3538 * Parameter, certificate and identity. 3539 */ 3540 if (!peer->crypto) 3541 exten = crypto_args(peer, CRYPTO_ASSOC, 3542 peer->associd, hostval.ptr); 3543 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3544 exten = crypto_args(peer, CRYPTO_CERT, 3545 peer->associd, peer->issuer); 3546 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3547 exten = crypto_args(peer, 3548 crypto_ident(peer), peer->associd, 3549 NULL); 3550 3551 /* 3552 * Cookie and autokey. These are requests, but 3553 * we use the peer association ID with autokey 3554 * rather than our own. 3555 */ 3556 else if (!(peer->crypto & CRYPTO_FLAG_COOK)) 3557 exten = crypto_args(peer, CRYPTO_COOK, 3558 peer->associd, NULL); 3559 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3560 exten = crypto_args(peer, CRYPTO_AUTO, 3561 peer->assoc, NULL); 3562 3563 /* 3564 * Wait for clock sync, then sign the 3565 * certificate and retrieve the leapsecond 3566 * values. 3567 */ 3568 else if (sys_leap == LEAP_NOTINSYNC) 3569 break; 3570 3571 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3572 exten = crypto_args(peer, CRYPTO_SIGN, 3573 peer->associd, hostval.ptr); 3574 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3575 exten = crypto_args(peer, CRYPTO_LEAP, 3576 peer->associd, NULL); 3577 break; 3578 } 3579 3580 /* 3581 * Add a queued extension field if present. This is 3582 * always a request message, so the reply ID is already 3583 * in the message. If an error occurs, the error bit is 3584 * lit in the response. 3585 */ 3586 if (peer->cmmd != NULL) { 3587 u_int32 temp32; 3588 3589 temp32 = CRYPTO_RESP; 3590 peer->cmmd->opcode |= htonl(temp32); 3591 sendlen += crypto_xmit(peer, &xpkt, NULL, 3592 sendlen, peer->cmmd, 0); 3593 free(peer->cmmd); 3594 peer->cmmd = NULL; 3595 } 3596 3597 /* 3598 * Add an extension field created above. All but the 3599 * autokey response message are request messages. 3600 */ 3601 if (exten != NULL) { 3602 if (exten->opcode != 0) 3603 sendlen += crypto_xmit(peer, &xpkt, 3604 NULL, sendlen, exten, 0); 3605 free(exten); 3606 } 3607 3608 /* 3609 * Calculate the next session key. Since extension 3610 * fields are present, the cookie value is zero. 
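 * (An Autokey session key is formed by hashing the source
 * address, destination address, key ID and cookie together, MD5
 * by default, so the call below folds in a zero cookie along
 * with the addresses and transmit key ID.)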
3611 */ 3612 if (sendlen > (int)LEN_PKT_NOMAC) { 3613 session_key(&peer->dstadr->sin, &peer->srcadr, 3614 xkeyid, 0, 2); 3615 } 3616 } 3617 #endif /* AUTOKEY */ 3618 3619 /* 3620 * Transmit a-priori timestamps 3621 */ 3622 get_systime(&xmt_tx); 3623 if (peer->flip == 0) { /* basic mode */ 3624 peer->aorg = xmt_tx; 3625 HTONL_FP(&xmt_tx, &xpkt.xmt); 3626 } else { /* interleaved modes */ 3627 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3628 HTONL_FP(&xmt_tx, &xpkt.xmt); 3629 if (peer->flip > 0) 3630 HTONL_FP(&peer->borg, &xpkt.org); 3631 else 3632 HTONL_FP(&peer->aorg, &xpkt.org); 3633 } else { /* symmetric */ 3634 if (peer->flip > 0) 3635 HTONL_FP(&peer->borg, &xpkt.xmt); 3636 else 3637 HTONL_FP(&peer->aorg, &xpkt.xmt); 3638 } 3639 } 3640 xkeyid = peer->keyid; 3641 authlen = authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3642 if (authlen == 0) { 3643 report_event(PEVNT_AUTH, peer, "no key"); 3644 peer->flash |= TEST5; /* auth error */ 3645 peer->badauth++; 3646 return; 3647 } 3648 sendlen += authlen; 3649 #ifdef AUTOKEY 3650 if (xkeyid > NTP_MAXKEY) 3651 authtrust(xkeyid, 0); 3652 #endif /* AUTOKEY */ 3653 if (sendlen > sizeof(xpkt)) { 3654 msyslog(LOG_ERR, "peer_xmit: buffer overflow %zu", sendlen); 3655 exit (-1); 3656 } 3657 peer->t21_bytes = sendlen; 3658 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], &xpkt, 3659 sendlen); 3660 peer->sent++; 3661 peer->throttle += (1 << peer->minpoll) - 2; 3662 3663 /* 3664 * Capture a-posteriori timestamps 3665 */ 3666 get_systime(&xmt_ty); 3667 if (peer->flip != 0) { /* interleaved modes */ 3668 if (peer->flip > 0) 3669 peer->aorg = xmt_ty; 3670 else 3671 peer->borg = xmt_ty; 3672 peer->flip = -peer->flip; 3673 } 3674 L_SUB(&xmt_ty, &xmt_tx); 3675 LFPTOD(&xmt_ty, peer->xleave); 3676 #ifdef AUTOKEY 3677 DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d keyid %08x len %zu index %d\n", 3678 current_time, latoa(peer->dstadr), 3679 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen, 3680 peer->keynumber)); 3681 #else /* !AUTOKEY follows */ 3682 DPRINTF(1, ("peer_xmit: at %ld %s->%s mode %d keyid %08x len %d\n", 3683 current_time, peer->dstadr ? 3684 ntoa(&peer->dstadr->sin) : "-", 3685 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen)); 3686 #endif /* !AUTOKEY */ 3687 3688 return; 3689 } 3690 3691 3692 #ifdef LEAP_SMEAR 3693 3694 static void 3695 leap_smear_add_offs( 3696 l_fp *t, 3697 l_fp *t_recv 3698 ) 3699 { 3700 3701 L_ADD(t, &leap_smear.offset); 3702 3703 return; 3704 } 3705 3706 #endif /* LEAP_SMEAR */ 3707 3708 3709 /* 3710 * fast_xmit - Send packet for nonpersistent association. Note that 3711 * neither the source or destination can be a broadcast address. 3712 */ 3713 static void 3714 fast_xmit( 3715 struct recvbuf *rbufp, /* receive packet pointer */ 3716 int xmode, /* receive mode */ 3717 keyid_t xkeyid, /* transmit key ID */ 3718 int flags /* restrict mask */ 3719 ) 3720 { 3721 struct pkt xpkt; /* transmit packet structure */ 3722 struct pkt *rpkt; /* receive packet structure */ 3723 l_fp xmt_tx, xmt_ty; 3724 size_t sendlen; 3725 #ifdef AUTOKEY 3726 u_int32 temp32; 3727 #endif 3728 3729 /* 3730 * Initialize transmit packet header fields from the receive 3731 * buffer provided. We leave the fields intact as received, but 3732 * set the peer poll at the maximum of the receive peer poll and 3733 * the system minimum poll (ntp_minpoll). This is for KoD rate 3734 * control and not strictly specification compliant, but doesn't 3735 * break anything. 
3736 * 3737 * If the gazinta was from a multicast address, the gazoutta 3738 * must go out another way. 3739 */ 3740 rpkt = &rbufp->recv_pkt; 3741 if (rbufp->dstadr->flags & INT_MCASTOPEN) 3742 rbufp->dstadr = findinterface(&rbufp->recv_srcadr); 3743 3744 /* 3745 * If this is a kiss-o'-death (KoD) packet, show leap 3746 * unsynchronized, stratum zero, reference ID the four-character 3747 * kiss code and system root delay. Note we don't reveal the 3748 * local time, so these packets can't be used for 3749 * synchronization. 3750 */ 3751 if (flags & RES_KOD) { 3752 sys_kodsent++; 3753 xpkt.li_vn_mode = PKT_LI_VN_MODE(LEAP_NOTINSYNC, 3754 PKT_VERSION(rpkt->li_vn_mode), xmode); 3755 xpkt.stratum = STRATUM_PKT_UNSPEC; 3756 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3757 xpkt.precision = rpkt->precision; 3758 memcpy(&xpkt.refid, "RATE", 4); 3759 xpkt.rootdelay = rpkt->rootdelay; 3760 xpkt.rootdisp = rpkt->rootdisp; 3761 xpkt.reftime = rpkt->reftime; 3762 xpkt.org = rpkt->xmt; 3763 xpkt.rec = rpkt->xmt; 3764 xpkt.xmt = rpkt->xmt; 3765 3766 /* 3767 * This is a normal packet. Use the system variables. 3768 */ 3769 } else { 3770 #ifdef LEAP_SMEAR 3771 /* 3772 * Make copies of the variables which can be affected by smearing. 3773 */ 3774 l_fp this_ref_time; 3775 l_fp this_recv_time; 3776 #endif 3777 3778 /* 3779 * If we are inside the leap smear interval we add the current smear offset to 3780 * the packet receive time, to the packet transmit time, and eventually to the 3781 * reftime to make sure the reftime isn't later than the transmit/receive times. 3782 */ 3783 xpkt.li_vn_mode = PKT_LI_VN_MODE(xmt_leap, 3784 PKT_VERSION(rpkt->li_vn_mode), xmode); 3785 3786 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3787 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3788 xpkt.precision = sys_precision; 3789 xpkt.refid = sys_refid; 3790 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3791 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3792 3793 #ifdef LEAP_SMEAR 3794 this_ref_time = sys_reftime; 3795 if (leap_smear.in_progress) { 3796 leap_smear_add_offs(&this_ref_time, NULL); 3797 xpkt.refid = convertLFPToRefID(leap_smear.offset); 3798 DPRINTF(2, ("fast_xmit: leap_smear.in_progress: refid %8x, smear %s\n", 3799 ntohl(xpkt.refid), 3800 lfptoa(&leap_smear.offset, 8) 3801 )); 3802 } 3803 HTONL_FP(&this_ref_time, &xpkt.reftime); 3804 #else 3805 HTONL_FP(&sys_reftime, &xpkt.reftime); 3806 #endif 3807 3808 xpkt.org = rpkt->xmt; 3809 3810 #ifdef LEAP_SMEAR 3811 this_recv_time = rbufp->recv_time; 3812 if (leap_smear.in_progress) 3813 leap_smear_add_offs(&this_recv_time, NULL); 3814 HTONL_FP(&this_recv_time, &xpkt.rec); 3815 #else 3816 HTONL_FP(&rbufp->recv_time, &xpkt.rec); 3817 #endif 3818 3819 get_systime(&xmt_tx); 3820 #ifdef LEAP_SMEAR 3821 if (leap_smear.in_progress) 3822 leap_smear_add_offs(&xmt_tx, &this_recv_time); 3823 #endif 3824 HTONL_FP(&xmt_tx, &xpkt.xmt); 3825 } 3826 3827 #ifdef HAVE_NTP_SIGND 3828 if (flags & RES_MSSNTP) { 3829 send_via_ntp_signd(rbufp, xmode, xkeyid, flags, &xpkt); 3830 return; 3831 } 3832 #endif /* HAVE_NTP_SIGND */ 3833 3834 /* 3835 * If the received packet contains a MAC, the transmitted packet 3836 * is authenticated and contains a MAC. If not, the transmitted 3837 * packet is not authenticated. 
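 * (The test below goes by length: a bare header is LEN_PKT_NOMAC,
 * 48 octets, while an appended MAC adds a 4-octet key ID plus a
 * 16- or 20-octet digest, so anything longer than 48 octets is
 * answered through the authenticated path.)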
3838 */ 3839 sendlen = LEN_PKT_NOMAC; 3840 if (rbufp->recv_length == sendlen) { 3841 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, 3842 sendlen); 3843 DPRINTF(1, ("fast_xmit: at %ld %s->%s mode %d len %lu\n", 3844 current_time, stoa(&rbufp->dstadr->sin), 3845 stoa(&rbufp->recv_srcadr), xmode, 3846 (u_long)sendlen)); 3847 return; 3848 } 3849 3850 /* 3851 * The received packet contains a MAC, so the transmitted packet 3852 * must be authenticated. For symmetric key cryptography, use 3853 * the predefined and trusted symmetric keys to generate the 3854 * cryptosum. For autokey cryptography, use the server private 3855 * value to generate the cookie, which is unique for every 3856 * source-destination-key ID combination. 3857 */ 3858 #ifdef AUTOKEY 3859 if (xkeyid > NTP_MAXKEY) { 3860 keyid_t cookie; 3861 3862 /* 3863 * The only way to get here is a reply to a legitimate 3864 * client request message, so the mode must be 3865 * MODE_SERVER. If an extension field is present, there 3866 * can be only one and that must be a command. Do what 3867 * needs, but with private value of zero so the poor 3868 * jerk can decode it. If no extension field is present, 3869 * use the cookie to generate the session key. 3870 */ 3871 cookie = session_key(&rbufp->recv_srcadr, 3872 &rbufp->dstadr->sin, 0, sys_private, 0); 3873 if ((size_t)rbufp->recv_length > sendlen + MAX_MAC_LEN) { 3874 session_key(&rbufp->dstadr->sin, 3875 &rbufp->recv_srcadr, xkeyid, 0, 2); 3876 temp32 = CRYPTO_RESP; 3877 rpkt->exten[0] |= htonl(temp32); 3878 sendlen += crypto_xmit(NULL, &xpkt, rbufp, 3879 sendlen, (struct exten *)rpkt->exten, 3880 cookie); 3881 } else { 3882 session_key(&rbufp->dstadr->sin, 3883 &rbufp->recv_srcadr, xkeyid, cookie, 2); 3884 } 3885 } 3886 #endif /* AUTOKEY */ 3887 get_systime(&xmt_tx); 3888 sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3889 #ifdef AUTOKEY 3890 if (xkeyid > NTP_MAXKEY) 3891 authtrust(xkeyid, 0); 3892 #endif /* AUTOKEY */ 3893 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, sendlen); 3894 get_systime(&xmt_ty); 3895 L_SUB(&xmt_ty, &xmt_tx); 3896 sys_authdelay = xmt_ty; 3897 DPRINTF(1, ("fast_xmit: at %ld %s->%s mode %d keyid %08x len %lu\n", 3898 current_time, ntoa(&rbufp->dstadr->sin), 3899 ntoa(&rbufp->recv_srcadr), xmode, xkeyid, 3900 (u_long)sendlen)); 3901 } 3902 3903 3904 /* 3905 * pool_xmit - resolve hostname or send unicast solicitation for pool. 
3906 */ 3907 static void 3908 pool_xmit( 3909 struct peer *pool /* pool solicitor association */ 3910 ) 3911 { 3912 #ifdef WORKER 3913 struct pkt xpkt; /* transmit packet structure */ 3914 struct addrinfo hints; 3915 int rc; 3916 struct interface * lcladr; 3917 sockaddr_u * rmtadr; 3918 int restrict_mask; 3919 struct peer * p; 3920 l_fp xmt_tx; 3921 3922 if (NULL == pool->ai) { 3923 if (pool->addrs != NULL) { 3924 /* free() is used with copy_addrinfo_list() */ 3925 free(pool->addrs); 3926 pool->addrs = NULL; 3927 } 3928 ZERO(hints); 3929 hints.ai_family = AF(&pool->srcadr); 3930 hints.ai_socktype = SOCK_DGRAM; 3931 hints.ai_protocol = IPPROTO_UDP; 3932 /* ignore getaddrinfo_sometime() errors, we will retry */ 3933 rc = getaddrinfo_sometime( 3934 pool->hostname, 3935 "ntp", 3936 &hints, 3937 0, /* no retry */ 3938 &pool_name_resolved, 3939 (void *)(intptr_t)pool->associd); 3940 if (!rc) 3941 DPRINTF(1, ("pool DNS lookup %s started\n", 3942 pool->hostname)); 3943 else 3944 msyslog(LOG_ERR, 3945 "unable to start pool DNS %s: %m", 3946 pool->hostname); 3947 return; 3948 } 3949 3950 do { 3951 /* copy_addrinfo_list ai_addr points to a sockaddr_u */ 3952 rmtadr = (sockaddr_u *)(void *)pool->ai->ai_addr; 3953 pool->ai = pool->ai->ai_next; 3954 p = findexistingpeer(rmtadr, NULL, NULL, MODE_CLIENT, 0); 3955 } while (p != NULL && pool->ai != NULL); 3956 if (p != NULL) 3957 return; /* out of addresses, re-query DNS next poll */ 3958 restrict_mask = restrictions(rmtadr); 3959 if (RES_FLAGS & restrict_mask) 3960 restrict_source(rmtadr, 0, 3961 current_time + POOL_SOLICIT_WINDOW + 1); 3962 lcladr = findinterface(rmtadr); 3963 memset(&xpkt, 0, sizeof(xpkt)); 3964 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, pool->version, 3965 MODE_CLIENT); 3966 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3967 xpkt.ppoll = pool->hpoll; 3968 xpkt.precision = sys_precision; 3969 xpkt.refid = sys_refid; 3970 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3971 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3972 HTONL_FP(&sys_reftime, &xpkt.reftime); 3973 get_systime(&xmt_tx); 3974 pool->aorg = xmt_tx; 3975 HTONL_FP(&xmt_tx, &xpkt.xmt); 3976 sendpkt(rmtadr, lcladr, sys_ttl[pool->ttl], &xpkt, 3977 LEN_PKT_NOMAC); 3978 pool->sent++; 3979 pool->throttle += (1 << pool->minpoll) - 2; 3980 DPRINTF(1, ("pool_xmit: at %ld %s->%s pool\n", 3981 current_time, latoa(lcladr), stoa(rmtadr))); 3982 msyslog(LOG_INFO, "Soliciting pool server %s", stoa(rmtadr)); 3983 #endif /* WORKER */ 3984 } 3985 3986 3987 #ifdef AUTOKEY 3988 /* 3989 * group_test - test if this is the same group 3990 * 3991 * host assoc return action 3992 * none none 0 mobilize * 3993 * none group 0 mobilize * 3994 * group none 0 mobilize * 3995 * group group 1 mobilize 3996 * group different 1 ignore 3997 * * ignore if notrust 3998 */ 3999 int 4000 group_test( 4001 char *grp, 4002 char *ident 4003 ) 4004 { 4005 if (grp == NULL) 4006 return (0); 4007 4008 if (strcmp(grp, sys_groupname) == 0) 4009 return (0); 4010 4011 if (ident == NULL) 4012 return (1); 4013 4014 if (strcmp(grp, ident) == 0) 4015 return (0); 4016 4017 return (1); 4018 } 4019 #endif /* AUTOKEY */ 4020 4021 #ifdef WORKER 4022 void 4023 pool_name_resolved( 4024 int rescode, 4025 int gai_errno, 4026 void * context, 4027 const char * name, 4028 const char * service, 4029 const struct addrinfo * hints, 4030 const struct addrinfo * res 4031 ) 4032 { 4033 struct peer * pool; /* pool solicitor association */ 4034 associd_t assoc; 4035 4036 if (rescode) { 4037 msyslog(LOG_ERR, 4038 "error resolving pool %s: %s (%d)", 
4039 name, gai_strerror(rescode), rescode); 4040 return; 4041 } 4042 4043 assoc = (associd_t)(intptr_t)context; 4044 pool = findpeerbyassoc(assoc); 4045 if (NULL == pool) { 4046 msyslog(LOG_ERR, 4047 "Could not find assoc %u for pool DNS %s", 4048 assoc, name); 4049 return; 4050 } 4051 DPRINTF(1, ("pool DNS %s completed\n", name)); 4052 pool->addrs = copy_addrinfo_list(res); 4053 pool->ai = pool->addrs; 4054 pool_xmit(pool); 4055 4056 } 4057 #endif /* WORKER */ 4058 4059 4060 #ifdef AUTOKEY 4061 /* 4062 * key_expire - purge the key list 4063 */ 4064 void 4065 key_expire( 4066 struct peer *peer /* peer structure pointer */ 4067 ) 4068 { 4069 int i; 4070 4071 if (peer->keylist != NULL) { 4072 for (i = 0; i <= peer->keynumber; i++) 4073 authtrust(peer->keylist[i], 0); 4074 free(peer->keylist); 4075 peer->keylist = NULL; 4076 } 4077 value_free(&peer->sndval); 4078 peer->keynumber = 0; 4079 peer->flags &= ~FLAG_ASSOC; 4080 DPRINTF(1, ("key_expire: at %lu associd %d\n", current_time, 4081 peer->associd)); 4082 } 4083 #endif /* AUTOKEY */ 4084 4085 4086 /* 4087 * local_refid(peer) - check peer refid to avoid selecting peers 4088 * currently synced to this ntpd. 4089 */ 4090 static int 4091 local_refid( 4092 struct peer * p 4093 ) 4094 { 4095 endpt * unicast_ep; 4096 4097 if (p->dstadr != NULL && !(INT_MCASTIF & p->dstadr->flags)) 4098 unicast_ep = p->dstadr; 4099 else 4100 unicast_ep = findinterface(&p->srcadr); 4101 4102 if (unicast_ep != NULL && p->refid == unicast_ep->addr_refid) 4103 return TRUE; 4104 else 4105 return FALSE; 4106 } 4107 4108 4109 /* 4110 * Determine if the peer is unfit for synchronization 4111 * 4112 * A peer is unfit for synchronization if 4113 * > TEST10 bad leap or stratum below floor or at or above ceiling 4114 * > TEST11 root distance exceeded for remote peer 4115 * > TEST12 a direct or indirect synchronization loop would form 4116 * > TEST13 unreachable or noselect 4117 */ 4118 int /* FALSE if fit, TRUE if unfit */ 4119 peer_unfit( 4120 struct peer *peer /* peer structure pointer */ 4121 ) 4122 { 4123 int rval = 0; 4124 4125 /* 4126 * A stratum error occurs if (1) the server has never been 4127 * synchronized, (2) the server stratum is below the floor or 4128 * greater than or equal to the ceiling. 4129 */ 4130 if ( peer->leap == LEAP_NOTINSYNC 4131 || peer->stratum < sys_floor 4132 || peer->stratum >= sys_ceiling) 4133 rval |= TEST10; /* bad synch or stratum */ 4134 4135 /* 4136 * A distance error for a remote peer occurs if the root 4137 * distance is greater than or equal to the distance threshold 4138 * plus the increment due to one host poll interval. 4139 */ 4140 if ( !(peer->flags & FLAG_REFCLOCK) 4141 && root_distance(peer) >= sys_maxdist 4142 + clock_phi * ULOGTOD(peer->hpoll)) 4143 rval |= TEST11; /* distance exceeded */ 4144 4145 /* 4146 * A loop error occurs if the remote peer is synchronized to the 4147 * local peer or if the remote peer is synchronized to the same 4148 * server as the local peer but only if the remote peer is 4149 * neither a reference clock nor an orphan. 4150 */ 4151 if (peer->stratum > 1 && local_refid(peer)) 4152 rval |= TEST12; /* synchronization loop */ 4153 4154 /* 4155 * An unreachable error occurs if the server is unreachable or 4156 * the noselect bit is set. 
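 *
 * As a worked example of the distance test above (assuming the
 * defaults sys_maxdist = MAXDISTANCE = 1.5 s and clock_phi = 15e-6
 * s/s): with a host poll interval of 2^6 = 64 s the threshold is
 * 1.5 + 15e-6 * 64, about 1.501 s, so a peer whose root distance
 * reaches that value is flagged with TEST11.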
4157 */ 4158 if (!peer->reach || (peer->flags & FLAG_NOSELECT)) 4159 rval |= TEST13; /* unreachable */ 4160 4161 peer->flash &= ~PEER_TEST_MASK; 4162 peer->flash |= rval; 4163 return (rval); 4164 } 4165 4166 4167 /* 4168 * Find the precision of this particular machine 4169 */ 4170 #define MINSTEP 20e-9 /* minimum clock increment (s) */ 4171 #define MAXSTEP 1 /* maximum clock increment (s) */ 4172 #define MINCHANGES 12 /* minimum number of step samples */ 4173 #define MAXLOOPS ((int)(1. / MINSTEP)) /* avoid infinite loop */ 4174 4175 /* 4176 * This routine measures the system precision defined as the minimum of 4177 * a sequence of differences between successive readings of the system 4178 * clock. However, if a difference is less than MINSTEP, the clock has 4179 * been read more than once during a clock tick and the difference is 4180 * ignored. We set MINSTEP greater than zero in case something happens 4181 * like a cache miss, and to tolerate underlying system clocks which 4182 * ensure each reading is strictly greater than prior readings while 4183 * using an underlying stepping (not interpolated) clock. 4184 * 4185 * sys_tick and sys_precision represent the time to read the clock for 4186 * systems with high-precision clocks, and the tick interval or step 4187 * size for lower-precision stepping clocks. 4188 * 4189 * This routine also measures the time to read the clock on stepping 4190 * system clocks by counting the number of readings between changes of 4191 * the underlying clock. With either type of clock, the minimum time 4192 * to read the clock is saved as sys_fuzz, and used to ensure the 4193 * get_systime() readings always increase and are fuzzed below sys_fuzz. 4194 */ 4195 void 4196 measure_precision(void) 4197 { 4198 /* 4199 * With sys_fuzz set to zero, get_systime() fuzzing of low bits 4200 * is effectively disabled. trunc_os_clock is FALSE to disable 4201 * get_ostime() simulation of a low-precision system clock. 4202 */ 4203 set_sys_fuzz(0.); 4204 trunc_os_clock = FALSE; 4205 measured_tick = measure_tick_fuzz(); 4206 set_sys_tick_precision(measured_tick); 4207 msyslog(LOG_INFO, "proto: precision = %.3f usec (%d)", 4208 sys_tick * 1e6, sys_precision); 4209 if (sys_fuzz < sys_tick) { 4210 msyslog(LOG_NOTICE, "proto: fuzz beneath %.3f usec", 4211 sys_fuzz * 1e6); 4212 } 4213 } 4214 4215 4216 /* 4217 * measure_tick_fuzz() 4218 * 4219 * measures the minimum time to read the clock (stored in sys_fuzz) 4220 * and returns the tick, the larger of the minimum increment observed 4221 * between successive clock readings and the time to read the clock. 
4222 */ 4223 double 4224 measure_tick_fuzz(void) 4225 { 4226 l_fp minstep; /* MINSTEP as l_fp */ 4227 l_fp val; /* current seconds fraction */ 4228 l_fp last; /* last seconds fraction */ 4229 l_fp ldiff; /* val - last */ 4230 double tick; /* computed tick value */ 4231 double diff; 4232 long repeats; 4233 long max_repeats; 4234 int changes; 4235 int i; /* log2 precision */ 4236 4237 tick = MAXSTEP; 4238 max_repeats = 0; 4239 repeats = 0; 4240 changes = 0; 4241 DTOLFP(MINSTEP, &minstep); 4242 get_systime(&last); 4243 for (i = 0; i < MAXLOOPS && changes < MINCHANGES; i++) { 4244 get_systime(&val); 4245 ldiff = val; 4246 L_SUB(&ldiff, &last); 4247 last = val; 4248 if (L_ISGT(&ldiff, &minstep)) { 4249 max_repeats = max(repeats, max_repeats); 4250 repeats = 0; 4251 changes++; 4252 LFPTOD(&ldiff, diff); 4253 tick = min(diff, tick); 4254 } else { 4255 repeats++; 4256 } 4257 } 4258 if (changes < MINCHANGES) { 4259 msyslog(LOG_ERR, "Fatal error: precision could not be measured (MINSTEP too large?)"); 4260 exit(1); 4261 } 4262 4263 if (0 == max_repeats) { 4264 set_sys_fuzz(tick); 4265 } else { 4266 set_sys_fuzz(tick / max_repeats); 4267 } 4268 4269 return tick; 4270 } 4271 4272 4273 void 4274 set_sys_tick_precision( 4275 double tick 4276 ) 4277 { 4278 int i; 4279 4280 if (tick > 1.) { 4281 msyslog(LOG_ERR, 4282 "unsupported tick %.3f > 1s ignored", tick); 4283 return; 4284 } 4285 if (tick < measured_tick) { 4286 msyslog(LOG_ERR, 4287 "proto: tick %.3f less than measured tick %.3f, ignored", 4288 tick, measured_tick); 4289 return; 4290 } else if (tick > measured_tick) { 4291 trunc_os_clock = TRUE; 4292 msyslog(LOG_NOTICE, 4293 "proto: truncating system clock to multiples of %.9f", 4294 tick); 4295 } 4296 sys_tick = tick; 4297 4298 /* 4299 * Find the nearest power of two. 4300 */ 4301 for (i = 0; tick <= 1; i--) 4302 tick *= 2; 4303 if (tick - 1 > 1 - tick / 2) 4304 i++; 4305 4306 sys_precision = (s_char)i; 4307 } 4308 4309 4310 /* 4311 * init_proto - initialize the protocol module's data 4312 */ 4313 void 4314 init_proto(void) 4315 { 4316 l_fp dummy; 4317 int i; 4318 4319 /* 4320 * Fill in the sys_* stuff. Default is don't listen to 4321 * broadcasting, require authentication. 4322 */ 4323 set_sys_leap(LEAP_NOTINSYNC); 4324 sys_stratum = STRATUM_UNSPEC; 4325 memcpy(&sys_refid, "INIT", 4); 4326 sys_peer = NULL; 4327 sys_rootdelay = 0; 4328 sys_rootdisp = 0; 4329 L_CLR(&sys_reftime); 4330 sys_jitter = 0; 4331 measure_precision(); 4332 get_systime(&dummy); 4333 sys_survivors = 0; 4334 sys_manycastserver = 0; 4335 sys_bclient = 0; 4336 sys_bdelay = 0; 4337 sys_authenticate = 1; 4338 sys_stattime = current_time; 4339 orphwait = current_time + sys_orphwait; 4340 proto_clr_stats(); 4341 for (i = 0; i < MAX_TTL; i++) { 4342 sys_ttl[i] = (u_char)((i * 256) / MAX_TTL); 4343 sys_ttlmax = i; 4344 } 4345 hardpps_enable = 0; 4346 stats_control = 1; 4347 } 4348 4349 4350 /* 4351 * proto_config - configure the protocol module 4352 */ 4353 void 4354 proto_config( 4355 int item, 4356 u_long value, 4357 double dvalue, 4358 sockaddr_u *svalue 4359 ) 4360 { 4361 /* 4362 * Figure out what he wants to change, then do it 4363 */ 4364 DPRINTF(2, ("proto_config: code %d value %lu dvalue %lf\n", 4365 item, value, dvalue)); 4366 4367 switch (item) { 4368 4369 /* 4370 * enable and disable commands - arguments are Boolean. 
4371 */ 4372 case PROTO_AUTHENTICATE: /* authentication (auth) */ 4373 sys_authenticate = value; 4374 break; 4375 4376 case PROTO_BROADCLIENT: /* broadcast client (bclient) */ 4377 sys_bclient = (int)value; 4378 if (sys_bclient == 0) 4379 io_unsetbclient(); 4380 else 4381 io_setbclient(); 4382 break; 4383 4384 #ifdef REFCLOCK 4385 case PROTO_CAL: /* refclock calibrate (calibrate) */ 4386 cal_enable = value; 4387 break; 4388 #endif /* REFCLOCK */ 4389 4390 case PROTO_KERNEL: /* kernel discipline (kernel) */ 4391 select_loop(value); 4392 break; 4393 4394 case PROTO_MONITOR: /* monitoring (monitor) */ 4395 if (value) 4396 mon_start(MON_ON); 4397 else { 4398 mon_stop(MON_ON); 4399 if (mon_enabled) 4400 msyslog(LOG_WARNING, 4401 "restrict: 'monitor' cannot be disabled while 'limited' is enabled"); 4402 } 4403 break; 4404 4405 case PROTO_NTP: /* NTP discipline (ntp) */ 4406 ntp_enable = value; 4407 break; 4408 4409 case PROTO_MODE7: /* mode7 management (ntpdc) */ 4410 ntp_mode7 = value; 4411 break; 4412 4413 case PROTO_PPS: /* PPS discipline (pps) */ 4414 hardpps_enable = value; 4415 break; 4416 4417 case PROTO_FILEGEN: /* statistics (stats) */ 4418 stats_control = value; 4419 break; 4420 4421 /* 4422 * tos command - arguments are double, sometimes cast to int 4423 */ 4424 case PROTO_BEACON: /* manycast beacon (beacon) */ 4425 sys_beacon = (int)dvalue; 4426 break; 4427 4428 case PROTO_BROADDELAY: /* default broadcast delay (bdelay) */ 4429 sys_bdelay = dvalue; 4430 break; 4431 4432 case PROTO_CEILING: /* stratum ceiling (ceiling) */ 4433 sys_ceiling = (int)dvalue; 4434 break; 4435 4436 case PROTO_COHORT: /* cohort switch (cohort) */ 4437 sys_cohort = (int)dvalue; 4438 break; 4439 4440 case PROTO_FLOOR: /* stratum floor (floor) */ 4441 sys_floor = (int)dvalue; 4442 break; 4443 4444 case PROTO_MAXCLOCK: /* maximum candidates (maxclock) */ 4445 sys_maxclock = (int)dvalue; 4446 break; 4447 4448 case PROTO_MAXDIST: /* select threshold (maxdist) */ 4449 sys_maxdist = dvalue; 4450 break; 4451 4452 case PROTO_CALLDELAY: /* modem call delay (mdelay) */ 4453 break; /* NOT USED */ 4454 4455 case PROTO_MINCLOCK: /* minimum candidates (minclock) */ 4456 sys_minclock = (int)dvalue; 4457 break; 4458 4459 case PROTO_MINDISP: /* minimum distance (mindist) */ 4460 sys_mindisp = dvalue; 4461 break; 4462 4463 case PROTO_MINSANE: /* minimum survivors (minsane) */ 4464 sys_minsane = (int)dvalue; 4465 break; 4466 4467 case PROTO_ORPHAN: /* orphan stratum (orphan) */ 4468 sys_orphan = (int)dvalue; 4469 break; 4470 4471 case PROTO_ORPHWAIT: /* orphan wait (orphwait) */ 4472 orphwait -= sys_orphwait; 4473 sys_orphwait = (int)dvalue; 4474 orphwait += sys_orphwait; 4475 break; 4476 4477 /* 4478 * Miscellaneous commands 4479 */ 4480 case PROTO_MULTICAST_ADD: /* add group address */ 4481 if (svalue != NULL) 4482 io_multicast_add(svalue); 4483 sys_bclient = 1; 4484 break; 4485 4486 case PROTO_MULTICAST_DEL: /* delete group address */ 4487 if (svalue != NULL) 4488 io_multicast_del(svalue); 4489 break; 4490 4491 /* 4492 * Unpeer Early policy choices 4493 */ 4494 4495 case PROTO_UECRYPTO: /* Crypto */ 4496 unpeer_crypto_early = value; 4497 break; 4498 4499 case PROTO_UECRYPTONAK: /* Crypto_NAK */ 4500 unpeer_crypto_nak_early = value; 4501 break; 4502 4503 case PROTO_UEDIGEST: /* Digest */ 4504 unpeer_digest_early = value; 4505 break; 4506 4507 default: 4508 msyslog(LOG_NOTICE, 4509 "proto: unsupported option %d", item); 4510 } 4511 } 4512 4513 4514 /* 4515 * proto_clr_stats - clear protocol stat counters 4516 */ 4517 void 4518 
proto_clr_stats(void) 4519 { 4520 sys_stattime = current_time; 4521 sys_received = 0; 4522 sys_processed = 0; 4523 sys_newversion = 0; 4524 sys_oldversion = 0; 4525 sys_declined = 0; 4526 sys_restricted = 0; 4527 sys_badlength = 0; 4528 sys_badauth = 0; 4529 sys_limitrejected = 0; 4530 sys_kodsent = 0; 4531 } 4532
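

/*
 * Illustration only (a sketch of the intended mapping, assuming the
 * configuration code passes "tos" values through the double
 * argument): a configuration line such as
 *
 *	tos maxdist 1.2 minsane 2
 *
 * would typically reach this module as
 *
 *	proto_config(PROTO_MAXDIST, 0, 1.2, NULL);
 *	proto_config(PROTO_MINSANE, 0, 2.0, NULL);
 *
 * leaving sys_maxdist at 1.2 s and sys_minsane at 2. The actual call
 * sites live in the configuration code, so treat this only as a
 * sketch, not a definitive interface.
 */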