/*
 * ntp_proto.c - NTP version 4 protocol machinery
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "ntpd.h"
#include "ntp_stdlib.h"
#include "ntp_unixtime.h"
#include "ntp_control.h"
#include "ntp_string.h"
#include "ntp_leapsec.h"

#include <stdio.h>
#ifdef HAVE_LIBSCF_H
#include <libscf.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/*
 * This macro defines the authentication state. If x is 1 authentication
 * is required; otherwise it is optional.
 */
#define	AUTH(x, y)	((x) ? (y) == AUTH_OK : (y) == AUTH_OK || \
			    (y) == AUTH_NONE)

#define	AUTH_NONE	0	/* authentication not required */
#define	AUTH_OK		1	/* authentication OK */
#define	AUTH_ERROR	2	/* authentication error */
#define	AUTH_CRYPTO	3	/* crypto_NAK */
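/*
 * Editor's sketch (hypothetical helper, not part of the original
 * machinery and not called anywhere): it shows how the AUTH() filter
 * collapses the four outcomes above into a yes/no decision.  When a
 * MAC is mandatory only AUTH_OK passes; otherwise AUTH_NONE (no MAC
 * present at all) is acceptable as well.
 */
static int
auth_acceptable(
	int	mandatory,	/* nonzero if a valid MAC is required */
	int	outcome		/* AUTH_NONE, AUTH_OK, AUTH_ERROR, AUTH_CRYPTO */
	)
{
	return AUTH(mandatory, outcome);
}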
/*
 * traffic shaping parameters
 */
#define	NTP_IBURST	6	/* packets in iburst */
#define	RESP_DELAY	1	/* refclock burst delay (s) */

/*
 * pool soliciting restriction duration (s)
 */
#define	POOL_SOLICIT_WINDOW	8

/*
 * peer_select groups statistics for a peer used by clock_select() and
 * clock_cluster().
 */
typedef struct peer_select_tag {
	struct peer *	peer;
	double		synch;	/* sync distance */
	double		error;	/* jitter */
	double		seljit;	/* selection jitter */
} peer_select;

/*
 * System variables are declared here. Unless specified otherwise, all
 * times are in seconds.
 */
u_char	sys_leap;		/* system leap indicator */
u_char	sys_stratum;		/* system stratum */
s_char	sys_precision;		/* local clock precision (log2 s) */
double	sys_rootdelay;		/* roundtrip delay to primary source */
double	sys_rootdisp;		/* dispersion to primary source */
u_int32 sys_refid;		/* reference id (network byte order) */
l_fp	sys_reftime;		/* last update time */
struct peer *sys_peer;		/* current peer */

/*
 * Rate controls. Leaky buckets are used to throttle the packet
 * transmission rates in order to protect busy servers such as at NIST
 * and USNO. There is a counter for each association and another for KoD
 * packets. The association counter decrements each second, but not
 * below zero. Each time a packet is sent the counter is incremented by
 * a configurable value representing the average interval between
 * packets. A packet is delayed as long as the counter is greater than
 * zero. Note this does not affect the time value computations.
 */
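/*
 * Editor's sketch (hypothetical helper, not used below): the leaky
 * bucket rule described above, with the per-second drain handled
 * elsewhere by the timer.  "headway" stands in for the association
 * counter and "average" for the configured mean packet interval.
 */
static int
leaky_bucket_may_send(
	u_long *headway,	/* association counter (s) */
	u_long	average		/* average interval charged per packet (s) */
	)
{
	if (*headway > 0)
		return (0);	/* still draining; delay the packet */
	*headway += average;	/* charge for the packet being sent */
	return (1);
}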
/*
 * Nonspecified system state variables
 */
int	sys_bclient;		/* broadcast client enable */
double	sys_bdelay;		/* broadcast client default delay */
int	sys_authenticate;	/* require authentication for config */
l_fp	sys_authdelay;		/* authentication delay */
double	sys_offset;		/* current local clock offset */
double	sys_mindisp = MINDISPERSE; /* minimum distance (s) */
double	sys_maxdist = MAXDISTANCE; /* selection threshold */
double	sys_jitter;		/* system jitter */
u_long	sys_epoch;		/* last clock update time */
static	double sys_clockhop;	/* clockhop threshold */
static int leap_vote_ins;	/* leap consensus for insert */
static int leap_vote_del;	/* leap consensus for delete */
keyid_t	sys_private;		/* private value for session seed */
int	sys_manycastserver;	/* respond to manycast client pkts */
int	ntp_mode7;		/* respond to ntpdc (mode7) */
int	peer_ntpdate;		/* active peers in ntpdate mode */
int	sys_survivors;		/* truest of the truechimers */
char	*sys_ident = NULL;	/* identity scheme */

/*
 * TOS and multicast mapping stuff
 */
int	sys_floor = 0;		/* cluster stratum floor */
int	sys_ceiling = STRATUM_UNSPEC - 1; /* cluster stratum ceiling */
int	sys_minsane = 1;	/* minimum candidates */
int	sys_minclock = NTP_MINCLOCK; /* minimum survivors */
int	sys_maxclock = NTP_MAXCLOCK; /* maximum candidates */
int	sys_cohort = 0;		/* cohort switch */
int	sys_orphan = STRATUM_UNSPEC + 1; /* orphan stratum */
int	sys_orphwait = NTP_ORPHWAIT; /* orphan wait */
int	sys_beacon = BEACON;	/* manycast beacon interval */
int	sys_ttlmax;		/* max ttl mapping vector index */
u_char	sys_ttl[MAX_TTL];	/* ttl mapping vector */

/*
 * Statistics counters - first the good, then the bad
 */
u_long	sys_stattime;		/* elapsed time */
u_long	sys_received;		/* packets received */
u_long	sys_processed;		/* packets for this host */
u_long	sys_newversion;		/* current version */
u_long	sys_oldversion;		/* old version */
u_long	sys_restricted;		/* access denied */
u_long	sys_badlength;		/* bad length or format */
u_long	sys_badauth;		/* bad authentication */
u_long	sys_declined;		/* declined */
u_long	sys_limitrejected;	/* rate exceeded */
u_long	sys_kodsent;		/* KoD sent */

static	double	root_distance	(struct peer *);
static	void	clock_combine	(peer_select *, int, int);
static	void	peer_xmit	(struct peer *);
static	void	fast_xmit	(struct recvbuf *, int, keyid_t, int);
static	void	pool_xmit	(struct peer *);
static	void	clock_update	(struct peer *);
static	void	measure_precision(void);
static	double	measure_tick_fuzz(void);
static	int	local_refid	(struct peer *);
static	int	peer_unfit	(struct peer *);
#ifdef AUTOKEY
static	int	group_test	(char *, char *);
#endif /* AUTOKEY */
#ifdef WORKER
void	pool_name_resolved	(int, int, void *, const char *,
				 const char *, const struct addrinfo *,
				 const struct addrinfo *);
#endif /* WORKER */


/*
 * transmit - transmit procedure called by poll timeout
 */
void
transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char	hpoll;

	/*
	 * The polling state machine. There are two kinds of machines,
	 * those that never expect a reply (broadcast and manycast
	 * server modes) and those that do (all other modes). The dance
	 * is intricate...
	 */
	hpoll = peer->hpoll;

	/*
	 * In broadcast mode the poll interval is never changed from
	 * minpoll.
	 */
	if (peer->cast_flags & (MDF_BCAST | MDF_MCAST)) {
		peer->outdate = current_time;
		if (sys_leap != LEAP_NOTINSYNC)
			peer_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In manycast mode we start with unity ttl. The ttl is
	 * increased by one for each poll until either sys_maxclock
	 * servers have been found or the maximum ttl is reached. When
	 * sys_maxclock servers are found we stop polling until one or
	 * more servers have timed out or until less than sys_minclock
	 * associations turn up. In this case additional better servers
	 * are dragged in and preempt the existing ones. Once every
	 * sys_beacon seconds we are to transmit unconditionally, but
	 * this code is not quite right -- peer->unreach counts polls
	 * and is being compared with sys_beacon, so the beacons happen
	 * every sys_beacon polls.
	 */
	if (peer->cast_flags & MDF_ACAST) {
		peer->outdate = current_time;
		if (peer->unreach > sys_beacon) {
			peer->unreach = 0;
			peer->ttl = 0;
			peer_xmit(peer);
		} else if (sys_survivors < sys_minclock ||
		    peer_associations < sys_maxclock) {
			if (peer->ttl < (u_int32)sys_ttlmax)
				peer->ttl++;
			peer_xmit(peer);
		}
		peer->unreach++;
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * Pool associations transmit unicast solicitations when there
	 * are less than a hard limit of 2 * sys_maxclock associations,
	 * and either less than sys_minclock survivors or less than
	 * sys_maxclock associations. The hard limit prevents unbounded
	 * growth in associations if the system clock or network quality
	 * result in survivor count dipping below sys_minclock often.
	 * This was observed testing with pool, where sys_maxclock == 12
	 * resulted in 60 associations without the hard limit. A
	 * similar hard limit on manycastclient ephemeral associations
	 * may be appropriate.
	 */
	if (peer->cast_flags & MDF_POOL) {
		peer->outdate = current_time;
		if ((peer_associations <= 2 * sys_maxclock) &&
		    (peer_associations < sys_maxclock ||
		    sys_survivors < sys_minclock))
			pool_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In unicast modes the dance is much more intricate. It is
	 * designed to back off whenever possible to minimize network
	 * traffic.
	 */
	if (peer->burst == 0) {
		u_char oreach;

		/*
		 * Update the reachability status. If not heard for
		 * three consecutive polls, stuff infinity in the clock
		 * filter.
		 */
		oreach = peer->reach;
		peer->outdate = current_time;
		peer->unreach++;
		peer->reach <<= 1;
		if (!peer->reach) {

			/*
			 * Here the peer is unreachable. If it was
			 * previously reachable raise a trap. Send a
			 * burst if enabled.
			 */
			clock_filter(peer, 0., 0., MAXDISPERSE);
			if (oreach) {
				peer_unfit(peer);
				report_event(PEVNT_UNREACH, peer, NULL);
			}
			if ((peer->flags & FLAG_IBURST) &&
			    peer->retry == 0)
				peer->retry = NTP_RETRY;
		} else {

			/*
			 * Here the peer is reachable. Send a burst if
			 * enabled and the peer is fit. Reset unreach
			 * for persistent and ephemeral associations.
			 * Unreach is also reset for survivors in
			 * clock_select().
			 */
			hpoll = sys_poll;
			if (!(peer->flags & FLAG_PREEMPT))
				peer->unreach = 0;
			if ((peer->flags & FLAG_BURST) && peer->retry ==
			    0 && !peer_unfit(peer))
				peer->retry = NTP_RETRY;
		}

		/*
		 * Watch for timeout. If ephemeral, toss the rascal;
		 * otherwise, bump the poll interval. Note the
		 * poll_update() routine will clamp it to maxpoll.
		 * If preemptible and we have more peers than maxclock,
		 * and this peer has the minimum score of preemptibles,
		 * demobilize.
		 */
		if (peer->unreach >= NTP_UNREACH) {
			hpoll++;
			/* ephemeral: no FLAG_CONFIG nor FLAG_PREEMPT */
			if (!(peer->flags & (FLAG_CONFIG | FLAG_PREEMPT))) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
			if ((peer->flags & FLAG_PREEMPT) &&
			    (peer_associations > sys_maxclock) &&
			    score_all(peer)) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
		}
	} else {
		peer->burst--;
		if (peer->burst == 0) {

			/*
			 * If ntpdate mode and the clock has not been
			 * set and all peers have completed the burst,
			 * we declare a successful failure.
			 */
			if (mode_ntpdate) {
				peer_ntpdate--;
				if (peer_ntpdate == 0) {
					msyslog(LOG_NOTICE,
					    "ntpd: no servers found");
					if (!msyslog_term)
						printf(
						    "ntpd: no servers found\n");
					exit (0);
				}
			}
		}
	}
	if (peer->retry > 0)
		peer->retry--;

	/*
	 * Do not transmit if in broadcast client mode.
	 */
	if (peer->hmode != MODE_BCLIENT)
		peer_xmit(peer);
	poll_update(peer, hpoll);
}
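/*
 * Editor's sketch (hypothetical helper, not called anywhere): the
 * reachability register maintained in transmit() above and in
 * process_packet() is an 8-bit shift register.  Each poll shifts in a
 * zero, each reply sets the low bit, and the peer counts as reachable
 * while any bit survives, i.e. while a reply arrived within the last
 * eight polls.
 */
static int
reach_after_poll(
	u_char *reach,		/* per-peer reachability register */
	int	got_reply	/* nonzero if the poll was answered */
	)
{
	*reach <<= 1;
	if (got_reply)
		*reach |= 1;
	return (*reach != 0);
}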

/*
 * receive - receive procedure called for each packet received
 */
void
receive(
	struct recvbuf *rbufp
	)
{
	register struct peer *peer;	/* peer structure pointer */
	register struct pkt *pkt;	/* receive packet pointer */
	u_char	hisversion;		/* packet version */
	u_char	hisleap;		/* packet leap indicator */
	u_char	hismode;		/* packet mode */
	u_char	hisstratum;		/* packet stratum */
	u_short	restrict_mask;		/* restrict bits */
	int	has_mac;		/* length of MAC field */
	int	authlen;		/* offset of MAC field */
	int	is_authentic = 0;	/* cryptosum ok */
	int	retcode = AM_NOMATCH;	/* match code */
	keyid_t	skeyid = 0;		/* key IDs */
	u_int32	opcode = 0;		/* extension field opcode */
	sockaddr_u *dstadr_sin;		/* active runway */
	struct peer *peer2;		/* aux peer structure pointer */
	endpt	*match_ep;		/* newpeer() local address */
	l_fp	p_org;			/* origin timestamp */
	l_fp	p_rec;			/* receive timestamp */
	l_fp	p_xmt;			/* transmit timestamp */
#ifdef AUTOKEY
	char	hostname[NTP_MAXSTRLEN + 1];
	char	*groupname = NULL;
	struct autokey *ap;		/* autokey structure pointer */
	int	rval;			/* cookie snatcher */
	keyid_t	pkeyid = 0, tkeyid = 0;	/* key IDs */
#endif	/* AUTOKEY */
#ifdef HAVE_NTP_SIGND
	static unsigned char zero_key[16];
#endif /* HAVE_NTP_SIGND */

	/*
	 * Monitor the packet and get restrictions. Note that the packet
	 * length for control and private mode packets must be checked
	 * by the service routines. Some restrictions have to be handled
	 * later in order to generate a kiss-o'-death packet.
	 */
	/*
	 * Bogus port check is before anything, since it probably
	 * reveals a clogging attack.
	 */
	sys_received++;
	if (0 == SRCPORT(&rbufp->recv_srcadr)) {
		sys_badlength++;
		return;			/* bogus port */
	}
	restrict_mask = restrictions(&rbufp->recv_srcadr);
	DPRINTF(2, ("receive: at %ld %s<-%s flags %x restrict %03x\n",
		    current_time, stoa(&rbufp->dstadr->sin),
		    stoa(&rbufp->recv_srcadr),
		    rbufp->dstadr->flags, restrict_mask));
	pkt = &rbufp->recv_pkt;
	hisversion = PKT_VERSION(pkt->li_vn_mode);
	hisleap = PKT_LEAP(pkt->li_vn_mode);
	hismode = (int)PKT_MODE(pkt->li_vn_mode);
	hisstratum = PKT_TO_STRATUM(pkt->stratum);
	if (restrict_mask & RES_IGNORE) {
		sys_restricted++;
		return;			/* ignore everything */
	}
	if (hismode == MODE_PRIVATE) {
		if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) {
			sys_restricted++;
			return;		/* no query private */
		}
		process_private(rbufp, ((restrict_mask &
		    RES_NOMODIFY) == 0));
		return;
	}
	if (hismode == MODE_CONTROL) {
		if (restrict_mask & RES_NOQUERY) {
			sys_restricted++;
			return;		/* no query control */
		}
		process_control(rbufp, restrict_mask);
		return;
	}
	if (restrict_mask & RES_DONTSERVE) {
		sys_restricted++;
		return;			/* no time serve */
	}

	/*
	 * This is for testing. If restricted drop ten percent of
	 * surviving packets.
	 */
	if (restrict_mask & RES_FLAKE) {
		if ((double)ntp_random() / 0x7fffffff < .1) {
			sys_restricted++;
			return;		/* no flakeway */
		}
	}

	/*
	 * Version check must be after the query packets, since they
	 * intentionally use an early version.
	 */
	if (hisversion == NTP_VERSION) {
		sys_newversion++;		/* new version */
	} else if (!(restrict_mask & RES_VERSION) && hisversion >=
	    NTP_OLDVERSION) {
		sys_oldversion++;		/* previous version */
	} else {
		sys_badlength++;
		return;				/* old version */
	}

	/*
	 * Figure out his mode and validate the packet. This has some
	 * legacy raunch that probably should be removed. In very early
	 * NTP versions mode 0 was equivalent to what later versions
	 * would interpret as client mode.
	 */
	if (hismode == MODE_UNSPEC) {
		if (hisversion == NTP_OLDVERSION) {
			hismode = MODE_CLIENT;
		} else {
			sys_badlength++;
			return;			/* invalid mode */
		}
	}

	/*
	 * Parse the extension field if present. We figure out whether
	 * an extension field is present by measuring the MAC size. If
	 * the number of words following the packet header is 0, no MAC
	 * is present and the packet is not authenticated. If 1, the
	 * packet is a crypto-NAK; if 3, the packet is authenticated
	 * with DES; if 5, the packet is authenticated with MD5; if 6,
	 * the packet is authenticated with SHA. If 2 or 4, the packet
	 * is a runt and discarded forthwith. If greater than 6, an
	 * extension field is present, so we subtract the length of the
	 * field and go around again.
	 */
	authlen = LEN_PKT_NOMAC;
	has_mac = rbufp->recv_length - authlen;
	while (has_mac > 0) {
		u_int32	len;
#ifdef AUTOKEY
		u_int32	hostlen;
		struct exten *ep;
#endif /* AUTOKEY */

		if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) {
			sys_badlength++;
			return;			/* bad length */
		}
		if (has_mac <= (int)MAX_MAC_LEN) {
			skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]);
			break;

		} else {
			opcode = ntohl(((u_int32 *)pkt)[authlen / 4]);
			len = opcode & 0xffff;
			if (len % 4 != 0 || len < 4 || (int)len +
			    authlen > rbufp->recv_length) {
				sys_badlength++;
				return;		/* bad length */
			}
#ifdef AUTOKEY
			/*
			 * Extract calling group name for later. If
			 * sys_groupname is non-NULL, there must be
			 * a group name provided to elicit a response.
			 */
			if ((opcode & 0x3fff0000) == CRYPTO_ASSOC &&
			    sys_groupname != NULL) {
				ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4];
				hostlen = ntohl(ep->vallen);
				if (hostlen >= sizeof(hostname) ||
				    hostlen > len -
				    offsetof(struct exten, pkt)) {
					sys_badlength++;
					return;		/* bad length */
				}
				memcpy(hostname, &ep->pkt, hostlen);
				hostname[hostlen] = '\0';
				groupname = strchr(hostname, '@');
				if (groupname == NULL) {
					sys_declined++;
					return;
				}
				groupname++;
			}
#endif /* AUTOKEY */
			authlen += len;
			has_mac -= len;
		}
	}

	/*
	 * If has_mac is < 0 we had a malformed packet.
	 */
	if (has_mac < 0) {
		sys_badlength++;
		return;		/* bad length */
	}

	/*
	 * If authentication required, a MAC must be present.
	 */
	if (restrict_mask & RES_DONTTRUST && has_mac == 0) {
		sys_restricted++;
		return;			/* access denied */
	}

	/*
	 * Update the MRU list and finger the cloggers. It can be a
	 * little expensive, so turn it off for production use.
	 * RES_LIMITED and RES_KOD will be cleared in the returned
	 * restrict_mask unless one or both actions are warranted.
	 */
	restrict_mask = ntp_monitor(rbufp, restrict_mask);
	if (restrict_mask & RES_LIMITED) {
		sys_limitrejected++;
		if (!(restrict_mask & RES_KOD) || MODE_BROADCAST ==
		    hismode || MODE_SERVER == hismode) {
			if (MODE_SERVER == hismode)
				DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n",
				    stoa(&rbufp->recv_srcadr)));
			return;			/* rate exceeded */
		}
		if (hismode == MODE_CLIENT)
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		else
			fast_xmit(rbufp, MODE_ACTIVE, skeyid,
			    restrict_mask);
		return;				/* rate exceeded */
	}
	restrict_mask &= ~RES_KOD;

	/*
	 * We have tossed out as many buggy packets as possible early in
	 * the game to reduce the exposure to a clogging attack. Now we
	 * have to burn some cycles to find the association and
	 * authenticate the packet if required. Note that we burn only
	 * digest cycles, again to reduce exposure. There may be no
	 * matching association and that's okay.
	 *
	 * More on the autokey mambo. Normally the local interface is
	 * found when the association was mobilized with respect to a
	 * designated remote address. We assume packets arriving from
	 * the remote address arrive via this interface and the local
	 * address used to construct the autokey is the unicast address
	 * of the interface. However, if the sender is a broadcaster,
	 * the interface broadcast address is used instead.
	 * Notwithstanding this technobabble, if the sender is a
	 * multicaster, the broadcast address is null, so we use the
	 * unicast address anyway. Don't ask.
	 */
	peer = findpeer(rbufp, hismode, &retcode);
	dstadr_sin = &rbufp->dstadr->sin;
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);

	/*
	 * Authentication is conditioned by three switches:
	 *
	 * NOPEER  (RES_NOPEER) do not mobilize an association unless
	 *         authenticated
	 * NOTRUST (RES_DONTTRUST) do not allow access unless
	 *         authenticated (implies NOPEER)
	 * enable  (sys_authenticate) master NOPEER switch, by default
	 *         on
	 *
	 * The NOPEER and NOTRUST can be specified on a per-client basis
	 * using the restrict command. The enable switch if on implies
	 * NOPEER for all clients. There are four outcomes:
	 *
	 * NONE   the packet has no MAC
	 * OK     the packet has a MAC and authentication succeeds
	 * ERROR  the packet has a MAC and authentication fails
	 * CRYPTO crypto-NAK. The MAC has four octets only.
	 *
	 * Note: The AUTH(x, y) macro is used to filter outcomes. If x
	 * is zero, acceptable outcomes of y are NONE and OK. If x is
	 * one, the only acceptable outcome of y is OK.
	 */

	if (has_mac == 0) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_NONE; /* not required */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d len %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode,
			    authlen);
#endif
	} else if (has_mac == 4) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_CRYPTO; /* crypto-NAK */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d keyid %08x len %d auth %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, skeyid,
			    authlen + has_mac, is_authentic);
#endif

#ifdef HAVE_NTP_SIGND
		/*
		 * If the signature is 20 bytes long, the last 16 of
		 * which are zero, then this is a Microsoft client
		 * wanting AD-style authentication of the server's
		 * reply.
		 *
		 * This is described in Microsoft's WSPP docs, in MS-SNTP:
		 * http://msdn.microsoft.com/en-us/library/cc212930.aspx
		 */
	} else if (has_mac == MAX_MD5_LEN && (restrict_mask & RES_MSSNTP) &&
	    (retcode == AM_FXMIT || retcode == AM_NEWPASS) &&
	    (memcmp(zero_key, (char *)pkt + authlen + 4, MAX_MD5_LEN - 4) ==
	    0)) {
		is_authentic = AUTH_NONE;
#endif /* HAVE_NTP_SIGND */

	} else {
		restrict_mask &= ~RES_MSSNTP;
#ifdef AUTOKEY
		/*
		 * For autokey modes, generate the session key
		 * and install in the key cache. Use the socket
		 * broadcast or unicast address as appropriate.
		 */
		if (crypto_flags && skeyid > NTP_MAXKEY) {

			/*
			 * More on the autokey dance (AKD). A cookie is
			 * constructed from public and private values.
			 * For broadcast packets, the cookie is public
			 * (zero). For packets that match no
			 * association, the cookie is hashed from the
			 * addresses and private value. For server
			 * packets, the cookie was previously obtained
			 * from the server. For symmetric modes, the
			 * cookie was previously constructed using an
			 * agreement protocol; however, should PKI be
			 * unavailable, we construct a fake agreement as
			 * the EXOR of the peer and host cookies.
			 *
			 * hismode	ephemeral	persistent
			 * =======================================
			 * active	0		cookie#
			 * passive	0%		cookie#
			 * client	sys cookie	0%
			 * server	0%		sys cookie
			 * broadcast	0		0
			 *
			 * # if unsync, 0
			 * % can't happen
			 */
			if (has_mac < (int)MAX_MD5_LEN) {
				sys_badauth++;
				return;
			}
			if (hismode == MODE_BROADCAST) {

				/*
				 * For broadcaster, use the interface
				 * broadcast address when available;
				 * otherwise, use the unicast address
				 * found when the association was
				 * mobilized. However, if this is from
				 * the wildcard interface, game over.
				 */
				if (crypto_flags && rbufp->dstadr ==
				    ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) {
					sys_restricted++;
					return;		/* no wildcard */
				}
				pkeyid = 0;
				if (!SOCK_UNSPEC(&rbufp->dstadr->bcast))
					dstadr_sin =
					    &rbufp->dstadr->bcast;
			} else if (peer == NULL) {
				pkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin, 0,
				    sys_private, 0);
			} else {
				pkeyid = peer->pcookie;
			}

			/*
			 * The session key includes both the public
			 * values and cookie. In case of an extension
			 * field, the cookie used for authentication
			 * purposes is zero. Note the hash is saved for
			 * use later in the autokey mambo.
			 */
			if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) {
				session_key(&rbufp->recv_srcadr,
				    dstadr_sin, skeyid, 0, 2);
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 0);
			} else {
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 2);
			}

		}
#endif	/* AUTOKEY */

		/*
		 * Compute the cryptosum. Note a clogging attack may
		 * succeed in bloating the key cache. If an autokey,
		 * purge it immediately, since we won't be needing it
		 * again. If the packet is authentic, it can mobilize an
		 * association. Note that there is no key zero.
		 */
		if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen,
		    has_mac))
			is_authentic = AUTH_ERROR;
		else
			is_authentic = AUTH_OK;
#ifdef AUTOKEY
		if (crypto_flags && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
#endif	/* AUTOKEY */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d keyid %08x len %d auth %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, skeyid,
			    authlen + has_mac, is_authentic);
#endif
	}

	/*
	 * The association matching rules are implemented by a set of
	 * routines and an association table. A packet matching an
	 * association is processed by the peer process for that
	 * association. If there are no errors, an ephemeral association
	 * is mobilized: a broadcast packet mobilizes a broadcast client
	 * association; a manycast server packet mobilizes a manycast
	 * client association; a symmetric active packet mobilizes a
	 * symmetric passive association.
	 */
	switch (retcode) {

	/*
	 * This is a client mode packet not matching any association. If
	 * an ordinary client, simply toss a server mode packet back
	 * over the fence. If a manycast client, we have to work a
	 * little harder.
	 */
	case AM_FXMIT:

		/*
		 * If authentication OK, send a server reply; otherwise,
		 * send a crypto-NAK.
		 */
		if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) {
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_SERVER, skeyid,
				    restrict_mask);
			} else if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_SERVER, 0,
				    restrict_mask);
				sys_badauth++;
			} else {
				sys_restricted++;
			}
			return;			/* hooray */
		}

		/*
		 * This must be manycast. Do not respond if not
		 * configured as a manycast server.
		 */
		if (!sys_manycastserver) {
			sys_restricted++;
			return;			/* not enabled */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */

		/*
		 * Do not respond if we are not synchronized or our
		 * stratum is greater than the manycaster or the
		 * manycaster has already synchronized to us.
		 */
		if (sys_leap == LEAP_NOTINSYNC || sys_stratum >=
		    hisstratum || (!sys_cohort && sys_stratum ==
		    hisstratum + 1) || rbufp->dstadr->addr_refid ==
		    pkt->refid) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * Respond only if authentication succeeds. Don't do a
		 * crypto-NAK, as that would not be useful.
		 */
		if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic))
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		return;				/* hooray */

	/*
	 * This is a server mode packet returned in response to a client
	 * mode packet sent to a multicast group address (for
	 * manycastclient) or to a unicast address (for pool). The
	 * origin timestamp is a good nonce to reliably associate the
	 * reply with what was sent. If there is no match, that's
	 * curious and could be an intruder attempting to clog, so we
	 * just ignore it.
	 *
	 * If the packet is authentic and the manycastclient or pool
	 * association is found, we mobilize a client association and
	 * copy pertinent variables from the manycastclient or pool
	 * association to the new client association. If not, just
	 * ignore the packet.
	 *
	 * There is an implosion hazard at the manycast client, since
	 * the manycast servers send the server packet immediately. If
	 * the guy is already here, don't fire up a duplicate.
	 */
	case AM_MANYCAST:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if ((peer2 = findmanycastpeer(rbufp)) == NULL) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH((!(peer2->cast_flags & MDF_POOL) &&
		    sys_authenticate) | (restrict_mask & (RES_NOPEER |
		    RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
		 */
		if (hisleap == LEAP_NOTINSYNC || hisstratum <
		    sys_floor || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}
		peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr,
		    MODE_CLIENT, hisversion, peer2->minpoll,
		    peer2->maxpoll, FLAG_PREEMPT |
		    (FLAG_IBURST & peer2->flags), MDF_UCAST |
		    MDF_UCLNT, 0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_declined++;
			return;			/* ignore duplicate */
		}

		/*
		 * After each ephemeral pool association is spun,
		 * accelerate the next poll for the pool solicitor so
		 * the pool will fill promptly.
		 */
		if (peer2->cast_flags & MDF_POOL)
			peer2->nextdate = current_time + 1;

		/*
		 * Further processing of the solicitation response would
		 * simply detect its origin timestamp as bogus for the
		 * brand-new association (it matches the prototype
		 * association) and tinker with peer->nextdate delaying
		 * first sync.
		 */
		return;		/* solicitation response handled */

	/*
	 * This is the first packet received from a broadcast server. If
	 * the packet is authentic and we are enabled as broadcast
	 * client, mobilize a broadcast client association. We don't
	 * kiss any frogs here.
	 */
	case AM_NEWBCL:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (sys_bclient == 0) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
		 */
		if (hisleap == LEAP_NOTINSYNC || hisstratum <
		    sys_floor || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if Autokey and the opcode is not a
		 * CRYPTO_ASSOC response with association ID.
		 */
		if (crypto_flags && skeyid > NTP_MAXKEY && (opcode &
		    0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) {
			sys_declined++;
			return;			/* protocol error */
		}
#endif /* AUTOKEY */

		/*
		 * Broadcasts received via a multicast address may
		 * arrive after a unicast volley has begun
		 * with the same remote address. newpeer() will not
		 * find duplicate associations on other local endpoints
		 * if a non-NULL endpoint is supplied. multicastclient
		 * ephemeral associations are unique across all local
		 * endpoints.
		 */
		if (!(INT_MCASTOPEN & rbufp->dstadr->flags))
			match_ep = rbufp->dstadr;
		else
			match_ep = NULL;

		/*
		 * Determine whether to execute the initial volley.
		 */
		if (sys_bdelay != 0) {
#ifdef AUTOKEY
			/*
			 * If a two-way exchange is not possible,
			 * neither is Autokey.
			 */
			if (crypto_flags && skeyid > NTP_MAXKEY) {
				sys_restricted++;
				return;		/* no autokey */
			}
#endif	/* AUTOKEY */

			/*
			 * Do not execute the volley. Start out in
			 * broadcast client mode.
			 */
			peer = newpeer(&rbufp->recv_srcadr, NULL,
			    match_ep, MODE_BCLIENT, hisversion,
			    pkt->ppoll, pkt->ppoll, FLAG_PREEMPT,
			    MDF_BCLNT, 0, skeyid, sys_ident);
			if (NULL == peer) {
				sys_restricted++;
				return;		/* ignore duplicate */

			} else {
				peer->delay = sys_bdelay;
			}
			break;
		}

		/*
		 * Execute the initial volley in order to calibrate the
		 * propagation delay and run the Autokey protocol.
		 *
		 * Note that the minpoll is taken from the broadcast
		 * packet, normally 6 (64 s) and that the poll interval
		 * is fixed at this value.
		 */
		peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep,
		    MODE_CLIENT, hisversion, pkt->ppoll, pkt->ppoll,
		    FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT,
		    0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_restricted++;
			return;			/* ignore duplicate */
		}
#ifdef AUTOKEY
		if (skeyid > NTP_MAXKEY)
			crypto_recv(peer, rbufp);
#endif	/* AUTOKEY */

		return;				/* hooray */

	/*
	 * This is the first packet received from a symmetric active
	 * peer. If the packet is authentic and the first he sent,
	 * mobilize a passive association. If not, kiss the frog.
	 */
	case AM_NEWPASS:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {

			/*
			 * If authenticated but cannot mobilize an
			 * association, send a symmetric passive
			 * response without mobilizing an association.
			 * This is for drat broken Windows clients. See
			 * Microsoft KB 875424 for preferred workaround.
			 */
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_PASSIVE, skeyid,
				    restrict_mask);
				return;		/* hooray */
			}
			if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_ACTIVE, 0,
				    restrict_mask);
				sys_restricted++;
				return;
			}
		}

		/*
		 * Do not respond if synchronized and if stratum is
		 * below the floor or at or above the ceiling. Note,
		 * this allows an unsynchronized peer to synchronize to
		 * us. It would be very strange if he did and then was
		 * nipped, but that could only happen if we were
		 * operating at the top end of the range. It also means
		 * we will spin an ephemeral association in response to
		 * MODE_ACTIVE KoDs, which will time out eventually.
		 */
		if (hisleap != LEAP_NOTINSYNC && (hisstratum <
		    sys_floor || hisstratum >= sys_ceiling)) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * The message is correctly authenticated and allowed.
		 * Mobilize a symmetric passive association.
		 */
		if ((peer = newpeer(&rbufp->recv_srcadr, NULL,
		    rbufp->dstadr, MODE_PASSIVE, hisversion, pkt->ppoll,
		    NTP_MAXDPOLL, 0, MDF_UCAST, 0, skeyid,
		    sys_ident)) == NULL) {
			sys_declined++;
			return;			/* ignore duplicate */
		}
		break;


	/*
	 * Process regular packet. Nothing special.
	 */
	case AM_PROCPKT:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, peer->ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		break;

	/*
	 * A passive packet matches a passive association. This is
	 * usually the result of reconfiguring a client on the fly. As
	 * this association might be legitimate and this packet an
	 * attempt to deny service, just ignore it.
	 */
	case AM_ERR:
		sys_declined++;
		return;

	/*
	 * For everything else there is the bit bucket.
	 */
	default:
		sys_declined++;
		return;
	}

#ifdef AUTOKEY
	/*
	 * If the association is configured for Autokey, the packet must
	 * have a public key ID; if not, the packet must have a
	 * symmetric key ID.
	 */
	if (is_authentic != AUTH_CRYPTO && (((peer->flags &
	    FLAG_SKEY) && skeyid <= NTP_MAXKEY) || (!(peer->flags &
	    FLAG_SKEY) && skeyid > NTP_MAXKEY))) {
		sys_badauth++;
		return;
	}
#endif	/* AUTOKEY */
	peer->received++;
	peer->flash &= ~PKT_TEST_MASK;
	if (peer->flags & FLAG_XBOGUS) {
		peer->flags &= ~FLAG_XBOGUS;
		peer->flash |= TEST3;
	}

	/*
	 * Next comes a rigorous schedule of timestamp checking. If the
	 * transmit timestamp is zero, the server has not initialized in
	 * interleaved modes or is horribly broken.
	 */
	if (L_ISZERO(&p_xmt)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * If the transmit timestamp duplicates a previous one, the
	 * packet is a replay. This prevents the bad guys from replaying
	 * the most recent packet, authenticated or not.
	 */
	} else if (L_ISEQU(&peer->xmt, &p_xmt)) {
		peer->flash |= TEST1;		/* duplicate */
		peer->oldpkt++;
		return;

	/*
	 * If this is a broadcast mode packet, skip further checking. If
	 * an initial volley, bail out now and let the client do its
	 * stuff. If the origin timestamp is nonzero, this is an
	 * interleaved broadcast, so restart the protocol.
	 */
	} else if (hismode == MODE_BROADCAST) {
		if (!L_ISZERO(&p_org) && !(peer->flags & FLAG_XB)) {
			peer->flags |= FLAG_XB;
			peer->aorg = p_xmt;
			peer->borg = rbufp->recv_time;
			report_event(PEVNT_XLEAVE, peer, NULL);
			return;
		}

	/*
	 * Check for bogus packet in basic mode. If found, switch to
	 * interleaved mode and resynchronize, but only after confirming
	 * the packet is not bogus in symmetric interleaved mode.
	 */
	} else if (peer->flip == 0) {
		if (!L_ISEQU(&p_org, &peer->aorg)) {
			peer->bogusorg++;
			peer->flash |= TEST2;	/* bogus */
			if (!L_ISZERO(&peer->dst) && L_ISEQU(&p_org,
			    &peer->dst)) {
				peer->flip = 1;
				report_event(PEVNT_XLEAVE, peer, NULL);
			}
		} else {
			L_CLR(&peer->aorg);
		}

	/*
	 * Check for valid nonzero timestamp fields.
	 */
	} else if (L_ISZERO(&p_org) || L_ISZERO(&p_rec) ||
	    L_ISZERO(&peer->dst)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * Check for bogus packet in interleaved symmetric mode. This
	 * can happen if a packet is lost, duplicated or crossed. If
	 * found, flip and resynchronize.
	 */
	} else if (!L_ISZERO(&peer->dst) && !L_ISEQU(&p_org,
	    &peer->dst)) {
		peer->bogusorg++;
		peer->flags |= FLAG_XBOGUS;
		peer->flash |= TEST2;		/* bogus */
	}

	/*
	 * Update the state variables.
	 */
	if (peer->flip == 0) {
		if (hismode != MODE_BROADCAST)
			peer->rec = p_xmt;
		peer->dst = rbufp->recv_time;
	}
	peer->xmt = p_xmt;

	/*
	 * If this is a crypto_NAK, the server cannot authenticate a
	 * client packet. The server might have just changed keys. Clear
	 * the association and restart the protocol.
	 */
	if (is_authentic == AUTH_CRYPTO) {
		report_event(PEVNT_AUTH, peer, "crypto_NAK");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (peer->flags & FLAG_PREEMPT) {
			unpeer(peer);
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif	/* AUTOKEY */
		return;

	/*
	 * If the digest fails, the client cannot authenticate a server
	 * reply to a client packet previously sent. The loopback check
	 * is designed to avoid a bait-and-switch attack, which was
	 * possible in past versions. If symmetric modes, return a
	 * crypto-NAK. The peer should restart the protocol.
	 */
	} else if (!AUTH(has_mac || (restrict_mask & RES_DONTTRUST),
	    is_authentic)) {
		report_event(PEVNT_AUTH, peer, "digest");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (hismode == MODE_ACTIVE || hismode == MODE_PASSIVE)
			fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask);
		if (peer->flags & FLAG_PREEMPT) {
			unpeer(peer);
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif	/* AUTOKEY */
		return;
	}

	/*
	 * Set the peer ppoll to the maximum of the packet ppoll and the
	 * peer minpoll. If a kiss-o'-death, set the peer minpoll to
	 * this maximum and advance the headway to give the sender some
	 * headroom. Very intricate.
	 */
	peer->ppoll = max(peer->minpoll, pkt->ppoll);
	if (hismode == MODE_SERVER && hisleap == LEAP_NOTINSYNC &&
	    hisstratum == STRATUM_UNSPEC && memcmp(&pkt->refid,
	    "RATE", 4) == 0) {
		peer->selbroken++;
		report_event(PEVNT_RATE, peer, NULL);
		if (pkt->ppoll > peer->minpoll)
			peer->minpoll = peer->ppoll;
		peer->burst = peer->retry = 0;
		peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll);
		poll_update(peer, pkt->ppoll);
		return;				/* kiss-o'-death */
	}

	/*
	 * That was hard and I am sweaty, but the packet is squeaky
	 * clean. Get on with real work.
	 */
	peer->timereceived = current_time;
	if (is_authentic == AUTH_OK)
		peer->flags |= FLAG_AUTHENTIC;
	else
		peer->flags &= ~FLAG_AUTHENTIC;

#ifdef AUTOKEY
	/*
	 * More autokey dance. The rules of the cha-cha are as follows:
	 *
	 * 1. If there is no key or the key is not auto, do nothing.
	 *
	 * 2. If this packet is in response to the one just previously
	 *    sent or from a broadcast server, do the extension fields.
	 *    Otherwise, assume bogosity and bail out.
	 *
	 * 3. If an extension field contains a verified signature, it is
	 *    self-authenticated and we sit the dance.
	 *
	 * 4. If this is a server reply, check only to see that the
	 *    transmitted key ID matches the received key ID.
	 *
	 * 5. Check to see that one or more hashes of the current key ID
	 *    matches the previous key ID or ultimate original key ID
	 *    obtained from the broadcaster or symmetric peer. If no
	 *    match, sit the dance and call for new autokey values.
	 *
	 * In case of crypto error, fire the orchestra, stop dancing and
	 * restart the protocol.
	 */
	if (peer->flags & FLAG_SKEY) {
		/*
		 * Decrement remaining autokey hashes. This isn't
		 * perfect if a packet is lost, but results in no harm.
		 */
		ap = (struct autokey *)peer->recval.ptr;
		if (ap != NULL) {
			if (ap->seq > 0)
				ap->seq--;
		}
		peer->flash |= TEST8;
		rval = crypto_recv(peer, rbufp);
		if (rval == XEVNT_OK) {
			peer->unreach = 0;
		} else {
			if (rval == XEVNT_ERR) {
				report_event(PEVNT_RESTART, peer,
				    "crypto error");
				peer_clear(peer, "CRYP");
				peer->flash |= TEST9; /* bad crypt */
				if (peer->flags & FLAG_PREEMPT)
					unpeer(peer);
			}
			return;
		}

		/*
		 * If server mode, verify the receive key ID matches
		 * the transmit key ID.
		 */
		if (hismode == MODE_SERVER) {
			if (skeyid == peer->keyid)
				peer->flash &= ~TEST8;

		/*
		 * If an extension field is present, verify only that it
		 * has been correctly signed. We don't need a sequence
		 * check here, but the sequence continues.
		 */
		} else if (!(peer->flash & TEST8)) {
			peer->pkeyid = skeyid;

		/*
		 * Now the fun part. Here, skeyid is the current ID in
		 * the packet, pkeyid is the ID in the last packet and
		 * tkeyid is the hash of skeyid. If the autokey values
		 * have not been received, this is an automatic error.
		 * If so, check that the tkeyid matches pkeyid. If not,
		 * hash tkeyid and try again. If the number of hashes
		 * exceeds the number remaining in the sequence, declare
		 * a successful failure and refresh the autokey values.
		 */
		} else if (ap != NULL) {
			int i;

			for (i = 0; ; i++) {
				if (tkeyid == peer->pkeyid ||
				    tkeyid == ap->key) {
					peer->flash &= ~TEST8;
					peer->pkeyid = skeyid;
					ap->seq -= i;
					break;
				}
				if (i > ap->seq) {
					peer->crypto &=
					    ~CRYPTO_FLAG_AUTO;
					break;
				}
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    tkeyid, pkeyid, 0);
			}
			if (peer->flash & TEST8)
				report_event(PEVNT_AUTH, peer, "keylist");
		}
		if (!(peer->crypto & CRYPTO_FLAG_PROV)) /* test 9 */
			peer->flash |= TEST8;	/* bad autokey */

		/*
		 * The maximum lifetime of the protocol is about one
		 * week before restarting the Autokey protocol to
		 * refresh certificates and leapseconds values.
		 */
		if (current_time > peer->refresh) {
			report_event(PEVNT_RESTART, peer,
			    "crypto refresh");
			peer_clear(peer, "TIME");
			return;
		}
	}
#endif	/* AUTOKEY */

	/*
	 * The dance is complete and the flash bits have been lit. Toss
	 * the packet over the fence for processing, which may light up
	 * more flashers.
	 */
	process_packet(peer, pkt, rbufp->recv_length);

	/*
	 * In interleaved mode update the state variables. Also adjust the
	 * transmit phase to avoid crossover.
	 */
	if (peer->flip != 0) {
		peer->rec = p_rec;
		peer->dst = rbufp->recv_time;
		if (peer->nextdate - current_time < (1U << min(peer->ppoll,
		    peer->hpoll)) / 2)
			peer->nextdate++;
		else
			peer->nextdate--;
	}
}
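/*
 * Editor's sketch (hypothetical helper, not called anywhere): the
 * interpretation of the number of 32-bit words following the NTP
 * header, as described in the MAC-parsing comment in receive() above.
 */
static const char *
mac_words_name(
	int words		/* 32-bit words after the header */
	)
{
	switch (words) {
	case 0:
		return "no MAC, not authenticated";
	case 1:
		return "crypto-NAK";
	case 3:
		return "DES MAC";
	case 5:
		return "MD5 MAC";
	case 6:
		return "SHA MAC";
	default:
		return (words > 6) ? "extension field present" : "runt";
	}
}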

/*
 * process_packet - Packet Procedure, a la Section 3.4.4 of the
 * specification. Or almost, at least. If we're in here we have a
 * reasonable expectation that we will be having a long term
 * relationship with this host.
 */
void
process_packet(
	register struct peer *peer,
	register struct pkt *pkt,
	u_int	len
	)
{
	double	t34, t21;
	double	p_offset, p_del, p_disp;
	l_fp	p_rec, p_xmt, p_org, p_reftime, ci;
	u_char	pmode, pleap, pversion, pstratum;
	char	statstr[NTP_MAXSTRLEN];
#ifdef ASSYM
	int	itemp;
	double	etemp, ftemp, td;
#endif /* ASSYM */

	sys_processed++;
	peer->processed++;
	p_del = FPTOD(NTOHS_FP(pkt->rootdelay));
	p_offset = 0;
	p_disp = FPTOD(NTOHS_FP(pkt->rootdisp));
	NTOHL_FP(&pkt->reftime, &p_reftime);
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);
	pmode = PKT_MODE(pkt->li_vn_mode);
	pleap = PKT_LEAP(pkt->li_vn_mode);
	pversion = PKT_VERSION(pkt->li_vn_mode);
	pstratum = PKT_TO_STRATUM(pkt->stratum);

	/*
	 * Capture the header values in the client/peer association.
	 */
	record_raw_stats(&peer->srcadr, peer->dstadr ?
	    &peer->dstadr->sin : NULL,
	    &p_org, &p_rec, &p_xmt, &peer->dst,
	    pleap, pversion, pmode, pstratum, pkt->ppoll, pkt->precision,
	    p_del, p_disp, pkt->refid);
	peer->leap = pleap;
	peer->stratum = min(pstratum, STRATUM_UNSPEC);
	peer->pmode = pmode;
	peer->precision = pkt->precision;
	peer->rootdelay = p_del;
	peer->rootdisp = p_disp;
	peer->refid = pkt->refid;		/* network byte order */
	peer->reftime = p_reftime;

	/*
	 * First, if either burst mode is armed, enable the burst.
	 * Compute the headway for the next packet and delay if
	 * necessary to avoid exceeding the threshold.
	 */
	if (peer->retry > 0) {
		peer->retry = 0;
		if (peer->reach)
			peer->burst = min(1 << (peer->hpoll -
			    peer->minpoll), NTP_SHIFT) - 1;
		else
			peer->burst = NTP_IBURST - 1;
		if (peer->burst > 0)
			peer->nextdate = current_time;
	}
	poll_update(peer, peer->hpoll);

	/*
	 * Verify the server is synchronized; that is, the leap bits,
	 * stratum and root distance are valid.
	 */
	if (pleap == LEAP_NOTINSYNC ||		/* test 6 */
	    pstratum < sys_floor || pstratum >= sys_ceiling)
		peer->flash |= TEST6;		/* bad synch or strat */
	if (p_del / 2 + p_disp >= MAXDISPERSE)	/* test 7 */
		peer->flash |= TEST7;		/* bad header */

	/*
	 * If any tests fail at this point, the packet is discarded.
	 * Note that some flashers may have already been set in the
	 * receive() routine.
	 */
	if (peer->flash & PKT_TEST_MASK) {
		peer->seldisptoolarge++;
#ifdef DEBUG
		if (debug)
			printf("packet: flash header %04x\n",
			    peer->flash);
#endif
		return;
	}

	/*
	 * If the peer was previously unreachable, raise a trap. In any
	 * case, mark it reachable.
	 */
	if (!peer->reach) {
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach |= 1;

	/*
	 * For a client/server association, calculate the clock offset,
	 * roundtrip delay and dispersion. The equations are reordered
	 * from the spec for more efficient use of temporaries. For a
	 * broadcast association, offset the last measurement by the
	 * computed delay during the client/server volley. Note the
	 * computation of dispersion includes the system precision plus
	 * that due to the frequency error since the origin time.
	 *
	 * It is very important to respect the hazards of overflow. The
	 * only permitted operation on raw timestamps is subtraction,
	 * where the result is a signed quantity spanning from 68 years
	 * in the past to 68 years in the future. To avoid loss of
	 * precision, these calculations are done using 64-bit integer
	 * arithmetic. However, the offset and delay calculations are
	 * sums and differences of these first-order differences, which
	 * if done using 64-bit integer arithmetic, would be valid over
	 * only half that span. Since the typical first-order
	 * differences are usually very small, they are converted to 64-
	 * bit doubles and all remaining calculations done in floating-
	 * double arithmetic. This preserves the accuracy while
	 * retaining the 68-year span.
	 *
	 * There are three interleaving schemes, basic, interleaved
	 * symmetric and interleaved broadcast. The timestamps are
	 * idiosyncratically different. See the onwire briefing/white
	 * paper at www.eecis.udel.edu/~mills for details.
	 *
	 * Interleaved symmetric mode
	 * t1 = peer->aorg/borg, t2 = peer->rec, t3 = p_xmt,
	 * t4 = peer->dst
	 */
	if (peer->flip != 0) {
		ci = p_xmt;			/* t3 - t4 */
		L_SUB(&ci, &peer->dst);
		LFPTOD(&ci, t34);
		ci = p_rec;			/* t2 - t1 */
		if (peer->flip > 0)
			L_SUB(&ci, &peer->borg);
		else
			L_SUB(&ci, &peer->aorg);
		LFPTOD(&ci, t21);
		p_del = t21 - t34;
		p_offset = (t21 + t34) / 2.;
		if (p_del < 0 || p_del > 1.) {
			snprintf(statstr, sizeof(statstr),
			    "t21 %.6f t34 %.6f", t21, t34);
			report_event(PEVNT_XERR, peer, statstr);
			return;
		}

	/*
	 * Broadcast modes
	 */
	} else if (peer->pmode == MODE_BROADCAST) {

		/*
		 * Interleaved broadcast mode. Use interleaved timestamps.
		 * t1 = peer->borg, t2 = p_org, t3 = p_org, t4 = aorg
		 */
		if (peer->flags & FLAG_XB) {
			ci = p_org;		/* delay */
			L_SUB(&ci, &peer->aorg);
			LFPTOD(&ci, t34);
			ci = p_org;		/* t2 - t1 */
			L_SUB(&ci, &peer->borg);
			LFPTOD(&ci, t21);
			peer->aorg = p_xmt;
			peer->borg = peer->dst;
			if (t34 < 0 || t34 > 1.) {
				snprintf(statstr, sizeof(statstr),
				    "offset %.6f delay %.6f", t21, t34);
				report_event(PEVNT_XERR, peer, statstr);
				return;
			}
			p_offset = t21;
			peer->xleave = t34;

		/*
		 * Basic broadcast - use direct timestamps.
		 * t3 = p_xmt, t4 = peer->dst
		 */
		} else {
			ci = p_xmt;		/* t3 - t4 */
			L_SUB(&ci, &peer->dst);
			LFPTOD(&ci, t34);
			p_offset = t34;
		}

		/*
		 * When calibration is complete and the clock is
		 * synchronized, the bias is calculated as the difference
		 * between the unicast timestamp and the broadcast
		 * timestamp. This works for both basic and interleaved
		 * modes.
		 */
		if (FLAG_BC_VOL & peer->flags) {
			peer->flags &= ~FLAG_BC_VOL;
			peer->delay = fabs(peer->offset - p_offset) * 2;
		}
		p_del = peer->delay;
		p_offset += p_del / 2;


	/*
	 * Basic mode, otherwise known as the old fashioned way.
	 *
	 * t1 = p_org, t2 = p_rec, t3 = p_xmt, t4 = peer->dst
	 */
	} else {
		ci = p_xmt;			/* t3 - t4 */
		L_SUB(&ci, &peer->dst);
		LFPTOD(&ci, t34);
		ci = p_rec;			/* t2 - t1 */
		L_SUB(&ci, &p_org);
		LFPTOD(&ci, t21);
		p_del = fabs(t21 - t34);
		p_offset = (t21 + t34) / 2.;
	}
	p_del = max(p_del, LOGTOD(sys_precision));
	p_disp = LOGTOD(sys_precision) + LOGTOD(peer->precision) +
	    clock_phi * p_del;

#if ASSYM
	/*
	 * This code calculates the outbound and inbound data rates by
	 * measuring the differences between timestamps at different
	 * packet lengths. This is helpful in cases of large asymmetric
	 * delays commonly experienced on deep space communication
	 * links.
	 */
	if (peer->t21_last > 0 && peer->t34_bytes > 0) {
		itemp = peer->t21_bytes - peer->t21_last;
		if (itemp > 25) {
			etemp = t21 - peer->t21;
			if (fabs(etemp) > 1e-6) {
				ftemp = itemp / etemp;
				if (ftemp > 1000.)
					peer->r21 = ftemp;
			}
		}
		itemp = len - peer->t34_bytes;
		if (itemp > 25) {
			etemp = -t34 - peer->t34;
			if (fabs(etemp) > 1e-6) {
				ftemp = itemp / etemp;
				if (ftemp > 1000.)
					peer->r34 = ftemp;
			}
		}
	}

	/*
	 * The following section compensates for different data rates on
	 * the outbound (d21) and inbound (t34) directions. To do this,
	 * it finds t such that r21 * t - r34 * (d - t) = 0, where d is
	 * the roundtrip delay. Then it calculates the correction as a
	 * fraction of d.
	 */
	peer->t21 = t21;
	peer->t21_last = peer->t21_bytes;
	peer->t34 = -t34;
	peer->t34_bytes = len;
#ifdef DEBUG
	if (debug > 1)
		printf("packet: t21 %.9lf %d t34 %.9lf %d\n", peer->t21,
		    peer->t21_bytes, peer->t34, peer->t34_bytes);
#endif
	if (peer->r21 > 0 && peer->r34 > 0 && p_del > 0) {
		if (peer->pmode != MODE_BROADCAST)
			td = (peer->r34 / (peer->r21 + peer->r34) -
			    .5) * p_del;
		else
			td = 0;

		/*
		 * Unfortunately, in many cases the errors are
		 * unacceptable, so for the present the rates are not
		 * used. In future, we might find conditions where the
		 * calculations are useful, so this should be considered
		 * a work in progress.
		 */
		t21 -= td;
		t34 -= td;
#ifdef DEBUG
		if (debug > 1)
			printf("packet: del %.6lf r21 %.1lf r34 %.1lf %.6lf\n",
			    p_del, peer->r21 / 1e3, peer->r34 / 1e3,
			    td);
#endif
	}
#endif /* ASSYM */

	/*
	 * That was awesome. Now hand off to the clock filter.
	 */
	clock_filter(peer, p_offset + peer->bias, p_del, p_disp);

	/*
	 * If we are in broadcast calibrate mode, return to broadcast
	 * client mode when the client is fit and the autokey dance is
	 * complete.
	 */
	if ((FLAG_BC_VOL & peer->flags) && MODE_CLIENT == peer->hmode &&
	    !(TEST11 & peer_unfit(peer))) {	/* distance exceeded */
#ifdef AUTOKEY
		if (peer->flags & FLAG_SKEY) {
			if (!(~peer->crypto & CRYPTO_FLAG_ALL))
				peer->hmode = MODE_BCLIENT;
		} else {
			peer->hmode = MODE_BCLIENT;
		}
#else	/* !AUTOKEY follows */
		peer->hmode = MODE_BCLIENT;
#endif	/* !AUTOKEY */
	}
}
1808 */ 1809 static void 1810 clock_update( 1811 struct peer *peer /* peer structure pointer */ 1812 ) 1813 { 1814 double dtemp; 1815 l_fp now; 1816 #ifdef HAVE_LIBSCF_H 1817 char *fmri; 1818 #endif /* HAVE_LIBSCF_H */ 1819 1820 /* 1821 * Update the system state variables. We do this very carefully, 1822 * as the poll interval might need to be clamped differently. 1823 */ 1824 sys_peer = peer; 1825 sys_epoch = peer->epoch; 1826 if (sys_poll < peer->minpoll) 1827 sys_poll = peer->minpoll; 1828 if (sys_poll > peer->maxpoll) 1829 sys_poll = peer->maxpoll; 1830 poll_update(peer, sys_poll); 1831 sys_stratum = min(peer->stratum + 1, STRATUM_UNSPEC); 1832 if (peer->stratum == STRATUM_REFCLOCK || 1833 peer->stratum == STRATUM_UNSPEC) 1834 sys_refid = peer->refid; 1835 else 1836 sys_refid = addr2refid(&peer->srcadr); 1837 /* 1838 * Root Dispersion (E) is defined (in RFC 5905) as: 1839 * 1840 * E = p.epsilon_r + p.epsilon + p.psi + PHI*(s.t - p.t) + |THETA| 1841 * 1842 * where: 1843 * p.epsilon_r is the PollProc's root dispersion 1844 * p.epsilon is the PollProc's dispersion 1845 * p.psi is the PollProc's jitter 1846 * THETA is the combined offset 1847 * 1848 * NB: Think Hard about where these numbers come from and 1849 * what they mean. When did peer->update happen? Has anything 1850 * interesting happened since then? What values are the most 1851 * defensible? Why? 1852 * 1853 * DLM thinks this equation is probably the best of all worse choices. 1854 */ 1855 dtemp = peer->rootdisp 1856 + peer->disp 1857 + sys_jitter 1858 + clock_phi * (current_time - peer->update) 1859 + fabs(sys_offset); 1860 1861 if (dtemp > sys_mindisp) 1862 sys_rootdisp = dtemp; 1863 else 1864 sys_rootdisp = sys_mindisp; 1865 sys_rootdelay = peer->delay + peer->rootdelay; 1866 sys_reftime = peer->dst; 1867 1868 #ifdef DEBUG 1869 if (debug) 1870 printf( 1871 "clock_update: at %lu sample %lu associd %d\n", 1872 current_time, peer->epoch, peer->associd); 1873 #endif 1874 1875 /* 1876 * Comes now the moment of truth. Crank the clock discipline and 1877 * see what comes out. 1878 */ 1879 switch (local_clock(peer, sys_offset)) { 1880 1881 /* 1882 * Clock exceeds panic threshold. Life as we know it ends. 1883 */ 1884 case -1: 1885 #ifdef HAVE_LIBSCF_H 1886 /* 1887 * For Solaris enter the maintenance mode. 1888 */ 1889 if ((fmri = getenv("SMF_FMRI")) != NULL) { 1890 if (smf_maintain_instance(fmri, 0) < 0) { 1891 printf("smf_maintain_instance: %s\n", 1892 scf_strerror(scf_error())); 1893 exit(1); 1894 } 1895 /* 1896 * Sleep until SMF kills us. 1897 */ 1898 for (;;) 1899 pause(); 1900 } 1901 #endif /* HAVE_LIBSCF_H */ 1902 exit (-1); 1903 /* not reached */ 1904 1905 /* 1906 * Clock was stepped. Flush all time values of all peers. 1907 */ 1908 case 2: 1909 clear_all(); 1910 sys_leap = LEAP_NOTINSYNC; 1911 sys_stratum = STRATUM_UNSPEC; 1912 memcpy(&sys_refid, "STEP", 4); 1913 sys_rootdelay = 0; 1914 sys_rootdisp = 0; 1915 L_CLR(&sys_reftime); 1916 sys_jitter = LOGTOD(sys_precision); 1917 leapsec_reset_frame(); 1918 break; 1919 1920 /* 1921 * Clock was slewed. Handle the leapsecond stuff. 1922 */ 1923 case 1: 1924 1925 /* 1926 * If this is the first time the clock is set, reset the 1927 * leap bits. If crypto, the timer will goose the setup 1928 * process. 1929 */ 1930 if (sys_leap == LEAP_NOTINSYNC) { 1931 sys_leap = LEAP_NOWARNING; 1932 #ifdef AUTOKEY 1933 if (crypto_flags) 1934 crypto_update(); 1935 #endif /* AUTOKEY */ 1936 /* 1937 * If our parent process is waiting for the 1938 * first clock sync, send them home satisfied. 
1939 */ 1940 #ifdef HAVE_WORKING_FORK 1941 if (waitsync_fd_to_close != -1) { 1942 close(waitsync_fd_to_close); 1943 waitsync_fd_to_close = -1; 1944 DPRINTF(1, ("notified parent --wait-sync is done\n")); 1945 } 1946 #endif /* HAVE_WORKING_FORK */ 1947 1948 } 1949 1950 /* 1951 * If there is no leap second pending and the number of 1952 * survivor leap bits is greater than half the number of 1953 * survivors, try to schedule a leap for the end of the 1954 * current month. (This only works if no leap second for 1955 * that range is in the table, so doing this more than 1956 * once is mostly harmless.) 1957 */ 1958 if (leapsec == LSPROX_NOWARN) { 1959 if (leap_vote_ins > leap_vote_del 1960 && leap_vote_ins > sys_survivors / 2) { 1961 get_systime(&now); 1962 leapsec_add_dyn(TRUE, now.l_ui, NULL); 1963 } 1964 if (leap_vote_del > leap_vote_ins 1965 && leap_vote_del > sys_survivors / 2) { 1966 get_systime(&now); 1967 leapsec_add_dyn(FALSE, now.l_ui, NULL); 1968 } 1969 } 1970 break; 1971 1972 /* 1973 * Popcorn spike or step threshold exceeded. Pretend it never 1974 * happened. 1975 */ 1976 default: 1977 break; 1978 } 1979 } 1980 1981 1982 /* 1983 * poll_update - update peer poll interval 1984 */ 1985 void 1986 poll_update( 1987 struct peer *peer, /* peer structure pointer */ 1988 u_char mpoll 1989 ) 1990 { 1991 u_long next, utemp; 1992 u_char hpoll; 1993 1994 /* 1995 * This routine figures out when the next poll should be sent. 1996 * That turns out to be wickedly complicated. One problem is 1997 * that sometimes the time for the next poll is in the past when 1998 * the poll interval is reduced. We watch out for races here 1999 * between the receive process and the poll process. 2000 * 2001 * Clamp the poll interval between minpoll and maxpoll. 2002 */ 2003 hpoll = max(min(peer->maxpoll, mpoll), peer->minpoll); 2004 2005 #ifdef AUTOKEY 2006 /* 2007 * If during the crypto protocol the poll interval has changed, 2008 * the lifetimes in the key list are probably bogus. Purge the 2009 * key list and regenerate it later. 2010 */ 2011 if ((peer->flags & FLAG_SKEY) && hpoll != peer->hpoll) 2012 key_expire(peer); 2013 #endif /* AUTOKEY */ 2014 peer->hpoll = hpoll; 2015 2016 /* 2017 * There are three variables important for poll scheduling, the 2018 * current time (current_time), next scheduled time (nextdate) 2019 * and the earliest time (utemp). The earliest time is 2 2020 * seconds, but could be more due to rate management. When 2021 * sending in a burst, use the earliest time. When not in a 2022 * burst but with a reply pending, send at the earliest time 2023 * unless the next scheduled time has not advanced. This can 2024 * only happen if multiple replies are pending in the same 2025 * response interval. Otherwise, send at the later of the next 2026 * scheduled time and the earliest time. 2027 * 2028 * Now we figure out if there is an override. If a burst is in 2029 * progress and we get called from the receive process, just 2030 * slink away. If called from the poll process, delay 1 s for a 2031 * reference clock, otherwise 2 s.
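 *
 * A hedged numeric illustration, assuming the usual defaults of
 * minpoll 6 (64 s) and a 2 s discard minimum (ntp_minpkt): with no
 * recent burst traffic peer->throttle is near zero, so the earliest
 * send time computed below is
 *
 *	utemp = current_time +
 *	    max(throttle - (NTP_SHIFT - 1) * (1 << 6), ntp_minpkt)
 *	      = current_time + 2
 *
 * that is, about two seconds from now; only a large accumulated
 * throttle pushes it further out.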
2032 */ 2033 utemp = current_time + max(peer->throttle - (NTP_SHIFT - 1) * 2034 (1 << peer->minpoll), ntp_minpkt); 2035 if (peer->burst > 0) { 2036 if (peer->nextdate > current_time) 2037 return; 2038 #ifdef REFCLOCK 2039 else if (peer->flags & FLAG_REFCLOCK) 2040 peer->nextdate = current_time + RESP_DELAY; 2041 #endif /* REFCLOCK */ 2042 else 2043 peer->nextdate = utemp; 2044 2045 #ifdef AUTOKEY 2046 /* 2047 * If a burst is not in progress and a crypto response message 2048 * is pending, delay 2 s, but only if this is a new interval. 2049 */ 2050 } else if (peer->cmmd != NULL) { 2051 if (peer->nextdate > current_time) { 2052 if (peer->nextdate + ntp_minpkt != utemp) 2053 peer->nextdate = utemp; 2054 } else { 2055 peer->nextdate = utemp; 2056 } 2057 #endif /* AUTOKEY */ 2058 2059 /* 2060 * The ordinary case. If a retry, use minpoll; if unreachable, 2061 * use host poll; otherwise, use the minimum of host and peer 2062 * polls. In other words, oversampling is okay but 2063 * undersampling is evil. Use the maximum of this value and the 2064 * headway. If the average headway is greater than the headway 2065 * threshold, increase the headway by the minimum interval. 2066 */ 2067 } else { 2068 if (peer->retry > 0) 2069 hpoll = peer->minpoll; 2070 else if (!(peer->reach)) 2071 hpoll = peer->hpoll; 2072 else 2073 hpoll = min(peer->ppoll, peer->hpoll); 2074 #ifdef REFCLOCK 2075 if (peer->flags & FLAG_REFCLOCK) 2076 next = 1 << hpoll; 2077 else 2078 #endif /* REFCLOCK */ 2079 next = ((0x1000UL | (ntp_random() & 0x0ff)) << 2080 hpoll) >> 12; 2081 next += peer->outdate; 2082 if (next > utemp) 2083 peer->nextdate = next; 2084 else 2085 peer->nextdate = utemp; 2086 if (peer->throttle > (1 << peer->minpoll)) 2087 peer->nextdate += ntp_minpkt; 2088 } 2089 DPRINTF(2, ("poll_update: at %lu %s poll %d burst %d retry %d head %d early %lu next %lu\n", 2090 current_time, ntoa(&peer->srcadr), peer->hpoll, 2091 peer->burst, peer->retry, peer->throttle, 2092 utemp - current_time, peer->nextdate - 2093 current_time)); 2094 } 2095 2096 2097 /* 2098 * peer_clear - clear peer filter registers. See Section 3.4.8 of the 2099 * spec. 2100 */ 2101 void 2102 peer_clear( 2103 struct peer *peer, /* peer structure */ 2104 const char *ident /* tally lights */ 2105 ) 2106 { 2107 u_char u; 2108 2109 #ifdef AUTOKEY 2110 /* 2111 * If cryptographic credentials have been acquired, toss them to 2112 * Valhalla. Note that autokeys are ephemeral, in that they are 2113 * tossed immediately upon use. Therefore, the keylist can be 2114 * purged anytime without needing to preserve random keys. Note 2115 * that, if the peer is purged, the cryptographic variables are 2116 * purged, too. This makes it much harder to sneak in some 2117 * unauthenticated data in the clock filter. 2118 */ 2119 key_expire(peer); 2120 if (peer->iffval != NULL) 2121 BN_free(peer->iffval); 2122 value_free(&peer->cookval); 2123 value_free(&peer->recval); 2124 value_free(&peer->encrypt); 2125 value_free(&peer->sndval); 2126 if (peer->cmmd != NULL) 2127 free(peer->cmmd); 2128 if (peer->subject != NULL) 2129 free(peer->subject); 2130 if (peer->issuer != NULL) 2131 free(peer->issuer); 2132 #endif /* AUTOKEY */ 2133 2134 /* 2135 * Clear all values, including the optional crypto values above.
2136 */ 2137 memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO(peer)); 2138 peer->ppoll = peer->maxpoll; 2139 peer->hpoll = peer->minpoll; 2140 peer->disp = MAXDISPERSE; 2141 peer->flash = peer_unfit(peer); 2142 peer->jitter = LOGTOD(sys_precision); 2143 2144 /* 2145 * If interleave mode, initialize the alternate origin switch. 2146 */ 2147 if (peer->flags & FLAG_XLEAVE) 2148 peer->flip = 1; 2149 for (u = 0; u < NTP_SHIFT; u++) { 2150 peer->filter_order[u] = u; 2151 peer->filter_disp[u] = MAXDISPERSE; 2152 } 2153 #ifdef REFCLOCK 2154 if (!(peer->flags & FLAG_REFCLOCK)) { 2155 #endif 2156 peer->leap = LEAP_NOTINSYNC; 2157 peer->stratum = STRATUM_UNSPEC; 2158 memcpy(&peer->refid, ident, 4); 2159 #ifdef REFCLOCK 2160 } 2161 #endif 2162 2163 /* 2164 * During initialization use the association count to spread out 2165 * the polls at one-second intervals. Passive associations' 2166 * first poll is delayed by the "discard minimum" to avoid rate 2167 * limiting. Other post-startup new or cleared associations 2168 * randomize the first poll over the minimum poll interval to 2169 * avoid implosion. 2170 */ 2171 peer->nextdate = peer->update = peer->outdate = current_time; 2172 if (initializing) { 2173 peer->nextdate += peer_associations; 2174 } else if (MODE_PASSIVE == peer->hmode) { 2175 peer->nextdate += ntp_minpkt; 2176 } else { 2177 peer->nextdate += ntp_random() % peer->minpoll; 2178 } 2179 #ifdef AUTOKEY 2180 peer->refresh = current_time + (1 << NTP_REFRESH); 2181 #endif /* AUTOKEY */ 2182 #ifdef DEBUG 2183 if (debug) 2184 printf( 2185 "peer_clear: at %ld next %ld associd %d refid %s\n", 2186 current_time, peer->nextdate, peer->associd, 2187 ident); 2188 #endif 2189 } 2190 2191 2192 /* 2193 * clock_filter - add incoming clock sample to filter register and run 2194 * the filter procedure to find the best sample. 2195 */ 2196 void 2197 clock_filter( 2198 struct peer *peer, /* peer structure pointer */ 2199 double sample_offset, /* clock offset */ 2200 double sample_delay, /* roundtrip delay */ 2201 double sample_disp /* dispersion */ 2202 ) 2203 { 2204 double dst[NTP_SHIFT]; /* distance vector */ 2205 int ord[NTP_SHIFT]; /* index vector */ 2206 int i, j, k, m; 2207 double dtemp, etemp; 2208 char tbuf[80]; 2209 2210 /* 2211 * A sample consists of the offset, delay, dispersion and epoch 2212 * of arrival. The offset and delay are determined by the on- 2213 * wire protocol. The dispersion grows from the last outbound 2214 * packet to the arrival of this one increased by the sum of the 2215 * peer precision and the system precision as required by the 2216 * error budget. First, shift the new arrival into the shift 2217 * register discarding the oldest one. 2218 */ 2219 j = peer->filter_nextpt; 2220 peer->filter_offset[j] = sample_offset; 2221 peer->filter_delay[j] = sample_delay; 2222 peer->filter_disp[j] = sample_disp; 2223 peer->filter_epoch[j] = current_time; 2224 j = (j + 1) % NTP_SHIFT; 2225 peer->filter_nextpt = j; 2226 2227 /* 2228 * Update dispersions since the last update and at the same 2229 * time initialize the distance and index lists. Since samples 2230 * become increasingly uncorrelated beyond the Allan intercept, 2231 * only under exceptional cases will an older sample be used. 2232 * Therefore, the distance list uses a compound metric. If the 2233 * dispersion is greater than the maximum dispersion, clamp the 2234 * distance at that value. 
If the time since the last update is 2235 * less than the Allan intercept use the delay; otherwise, use 2236 * the sum of the delay and dispersion. 2237 */ 2238 dtemp = clock_phi * (current_time - peer->update); 2239 peer->update = current_time; 2240 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2241 if (i != 0) 2242 peer->filter_disp[j] += dtemp; 2243 if (peer->filter_disp[j] >= MAXDISPERSE) { 2244 peer->filter_disp[j] = MAXDISPERSE; 2245 dst[i] = MAXDISPERSE; 2246 } else if (peer->update - peer->filter_epoch[j] > 2247 (u_long)ULOGTOD(allan_xpt)) { 2248 dst[i] = peer->filter_delay[j] + 2249 peer->filter_disp[j]; 2250 } else { 2251 dst[i] = peer->filter_delay[j]; 2252 } 2253 ord[i] = j; 2254 j = (j + 1) % NTP_SHIFT; 2255 } 2256 2257 /* 2258 * If the clock has stabilized, sort the samples by distance. 2259 */ 2260 if (freq_cnt == 0) { 2261 for (i = 1; i < NTP_SHIFT; i++) { 2262 for (j = 0; j < i; j++) { 2263 if (dst[j] > dst[i]) { 2264 k = ord[j]; 2265 ord[j] = ord[i]; 2266 ord[i] = k; 2267 etemp = dst[j]; 2268 dst[j] = dst[i]; 2269 dst[i] = etemp; 2270 } 2271 } 2272 } 2273 } 2274 2275 /* 2276 * Copy the index list to the association structure so ntpq 2277 * can see it later. Prune the distance list to leave only 2278 * samples less than the maximum dispersion, which disfavors 2279 * uncorrelated samples older than the Allan intercept. To 2280 * further improve the jitter estimate, of the remainder leave 2281 * only samples less than the maximum distance, but keep at 2282 * least two samples for jitter calculation. 2283 */ 2284 m = 0; 2285 for (i = 0; i < NTP_SHIFT; i++) { 2286 peer->filter_order[i] = (u_char) ord[i]; 2287 if (dst[i] >= MAXDISPERSE || (m >= 2 && dst[i] >= 2288 sys_maxdist)) 2289 continue; 2290 m++; 2291 } 2292 2293 /* 2294 * Compute the dispersion and jitter. The dispersion is weighted 2295 * exponentially by NTP_FWEIGHT (0.5) so it is normalized close 2296 * to 1.0. The jitter is the RMS differences relative to the 2297 * lowest delay sample. 2298 */ 2299 peer->disp = peer->jitter = 0; 2300 k = ord[0]; 2301 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2302 j = ord[i]; 2303 peer->disp = NTP_FWEIGHT * (peer->disp + 2304 peer->filter_disp[j]); 2305 if (i < m) 2306 peer->jitter += DIFF(peer->filter_offset[j], 2307 peer->filter_offset[k]); 2308 } 2309 2310 /* 2311 * If no acceptable samples remain in the shift register, 2312 * quietly tiptoe home leaving only the dispersion. Otherwise, 2313 * save the offset, delay and jitter. Note the jitter must not 2314 * be less than the precision. 2315 */ 2316 if (m == 0) { 2317 clock_select(); 2318 return; 2319 } 2320 etemp = fabs(peer->offset - peer->filter_offset[k]); 2321 peer->offset = peer->filter_offset[k]; 2322 peer->delay = peer->filter_delay[k]; 2323 if (m > 1) 2324 peer->jitter /= m - 1; 2325 peer->jitter = max(SQRT(peer->jitter), LOGTOD(sys_precision)); 2326 2327 /* 2328 * If the the new sample and the current sample are both valid 2329 * and the difference between their offsets exceeds CLOCK_SGATE 2330 * (3) times the jitter and the interval between them is less 2331 * than twice the host poll interval, consider the new sample 2332 * a popcorn spike and ignore it. 2333 */ 2334 if (peer->disp < sys_maxdist && peer->filter_disp[k] < 2335 sys_maxdist && etemp > CLOCK_SGATE * peer->jitter && 2336 peer->filter_epoch[k] - peer->epoch < 2. 
* 2337 ULOGTOD(peer->hpoll)) { 2338 snprintf(tbuf, sizeof(tbuf), "%.6f s", etemp); 2339 report_event(PEVNT_POPCORN, peer, tbuf); 2340 return; 2341 } 2342 2343 /* 2344 * A new minimum sample is useful only if it is later than the 2345 * last one used. In this design the maximum lifetime of any 2346 * sample is not greater than eight times the poll interval, so 2347 * the maximum interval between minimum samples is eight 2348 * packets. 2349 */ 2350 if (peer->filter_epoch[k] <= peer->epoch) { 2351 #if DEBUG 2352 if (debug > 1) 2353 printf("clock_filter: old sample %lu\n", current_time - 2354 peer->filter_epoch[k]); 2355 #endif 2356 return; 2357 } 2358 peer->epoch = peer->filter_epoch[k]; 2359 2360 /* 2361 * The mitigated sample statistics are saved for later 2362 * processing. If not synchronized or not in a burst, tickle the 2363 * clock select algorithm. 2364 */ 2365 record_peer_stats(&peer->srcadr, ctlpeerstatus(peer), 2366 peer->offset, peer->delay, peer->disp, peer->jitter); 2367 #ifdef DEBUG 2368 if (debug) 2369 printf( 2370 "clock_filter: n %d off %.6f del %.6f dsp %.6f jit %.6f\n", 2371 m, peer->offset, peer->delay, peer->disp, 2372 peer->jitter); 2373 #endif 2374 if (peer->burst == 0 || sys_leap == LEAP_NOTINSYNC) 2375 clock_select(); 2376 } 2377 2378 2379 /* 2380 * clock_select - find the pick-of-the-litter clock 2381 * 2382 * LOCKCLOCK: (1) If the local clock is the prefer peer, it will always 2383 * be enabled, even if declared falseticker, (2) only the prefer peer 2384 * can be selected as the system peer, (3) if the external source is 2385 * down, the system leap bits are set to 11 and the stratum set to 2386 * infinity. 2387 */ 2388 void 2389 clock_select(void) 2390 { 2391 struct peer *peer; 2392 int i, j, k, n; 2393 int nlist, nl2; 2394 int allow; 2395 int speer; 2396 double d, e, f, g; 2397 double high, low; 2398 double speermet; 2399 double orphmet = 2.0 * U_INT32_MAX; /* 2x is greater than */ 2400 struct endpoint endp; 2401 struct peer *osys_peer; 2402 struct peer *sys_prefer = NULL; /* prefer peer */ 2403 struct peer *typesystem = NULL; 2404 struct peer *typeorphan = NULL; 2405 #ifdef REFCLOCK 2406 struct peer *typeacts = NULL; 2407 struct peer *typelocal = NULL; 2408 struct peer *typepps = NULL; 2409 #endif /* REFCLOCK */ 2410 static struct endpoint *endpoint = NULL; 2411 static int *indx = NULL; 2412 static peer_select *peers = NULL; 2413 static u_int endpoint_size = 0; 2414 static u_int peers_size = 0; 2415 static u_int indx_size = 0; 2416 size_t octets; 2417 2418 /* 2419 * Initialize and create endpoint, index and peer lists big 2420 * enough to handle all associations. 2421 */ 2422 osys_peer = sys_peer; 2423 sys_survivors = 0; 2424 #ifdef LOCKCLOCK 2425 sys_leap = LEAP_NOTINSYNC; 2426 sys_stratum = STRATUM_UNSPEC; 2427 memcpy(&sys_refid, "DOWN", 4); 2428 #endif /* LOCKCLOCK */ 2429 2430 /* 2431 * Allocate dynamic space depending on the number of 2432 * associations. 2433 */ 2434 nlist = 1; 2435 for (peer = peer_list; peer != NULL; peer = peer->p_link) 2436 nlist++; 2437 endpoint_size = ALIGNED_SIZE(nlist * 2 * sizeof(*endpoint)); 2438 peers_size = ALIGNED_SIZE(nlist * sizeof(*peers)); 2439 indx_size = ALIGNED_SIZE(nlist * 2 * sizeof(*indx)); 2440 octets = endpoint_size + peers_size + indx_size; 2441 endpoint = erealloc(endpoint, octets); 2442 peers = INC_ALIGNED_PTR(endpoint, endpoint_size); 2443 indx = INC_ALIGNED_PTR(peers, peers_size); 2444 2445 /* 2446 * Initially, we populate the island with all the rifraff peers 2447 * that happen to be lying around. 
Those with seriously 2448 * defective clocks are immediately booted off the island. Then, 2449 * the falsetickers are culled and put to sea. The truechimers 2450 * remaining are subject to repeated rounds where the most 2451 * unpopular at each round is kicked off. When the population 2452 * has dwindled to sys_minclock, the survivors split a million 2453 * bucks and collectively crank the chimes. 2454 */ 2455 nlist = nl2 = 0; /* none yet */ 2456 for (peer = peer_list; peer != NULL; peer = peer->p_link) { 2457 peer->new_status = CTL_PST_SEL_REJECT; 2458 2459 /* 2460 * Leave the island immediately if the peer is 2461 * unfit to synchronize. 2462 */ 2463 if (peer_unfit(peer)) 2464 continue; 2465 2466 /* 2467 * If this peer is an orphan parent, elect the 2468 * one with the lowest metric defined as the 2469 * IPv4 address or the first 64 bits of the 2470 * hashed IPv6 address. To ensure convergence 2471 * on the same selected orphan, consider as 2472 * well that this system may have the lowest 2473 * metric and be the orphan parent. If this 2474 * system wins, sys_peer will be NULL to trigger 2475 * orphan mode in timer(). 2476 */ 2477 if (peer->stratum == sys_orphan) { 2478 u_int32 localmet; 2479 u_int32 peermet; 2480 2481 if (peer->dstadr != NULL) 2482 localmet = ntohl(peer->dstadr->addr_refid); 2483 else 2484 localmet = U_INT32_MAX; 2485 peermet = ntohl(addr2refid(&peer->srcadr)); 2486 if (peermet < localmet && peermet < orphmet) { 2487 typeorphan = peer; 2488 orphmet = peermet; 2489 } 2490 continue; 2491 } 2492 2493 /* 2494 * If this peer could have the orphan parent 2495 * as a synchronization ancestor, exclude it 2496 * from selection to avoid forming a 2497 * synchronization loop within the orphan mesh, 2498 * triggering stratum climb to infinity 2499 * instability. Peers at stratum higher than 2500 * the orphan stratum could have the orphan 2501 * parent in ancestry so are excluded. 2502 * See http://bugs.ntp.org/2050 2503 */ 2504 if (peer->stratum > sys_orphan) 2505 continue; 2506 #ifdef REFCLOCK 2507 /* 2508 * The following are special cases. We deal 2509 * with them later. 2510 */ 2511 if (!(peer->flags & FLAG_PREFER)) { 2512 switch (peer->refclktype) { 2513 case REFCLK_LOCALCLOCK: 2514 if (current_time > orphwait && 2515 typelocal == NULL) 2516 typelocal = peer; 2517 continue; 2518 2519 case REFCLK_ACTS: 2520 if (current_time > orphwait && 2521 typeacts == NULL) 2522 typeacts = peer; 2523 continue; 2524 } 2525 } 2526 #endif /* REFCLOCK */ 2527 2528 /* 2529 * If we get this far, the peer can stay on the 2530 * island, but does not yet have the immunity 2531 * idol. 2532 */ 2533 peer->new_status = CTL_PST_SEL_SANE; 2534 f = root_distance(peer); 2535 peers[nlist].peer = peer; 2536 peers[nlist].error = peer->jitter; 2537 peers[nlist].synch = f; 2538 nlist++; 2539 2540 /* 2541 * Insert each interval endpoint on the unsorted 2542 * endpoint[] list. 2543 */ 2544 e = peer->offset; 2545 endpoint[nl2].type = -1; /* lower end */ 2546 endpoint[nl2].val = e - f; 2547 nl2++; 2548 endpoint[nl2].type = 1; /* upper end */ 2549 endpoint[nl2].val = e + f; 2550 nl2++; 2551 } 2552 /* 2553 * Construct sorted indx[] of endpoint[] indexes ordered by 2554 * offset. 
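 *
 * For illustration with hypothetical numbers: a candidate with
 * offset +2 ms and root distance 10 ms contributes the endpoints
 *
 *	{ type -1, val  -8 ms }	(lower edge, offset - rootdist)
 *	{ type +1, val +12 ms }	(upper edge, offset + rootdist)
 *
 * and indx[] orders all such edges by val, so the intersection scan
 * below can count overlapping correctness intervals from either
 * end. Given three such candidates with intervals [-8,+12], [-2,+6]
 * and [+30,+40] ms, no point lies in all three, but allowing one
 * falseticker the scans settle on low = -2 ms and high = +6 ms, so
 * the first two are truechimers and the third is discarded.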
2555 */ 2556 for (i = 0; i < nl2; i++) 2557 indx[i] = i; 2558 for (i = 0; i < nl2; i++) { 2559 endp = endpoint[indx[i]]; 2560 e = endp.val; 2561 k = i; 2562 for (j = i + 1; j < nl2; j++) { 2563 endp = endpoint[indx[j]]; 2564 if (endp.val < e) { 2565 e = endp.val; 2566 k = j; 2567 } 2568 } 2569 if (k != i) { 2570 j = indx[k]; 2571 indx[k] = indx[i]; 2572 indx[i] = j; 2573 } 2574 } 2575 for (i = 0; i < nl2; i++) 2576 DPRINTF(3, ("select: endpoint %2d %.6f\n", 2577 endpoint[indx[i]].type, endpoint[indx[i]].val)); 2578 2579 /* 2580 * This is the actual algorithm that cleaves the truechimers 2581 * from the falsetickers. The original algorithm was described 2582 * in Keith Marzullo's dissertation, but has been modified for 2583 * better accuracy. 2584 * 2585 * Briefly put, we first assume there are no falsetickers, then 2586 * scan the candidate list first from the low end upwards and 2587 * then from the high end downwards. The scans stop when the 2588 * number of intersections equals the number of candidates less 2589 * the number of falsetickers. If this doesn't happen for a 2590 * given number of falsetickers, we bump the number of 2591 * falsetickers and try again. If the number of falsetickers 2592 * becomes equal to or greater than half the number of 2593 * candidates, the Albanians have won the Byzantine wars and 2594 * correct synchronization is not possible. 2595 * 2596 * Here, nlist is the number of candidates and allow is the 2597 * number of falsetickers. Upon exit, the truechimers are the 2598 * survivors with offsets not less than low and not greater than 2599 * high. There may be none of them. 2600 */ 2601 low = 1e9; 2602 high = -1e9; 2603 for (allow = 0; 2 * allow < nlist; allow++) { 2604 2605 /* 2606 * Bound the interval (low, high) as the smallest 2607 * interval containing points from the most sources. 2608 */ 2609 n = 0; 2610 for (i = 0; i < nl2; i++) { 2611 low = endpoint[indx[i]].val; 2612 n -= endpoint[indx[i]].type; 2613 if (n >= nlist - allow) 2614 break; 2615 } 2616 n = 0; 2617 for (j = nl2 - 1; j >= 0; j--) { 2618 high = endpoint[indx[j]].val; 2619 n += endpoint[indx[j]].type; 2620 if (n >= nlist - allow) 2621 break; 2622 } 2623 2624 /* 2625 * If an interval containing truechimers is found, stop. 2626 * If not, increase the number of falsetickers and go 2627 * around again. 2628 */ 2629 if (high > low) 2630 break; 2631 } 2632 2633 /* 2634 * Clustering algorithm. Whittle candidate list of falsetickers, 2635 * who leave the island immediately. The TRUE peer is always a 2636 * truechimer. We must leave at least one peer to collect the 2637 * million bucks. 2638 * 2639 * We assert the correct time is contained in the interval, but 2640 * the best offset estimate for the interval might not be 2641 * contained in the interval. For this purpose, a truechimer is 2642 * defined as the midpoint of an interval that overlaps the 2643 * intersection interval. 2644 */ 2645 j = 0; 2646 for (i = 0; i < nlist; i++) { 2647 double h; 2648 2649 peer = peers[i].peer; 2650 h = peers[i].synch; 2651 if ((high <= low || peer->offset + h < low || 2652 peer->offset - h > high) && !(peer->flags & FLAG_TRUE)) 2653 continue; 2654 2655 #ifdef REFCLOCK 2656 /* 2657 * Eligible PPS peers must survive the intersection 2658 * algorithm. Use the first one found, but don't 2659 * include any of them in the cluster population. 
2660 */ 2661 if (peer->flags & FLAG_PPS) { 2662 if (typepps == NULL) 2663 typepps = peer; 2664 continue; 2665 } 2666 #endif /* REFCLOCK */ 2667 2668 if (j != i) 2669 peers[j] = peers[i]; 2670 j++; 2671 } 2672 nlist = j; 2673 2674 /* 2675 * If no survivors remain at this point, check if the modem 2676 * driver, local driver or orphan parent in that order. If so, 2677 * nominate the first one found as the only survivor. 2678 * Otherwise, give up and leave the island to the rats. 2679 */ 2680 if (nlist == 0) { 2681 peers[0].error = 0; 2682 peers[0].synch = sys_mindisp; 2683 #ifdef REFCLOCK 2684 if (typeacts != NULL) { 2685 peers[0].peer = typeacts; 2686 nlist = 1; 2687 } else if (typelocal != NULL) { 2688 peers[0].peer = typelocal; 2689 nlist = 1; 2690 } else 2691 #endif /* REFCLOCK */ 2692 if (typeorphan != NULL) { 2693 peers[0].peer = typeorphan; 2694 nlist = 1; 2695 } 2696 } 2697 2698 /* 2699 * Mark the candidates at this point as truechimers. 2700 */ 2701 for (i = 0; i < nlist; i++) { 2702 peers[i].peer->new_status = CTL_PST_SEL_SELCAND; 2703 DPRINTF(2, ("select: survivor %s %f\n", 2704 stoa(&peers[i].peer->srcadr), peers[i].synch)); 2705 } 2706 2707 /* 2708 * Now, vote outlyers off the island by select jitter weighted 2709 * by root distance. Continue voting as long as there are more 2710 * than sys_minclock survivors and the select jitter of the peer 2711 * with the worst metric is greater than the minimum peer 2712 * jitter. Stop if we are about to discard a TRUE or PREFER 2713 * peer, who of course have the immunity idol. 2714 */ 2715 while (1) { 2716 d = 1e9; 2717 e = -1e9; 2718 g = 0; 2719 k = 0; 2720 for (i = 0; i < nlist; i++) { 2721 if (peers[i].error < d) 2722 d = peers[i].error; 2723 peers[i].seljit = 0; 2724 if (nlist > 1) { 2725 f = 0; 2726 for (j = 0; j < nlist; j++) 2727 f += DIFF(peers[j].peer->offset, 2728 peers[i].peer->offset); 2729 peers[i].seljit = SQRT(f / (nlist - 1)); 2730 } 2731 if (peers[i].seljit * peers[i].synch > e) { 2732 g = peers[i].seljit; 2733 e = peers[i].seljit * peers[i].synch; 2734 k = i; 2735 } 2736 } 2737 g = max(g, LOGTOD(sys_precision)); 2738 if (nlist <= max(1, sys_minclock) || g <= d || 2739 ((FLAG_TRUE | FLAG_PREFER) & peers[k].peer->flags)) 2740 break; 2741 2742 DPRINTF(3, ("select: drop %s seljit %.6f jit %.6f\n", 2743 ntoa(&peers[k].peer->srcadr), g, d)); 2744 if (nlist > sys_maxclock) 2745 peers[k].peer->new_status = CTL_PST_SEL_EXCESS; 2746 for (j = k + 1; j < nlist; j++) 2747 peers[j - 1] = peers[j]; 2748 nlist--; 2749 } 2750 2751 /* 2752 * What remains is a list usually not greater than sys_minclock 2753 * peers. Note that unsynchronized peers cannot survive this 2754 * far. Count and mark these survivors. 2755 * 2756 * While at it, count the number of leap warning bits found. 2757 * This will be used later to vote the system leap warning bit. 2758 * If a leap warning bit is found on a reference clock, the vote 2759 * is always won. 2760 * 2761 * Choose the system peer using a hybrid metric composed of the 2762 * selection jitter scaled by the root distance augmented by 2763 * stratum scaled by sys_mindisp (.001 by default). The goal of 2764 * the small stratum factor is to avoid clockhop between a 2765 * reference clock and a network peer which has a refclock and 2766 * is using an older ntpd, which does not floor sys_rootdisp at 2767 * sys_mindisp. 2768 * 2769 * In contrast, ntpd 4.2.6 and earlier used stratum primarily 2770 * in selecting the system peer, using a weight of 1 second of 2771 * additional root distance per stratum. 
This heavy bias is no 2772 * longer appropriate, as the scaled root distance provides a 2773 * more rational metric carrying the cumulative error budget. 2774 */ 2775 e = 1e9; 2776 speer = 0; 2777 leap_vote_ins = 0; 2778 leap_vote_del = 0; 2779 for (i = 0; i < nlist; i++) { 2780 peer = peers[i].peer; 2781 peer->unreach = 0; 2782 peer->new_status = CTL_PST_SEL_SYNCCAND; 2783 sys_survivors++; 2784 if (peer->leap == LEAP_ADDSECOND) { 2785 if (peer->flags & FLAG_REFCLOCK) 2786 leap_vote_ins = nlist; 2787 else if (leap_vote_ins < nlist) 2788 leap_vote_ins++; 2789 } 2790 if (peer->leap == LEAP_DELSECOND) { 2791 if (peer->flags & FLAG_REFCLOCK) 2792 leap_vote_del = nlist; 2793 else if (leap_vote_del < nlist) 2794 leap_vote_del++; 2795 } 2796 if (peer->flags & FLAG_PREFER) 2797 sys_prefer = peer; 2798 speermet = peers[i].seljit * peers[i].synch + 2799 peer->stratum * sys_mindisp; 2800 if (speermet < e) { 2801 e = speermet; 2802 speer = i; 2803 } 2804 } 2805 2806 /* 2807 * Unless there are at least sys_minsane survivors, leave the 2808 * building dark. Otherwise, do a clockhop dance. Ordinarily, 2809 * use the selected survivor speer. However, if the current 2810 * system peer is not speer, stay with the current system peer 2811 * as long as it doesn't get too old or too ugly. 2812 */ 2813 if (nlist > 0 && nlist >= sys_minsane) { 2814 double x; 2815 2816 typesystem = peers[speer].peer; 2817 if (osys_peer == NULL || osys_peer == typesystem) { 2818 sys_clockhop = 0; 2819 } else if ((x = fabs(typesystem->offset - 2820 osys_peer->offset)) < sys_mindisp) { 2821 if (sys_clockhop == 0) 2822 sys_clockhop = sys_mindisp; 2823 else 2824 sys_clockhop *= .5; 2825 DPRINTF(1, ("select: clockhop %d %.6f %.6f\n", 2826 j, x, sys_clockhop)); 2827 if (fabs(x) < sys_clockhop) 2828 typesystem = osys_peer; 2829 else 2830 sys_clockhop = 0; 2831 } else { 2832 sys_clockhop = 0; 2833 } 2834 } 2835 2836 /* 2837 * Mitigation rules of the game. We have the pick of the 2838 * litter in typesystem if any survivors are left. If 2839 * there is a prefer peer, use its offset and jitter. 2840 * Otherwise, use the combined offset and jitter of all kitters. 2841 */ 2842 if (typesystem != NULL) { 2843 if (sys_prefer == NULL) { 2844 typesystem->new_status = CTL_PST_SEL_SYSPEER; 2845 clock_combine(peers, sys_survivors, speer); 2846 } else { 2847 typesystem = sys_prefer; 2848 sys_clockhop = 0; 2849 typesystem->new_status = CTL_PST_SEL_SYSPEER; 2850 sys_offset = typesystem->offset; 2851 sys_jitter = typesystem->jitter; 2852 } 2853 DPRINTF(1, ("select: combine offset %.9f jitter %.9f\n", 2854 sys_offset, sys_jitter)); 2855 } 2856 #ifdef REFCLOCK 2857 /* 2858 * If a PPS driver is lit and the combined offset is less than 2859 * 0.4 s, select the driver as the PPS peer and use its offset 2860 * and jitter. However, if this is the atom driver, use it only 2861 * if there is a prefer peer or there are no survivors and none 2862 * are required.
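 *
 * Restated as a sketch (the same logic as the test below): the PPS
 * driver is adopted only when
 *
 *	fabs(sys_offset) < 0.4 &&
 *	(typepps->refclktype != REFCLK_ATOM_PPS ||
 *	 sys_prefer != NULL ||
 *	 (typesystem == NULL && sys_minsane == 0))
 *
 * so a bare atom driver never takes over without a prefer peer or a
 * minsane of zero.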
2863 */ 2864 if (typepps != NULL && fabs(sys_offset) < 0.4 && 2865 (typepps->refclktype != REFCLK_ATOM_PPS || 2866 (typepps->refclktype == REFCLK_ATOM_PPS && (sys_prefer != 2867 NULL || (typesystem == NULL && sys_minsane == 0))))) { 2868 typesystem = typepps; 2869 sys_clockhop = 0; 2870 typesystem->new_status = CTL_PST_SEL_PPS; 2871 sys_offset = typesystem->offset; 2872 sys_jitter = typesystem->jitter; 2873 DPRINTF(1, ("select: pps offset %.9f jitter %.9f\n", 2874 sys_offset, sys_jitter)); 2875 } 2876 #endif /* REFCLOCK */ 2877 2878 /* 2879 * If there are no survivors at this point, there is no 2880 * system peer. If so and this is an old update, keep the 2881 * current statistics, but do not update the clock. 2882 */ 2883 if (typesystem == NULL) { 2884 if (osys_peer != NULL) { 2885 if (sys_orphwait > 0) 2886 orphwait = current_time + sys_orphwait; 2887 report_event(EVNT_NOPEER, NULL, NULL); 2888 } 2889 sys_peer = NULL; 2890 for (peer = peer_list; peer != NULL; peer = peer->p_link) 2891 peer->status = peer->new_status; 2892 return; 2893 } 2894 2895 /* 2896 * Do not use old data, as this may mess up the clock discipline 2897 * stability. 2898 */ 2899 if (typesystem->epoch <= sys_epoch) 2900 return; 2901 2902 /* 2903 * We have found the alpha male. Wind the clock. 2904 */ 2905 if (osys_peer != typesystem) 2906 report_event(PEVNT_NEWPEER, typesystem, NULL); 2907 for (peer = peer_list; peer != NULL; peer = peer->p_link) 2908 peer->status = peer->new_status; 2909 clock_update(typesystem); 2910 } 2911 2912 2913 static void 2914 clock_combine( 2915 peer_select * peers, /* survivor list */ 2916 int npeers, /* number of survivors */ 2917 int syspeer /* index of sys.peer */ 2918 ) 2919 { 2920 int i; 2921 double x, y, z, w; 2922 2923 y = z = w = 0; 2924 for (i = 0; i < npeers; i++) { 2925 x = 1. / peers[i].synch; 2926 y += x; 2927 z += x * peers[i].peer->offset; 2928 w += x * DIFF(peers[i].peer->offset, 2929 peers[syspeer].peer->offset); 2930 } 2931 sys_offset = z / y; 2932 sys_jitter = SQRT(w / y + SQUARE(peers[syspeer].seljit)); 2933 } 2934 2935 2936 /* 2937 * root_distance - compute synchronization distance from peer to root 2938 */ 2939 static double 2940 root_distance( 2941 struct peer *peer /* peer structure pointer */ 2942 ) 2943 { 2944 double dtemp; 2945 2946 /* 2947 * Root Distance (LAMBDA) is defined as: 2948 * (delta + DELTA)/2 + epsilon + EPSILON + phi 2949 * 2950 * where: 2951 * delta is the round-trip delay 2952 * DELTA is the root delay 2953 * epsilon is the remote server precision + local precision 2954 * + (15 usec each second) 2955 * EPSILON is the root dispersion 2956 * phi is the peer jitter statistic 2957 * 2958 * NB: Think hard about why we are using these values, and what 2959 * the alternatives are, and the various pros/cons. 2960 * 2961 * DLM thinks these are probably the best choices from any of the 2962 * other worse choices. 2963 */ 2964 dtemp = (peer->delay + peer->rootdelay) / 2 2965 + LOGTOD(peer->precision) 2966 + LOGTOD(sys_precision) 2967 + clock_phi * (current_time - peer->update) 2968 + peer->rootdisp 2969 + peer->jitter; 2970 /* 2971 * Careful squeak here. The value returned must be greater than 2972 * the minimum root dispersion in order to avoid clockhop with 2973 * highly precise reference clocks. Note that the root distance 2974 * cannot exceed the sys_maxdist, as this is the cutoff by the 2975 * selection algorithm. 
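 *
 * A worked example with hypothetical values: delay 20 ms, root
 * delay 10 ms, peer plus local precision about 2 us, 30 s since the
 * last update (30 * 15e-6 = 0.45 ms), root dispersion 5 ms and
 * jitter 1 ms give roughly
 *
 *	(.020 + .010) / 2 + .000002 + .00045 + .005 + .001 = .0215 s
 *
 * comfortably above the sys_mindisp floor and well inside the
 * default selection threshold sys_maxdist of 1.5 s.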
2976 */ 2977 if (dtemp < sys_mindisp) 2978 dtemp = sys_mindisp; 2979 return (dtemp); 2980 } 2981 2982 2983 /* 2984 * peer_xmit - send packet for persistent association. 2985 */ 2986 static void 2987 peer_xmit( 2988 struct peer *peer /* peer structure pointer */ 2989 ) 2990 { 2991 struct pkt xpkt; /* transmit packet */ 2992 size_t sendlen, authlen; 2993 keyid_t xkeyid = 0; /* transmit key ID */ 2994 l_fp xmt_tx, xmt_ty; 2995 2996 if (!peer->dstadr) /* drop peers without interface */ 2997 return; 2998 2999 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, peer->version, 3000 peer->hmode); 3001 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3002 xpkt.ppoll = peer->hpoll; 3003 xpkt.precision = sys_precision; 3004 xpkt.refid = sys_refid; 3005 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3006 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3007 HTONL_FP(&sys_reftime, &xpkt.reftime); 3008 HTONL_FP(&peer->rec, &xpkt.org); 3009 HTONL_FP(&peer->dst, &xpkt.rec); 3010 3011 /* 3012 * If the received packet contains a MAC, the transmitted packet 3013 * is authenticated and contains a MAC. If not, the transmitted 3014 * packet is not authenticated. 3015 * 3016 * It is most important when autokey is in use that the local 3017 * interface IP address be known before the first packet is 3018 * sent. Otherwise, it is not possible to compute a correct MAC 3019 * the recipient will accept. Thus, the I/O semantics have to do 3020 * a little more work. In particular, the wildcard interface 3021 * might not be usable. 3022 */ 3023 sendlen = LEN_PKT_NOMAC; 3024 #ifdef AUTOKEY 3025 if (!(peer->flags & FLAG_SKEY) && peer->keyid == 0) { 3026 #else /* !AUTOKEY follows */ 3027 if (peer->keyid == 0) { 3028 #endif /* !AUTOKEY */ 3029 3030 /* 3031 * Transmit a-priori timestamps 3032 */ 3033 get_systime(&xmt_tx); 3034 if (peer->flip == 0) { /* basic mode */ 3035 peer->aorg = xmt_tx; 3036 HTONL_FP(&xmt_tx, &xpkt.xmt); 3037 } else { /* interleaved modes */ 3038 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3039 HTONL_FP(&xmt_tx, &xpkt.xmt); 3040 if (peer->flip > 0) 3041 HTONL_FP(&peer->borg, 3042 &xpkt.org); 3043 else 3044 HTONL_FP(&peer->aorg, 3045 &xpkt.org); 3046 } else { /* symmetric */ 3047 if (peer->flip > 0) 3048 HTONL_FP(&peer->borg, 3049 &xpkt.xmt); 3050 else 3051 HTONL_FP(&peer->aorg, 3052 &xpkt.xmt); 3053 } 3054 } 3055 peer->t21_bytes = sendlen; 3056 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], 3057 &xpkt, sendlen); 3058 peer->sent++; 3059 peer->throttle += (1 << peer->minpoll) - 2; 3060 3061 /* 3062 * Capture a-posteriori timestamps 3063 */ 3064 get_systime(&xmt_ty); 3065 if (peer->flip != 0) { /* interleaved modes */ 3066 if (peer->flip > 0) 3067 peer->aorg = xmt_ty; 3068 else 3069 peer->borg = xmt_ty; 3070 peer->flip = -peer->flip; 3071 } 3072 L_SUB(&xmt_ty, &xmt_tx); 3073 LFPTOD(&xmt_ty, peer->xleave); 3074 #ifdef DEBUG 3075 if (debug) 3076 printf("transmit: at %ld %s->%s mode %d len %zu\n", 3077 current_time, peer->dstadr ? 3078 stoa(&peer->dstadr->sin) : "-", 3079 stoa(&peer->srcadr), peer->hmode, sendlen); 3080 #endif 3081 return; 3082 } 3083 3084 /* 3085 * Authentication is enabled, so the transmitted packet must be 3086 * authenticated. If autokey is enabled, fuss with the various 3087 * modes; otherwise, symmetric key cryptography is used. 
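 *
 * For the plain symmetric-key case the remainder of this routine
 * reduces to a sketch like
 *
 *	get_systime(&xmt_tx);
 *	HTONL_FP(&xmt_tx, &xpkt.xmt);
 *	sendlen = LEN_PKT_NOMAC;
 *	sendlen += authencrypt(peer->keyid, (u_int32 *)&xpkt, sendlen);
 *	sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl],
 *	    &xpkt, sendlen);
 *
 * where authencrypt() appends the key ID and message digest, or
 * returns zero if the key is missing and the packet is not sent.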
3088 */ 3089 #ifdef AUTOKEY 3090 if (peer->flags & FLAG_SKEY) { 3091 struct exten *exten; /* extension field */ 3092 3093 /* 3094 * The Public Key Dance (PKD): Cryptographic credentials 3095 * are contained in extension fields, each including a 3096 * 4-octet length/code word followed by a 4-octet 3097 * association ID and optional additional data. Optional 3098 * data includes a 4-octet data length field followed by 3099 * the data itself. Request messages are sent from a 3100 * configured association; response messages can be sent 3101 * from a configured association or can take the fast 3102 * path without ever matching an association. Response 3103 * messages have the same code as the request, but have 3104 * a response bit and possibly an error bit set. In this 3105 * implementation, a message may contain no more than 3106 * one command and one or more responses. 3107 * 3108 * Cryptographic session keys include both a public and 3109 * a private component. Request and response messages 3110 * using extension fields are always sent with the 3111 * private component set to zero. Packets without 3112 * extension fields include the private component when 3113 * the session key is generated. 3114 */ 3115 while (1) { 3116 3117 /* 3118 * Allocate and initialize a keylist if not 3119 * already done. Then, use the list in inverse 3120 * order, discarding keys once used. Keep the 3121 * latest key around until the next one, so 3122 * clients can use client/server packets to 3123 * compute propagation delay. 3124 * 3125 * Note that once a key is used from the list, 3126 * it is retained in the key cache until the 3127 * next key is used. This is to allow a client 3128 * to retrieve the encrypted session key 3129 * identifier to verify authenticity. 3130 * 3131 * If for some reason a key is no longer in the 3132 * key cache, a birthday has happened or the key 3133 * has expired, so the pseudo-random sequence is 3134 * broken. In that case, purge the keylist and 3135 * regenerate it. 3136 */ 3137 if (peer->keynumber == 0) 3138 make_keylist(peer, peer->dstadr); 3139 else 3140 peer->keynumber--; 3141 xkeyid = peer->keylist[peer->keynumber]; 3142 if (authistrusted(xkeyid)) 3143 break; 3144 else 3145 key_expire(peer); 3146 } 3147 peer->keyid = xkeyid; 3148 exten = NULL; 3149 switch (peer->hmode) { 3150 3151 /* 3152 * In broadcast server mode the autokey values are 3153 * required by the broadcast clients. Push them when a 3154 * new keylist is generated; otherwise, push the 3155 * association message so the client can request them at 3156 * other times. 3157 */ 3158 case MODE_BROADCAST: 3159 if (peer->flags & FLAG_ASSOC) 3160 exten = crypto_args(peer, CRYPTO_AUTO | 3161 CRYPTO_RESP, peer->associd, NULL); 3162 else 3163 exten = crypto_args(peer, CRYPTO_ASSOC | 3164 CRYPTO_RESP, peer->associd, NULL); 3165 break; 3166 3167 /* 3168 * In symmetric modes the parameter, certificate, 3169 * identity, cookie and autokey exchanges are 3170 * required. The leapsecond exchange is optional. But, a 3171 * peer will not believe the other peer until the other 3172 * peer has synchronized, so the certificate exchange 3173 * might loop until then. If a peer finds a broken 3174 * autokey sequence, it uses the autokey exchange to 3175 * retrieve the autokey values. In any case, if a new 3176 * keylist is generated, the autokey values are pushed. 3177 */ 3178 case MODE_ACTIVE: 3179 case MODE_PASSIVE: 3180 3181 /* 3182 * Parameter, certificate and identity.
3183 */ 3184 if (!peer->crypto) 3185 exten = crypto_args(peer, CRYPTO_ASSOC, 3186 peer->associd, hostval.ptr); 3187 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3188 exten = crypto_args(peer, CRYPTO_CERT, 3189 peer->associd, peer->issuer); 3190 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3191 exten = crypto_args(peer, 3192 crypto_ident(peer), peer->associd, 3193 NULL); 3194 3195 /* 3196 * Cookie and autokey. We request the cookie 3197 * only when this peer and the other peer 3198 * are synchronized. But, this peer needs the 3199 * autokey values when the cookie is zero. Any 3200 * time we regenerate the key list, we offer the 3201 * autokey values without being asked. If for 3202 * some reason either peer finds a broken 3203 * autokey sequence, the autokey exchange is 3204 * used to retrieve the autokey values. 3205 */ 3206 else if (sys_leap != LEAP_NOTINSYNC && 3207 peer->leap != LEAP_NOTINSYNC && 3208 !(peer->crypto & CRYPTO_FLAG_COOK)) 3209 exten = crypto_args(peer, CRYPTO_COOK, 3210 peer->associd, NULL); 3211 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3212 exten = crypto_args(peer, CRYPTO_AUTO, 3213 peer->associd, NULL); 3214 else if (peer->flags & FLAG_ASSOC && 3215 peer->crypto & CRYPTO_FLAG_SIGN) 3216 exten = crypto_args(peer, CRYPTO_AUTO | 3217 CRYPTO_RESP, peer->assoc, NULL); 3218 3219 /* 3220 * Wait for clock sync, then sign the 3221 * certificate and retrieve the leapsecond 3222 * values. 3223 */ 3224 else if (sys_leap == LEAP_NOTINSYNC) 3225 break; 3226 3227 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3228 exten = crypto_args(peer, CRYPTO_SIGN, 3229 peer->associd, hostval.ptr); 3230 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3231 exten = crypto_args(peer, CRYPTO_LEAP, 3232 peer->associd, NULL); 3233 break; 3234 3235 /* 3236 * In client mode the parameter, certificate, identity, 3237 * cookie and sign exchanges are required. The 3238 * leapsecond exchange is optional. In broadcast client 3239 * mode the same exchanges are required, except that the 3240 * autokey exchange substitutes for the cookie 3241 * exchange, since the cookie is always zero. If the 3242 * broadcast client finds a broken autokey sequence, it 3243 * uses the autokey exchange to retrieve the autokey 3244 * values. 3245 */ 3246 case MODE_CLIENT: 3247 3248 /* 3249 * Parameter, certificate and identity. 3250 */ 3251 if (!peer->crypto) 3252 exten = crypto_args(peer, CRYPTO_ASSOC, 3253 peer->associd, hostval.ptr); 3254 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3255 exten = crypto_args(peer, CRYPTO_CERT, 3256 peer->associd, peer->issuer); 3257 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3258 exten = crypto_args(peer, 3259 crypto_ident(peer), peer->associd, 3260 NULL); 3261 3262 /* 3263 * Cookie and autokey. These are requests, but 3264 * we use the peer association ID with autokey 3265 * rather than our own. 3266 */ 3267 else if (!(peer->crypto & CRYPTO_FLAG_COOK)) 3268 exten = crypto_args(peer, CRYPTO_COOK, 3269 peer->associd, NULL); 3270 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3271 exten = crypto_args(peer, CRYPTO_AUTO, 3272 peer->assoc, NULL); 3273 3274 /* 3275 * Wait for clock sync, then sign the 3276 * certificate and retrieve the leapsecond 3277 * values.
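 *
 * As a reading aid, the if-chain for this case works through
 * the exchanges in roughly this order, each step gated on the
 * flag set by the previous one:
 *
 *	ASSOC -> CERT -> identity (crypto_ident()) -> COOK -> AUTO
 *	      -> (wait for clock sync) -> SIGN -> LEAP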
3278 */ 3279 else if (sys_leap == LEAP_NOTINSYNC) 3280 break; 3281 3282 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3283 exten = crypto_args(peer, CRYPTO_SIGN, 3284 peer->associd, hostval.ptr); 3285 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3286 exten = crypto_args(peer, CRYPTO_LEAP, 3287 peer->associd, NULL); 3288 break; 3289 } 3290 3291 /* 3292 * Add a queued extension field if present. This is 3293 * always a request message, so the reply ID is already 3294 * in the message. If an error occurs, the error bit is 3295 * lit in the response. 3296 */ 3297 if (peer->cmmd != NULL) { 3298 u_int32 temp32; 3299 3300 temp32 = CRYPTO_RESP; 3301 peer->cmmd->opcode |= htonl(temp32); 3302 sendlen += crypto_xmit(peer, &xpkt, NULL, 3303 sendlen, peer->cmmd, 0); 3304 free(peer->cmmd); 3305 peer->cmmd = NULL; 3306 } 3307 3308 /* 3309 * Add an extension field created above. All but the 3310 * autokey response message are request messages. 3311 */ 3312 if (exten != NULL) { 3313 if (exten->opcode != 0) 3314 sendlen += crypto_xmit(peer, &xpkt, 3315 NULL, sendlen, exten, 0); 3316 free(exten); 3317 } 3318 3319 /* 3320 * Calculate the next session key. Since extension 3321 * fields are present, the cookie value is zero. 3322 */ 3323 if (sendlen > (int)LEN_PKT_NOMAC) { 3324 session_key(&peer->dstadr->sin, &peer->srcadr, 3325 xkeyid, 0, 2); 3326 } 3327 } 3328 #endif /* AUTOKEY */ 3329 3330 /* 3331 * Transmit a-priori timestamps 3332 */ 3333 get_systime(&xmt_tx); 3334 if (peer->flip == 0) { /* basic mode */ 3335 peer->aorg = xmt_tx; 3336 HTONL_FP(&xmt_tx, &xpkt.xmt); 3337 } else { /* interleaved modes */ 3338 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3339 HTONL_FP(&xmt_tx, &xpkt.xmt); 3340 if (peer->flip > 0) 3341 HTONL_FP(&peer->borg, &xpkt.org); 3342 else 3343 HTONL_FP(&peer->aorg, &xpkt.org); 3344 } else { /* symmetric */ 3345 if (peer->flip > 0) 3346 HTONL_FP(&peer->borg, &xpkt.xmt); 3347 else 3348 HTONL_FP(&peer->aorg, &xpkt.xmt); 3349 } 3350 } 3351 xkeyid = peer->keyid; 3352 authlen = authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3353 if (authlen == 0) { 3354 report_event(PEVNT_AUTH, peer, "no key"); 3355 peer->flash |= TEST5; /* auth error */ 3356 peer->badauth++; 3357 return; 3358 } 3359 sendlen += authlen; 3360 #ifdef AUTOKEY 3361 if (xkeyid > NTP_MAXKEY) 3362 authtrust(xkeyid, 0); 3363 #endif /* AUTOKEY */ 3364 if (sendlen > sizeof(xpkt)) { 3365 msyslog(LOG_ERR, "proto: buffer overflow %zu", sendlen); 3366 exit (-1); 3367 } 3368 peer->t21_bytes = sendlen; 3369 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], &xpkt, 3370 sendlen); 3371 peer->sent++; 3372 peer->throttle += (1 << peer->minpoll) - 2; 3373 3374 /* 3375 * Capture a-posteriori timestamps 3376 */ 3377 get_systime(&xmt_ty); 3378 if (peer->flip != 0) { /* interleaved modes */ 3379 if (peer->flip > 0) 3380 peer->aorg = xmt_ty; 3381 else 3382 peer->borg = xmt_ty; 3383 peer->flip = -peer->flip; 3384 } 3385 L_SUB(&xmt_ty, &xmt_tx); 3386 LFPTOD(&xmt_ty, peer->xleave); 3387 #ifdef AUTOKEY 3388 #ifdef DEBUG 3389 if (debug) 3390 printf("transmit: at %ld %s->%s mode %d keyid %08x len %zu index %d\n", 3391 current_time, latoa(peer->dstadr), 3392 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen, 3393 peer->keynumber); 3394 #endif 3395 #else /* !AUTOKEY follows */ 3396 #ifdef DEBUG 3397 if (debug) 3398 printf("transmit: at %ld %s->%s mode %d keyid %08x len %d\n", 3399 current_time, peer->dstadr ? 
3400 ntoa(&peer->dstadr->sin) : "-", 3401 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen); 3402 #endif 3403 #endif /* !AUTOKEY */ 3404 } 3405 3406 3407 /* 3408 * fast_xmit - Send packet for nonpersistent association. Note that 3409 * neither the source or destination can be a broadcast address. 3410 */ 3411 static void 3412 fast_xmit( 3413 struct recvbuf *rbufp, /* receive packet pointer */ 3414 int xmode, /* receive mode */ 3415 keyid_t xkeyid, /* transmit key ID */ 3416 int flags /* restrict mask */ 3417 ) 3418 { 3419 struct pkt xpkt; /* transmit packet structure */ 3420 struct pkt *rpkt; /* receive packet structure */ 3421 l_fp xmt_tx, xmt_ty; 3422 int sendlen; 3423 #ifdef AUTOKEY 3424 u_int32 temp32; 3425 #endif 3426 3427 /* 3428 * Initialize transmit packet header fields from the receive 3429 * buffer provided. We leave the fields intact as received, but 3430 * set the peer poll at the maximum of the receive peer poll and 3431 * the system minimum poll (ntp_minpoll). This is for KoD rate 3432 * control and not strictly specification compliant, but doesn't 3433 * break anything. 3434 * 3435 * If the gazinta was from a multicast address, the gazoutta 3436 * must go out another way. 3437 */ 3438 rpkt = &rbufp->recv_pkt; 3439 if (rbufp->dstadr->flags & INT_MCASTOPEN) 3440 rbufp->dstadr = findinterface(&rbufp->recv_srcadr); 3441 3442 /* 3443 * If this is a kiss-o'-death (KoD) packet, show leap 3444 * unsynchronized, stratum zero, reference ID the four-character 3445 * kiss code and system root delay. Note we don't reveal the 3446 * local time, so these packets can't be used for 3447 * synchronization. 3448 */ 3449 if (flags & RES_KOD) { 3450 sys_kodsent++; 3451 xpkt.li_vn_mode = PKT_LI_VN_MODE(LEAP_NOTINSYNC, 3452 PKT_VERSION(rpkt->li_vn_mode), xmode); 3453 xpkt.stratum = STRATUM_PKT_UNSPEC; 3454 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3455 xpkt.precision = rpkt->precision; 3456 memcpy(&xpkt.refid, "RATE", 4); 3457 xpkt.rootdelay = rpkt->rootdelay; 3458 xpkt.rootdisp = rpkt->rootdisp; 3459 xpkt.reftime = rpkt->reftime; 3460 xpkt.org = rpkt->xmt; 3461 xpkt.rec = rpkt->xmt; 3462 xpkt.xmt = rpkt->xmt; 3463 3464 /* 3465 * This is a normal packet. Use the system variables. 3466 */ 3467 } else { 3468 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, 3469 PKT_VERSION(rpkt->li_vn_mode), xmode); 3470 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3471 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3472 xpkt.precision = sys_precision; 3473 xpkt.refid = sys_refid; 3474 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3475 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3476 HTONL_FP(&sys_reftime, &xpkt.reftime); 3477 xpkt.org = rpkt->xmt; 3478 HTONL_FP(&rbufp->recv_time, &xpkt.rec); 3479 get_systime(&xmt_tx); 3480 HTONL_FP(&xmt_tx, &xpkt.xmt); 3481 } 3482 3483 #ifdef HAVE_NTP_SIGND 3484 if (flags & RES_MSSNTP) { 3485 send_via_ntp_signd(rbufp, xmode, xkeyid, flags, &xpkt); 3486 return; 3487 } 3488 #endif /* HAVE_NTP_SIGND */ 3489 3490 /* 3491 * If the received packet contains a MAC, the transmitted packet 3492 * is authenticated and contains a MAC. If not, the transmitted 3493 * packet is not authenticated. 
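 *
 * In other words (a sketch of the length test below, not extra
 * logic):
 *
 *	if (rbufp->recv_length == LEN_PKT_NOMAC)
 *		the reply is sent bare, with no MAC;
 *	else
 *		sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen);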
3494 */ 3495 sendlen = LEN_PKT_NOMAC; 3496 if (rbufp->recv_length == sendlen) { 3497 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, 3498 sendlen); 3499 #ifdef DEBUG 3500 if (debug) 3501 printf( 3502 "transmit: at %ld %s->%s mode %d len %d\n", 3503 current_time, stoa(&rbufp->dstadr->sin), 3504 stoa(&rbufp->recv_srcadr), xmode, sendlen); 3505 #endif 3506 return; 3507 } 3508 3509 /* 3510 * The received packet contains a MAC, so the transmitted packet 3511 * must be authenticated. For symmetric key cryptography, use 3512 * the predefined and trusted symmetric keys to generate the 3513 * cryptosum. For autokey cryptography, use the server private 3514 * value to generate the cookie, which is unique for every 3515 * source-destination-key ID combination. 3516 */ 3517 #ifdef AUTOKEY 3518 if (xkeyid > NTP_MAXKEY) { 3519 keyid_t cookie; 3520 3521 /* 3522 * The only way to get here is a reply to a legitimate 3523 * client request message, so the mode must be 3524 * MODE_SERVER. If an extension field is present, there 3525 * can be only one and that must be a command. Do what 3526 * needs, but with private value of zero so the poor 3527 * jerk can decode it. If no extension field is present, 3528 * use the cookie to generate the session key. 3529 */ 3530 cookie = session_key(&rbufp->recv_srcadr, 3531 &rbufp->dstadr->sin, 0, sys_private, 0); 3532 if (rbufp->recv_length > sendlen + (int)MAX_MAC_LEN) { 3533 session_key(&rbufp->dstadr->sin, 3534 &rbufp->recv_srcadr, xkeyid, 0, 2); 3535 temp32 = CRYPTO_RESP; 3536 rpkt->exten[0] |= htonl(temp32); 3537 sendlen += crypto_xmit(NULL, &xpkt, rbufp, 3538 sendlen, (struct exten *)rpkt->exten, 3539 cookie); 3540 } else { 3541 session_key(&rbufp->dstadr->sin, 3542 &rbufp->recv_srcadr, xkeyid, cookie, 2); 3543 } 3544 } 3545 #endif /* AUTOKEY */ 3546 get_systime(&xmt_tx); 3547 sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3548 #ifdef AUTOKEY 3549 if (xkeyid > NTP_MAXKEY) 3550 authtrust(xkeyid, 0); 3551 #endif /* AUTOKEY */ 3552 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, sendlen); 3553 get_systime(&xmt_ty); 3554 L_SUB(&xmt_ty, &xmt_tx); 3555 sys_authdelay = xmt_ty; 3556 #ifdef DEBUG 3557 if (debug) 3558 printf( 3559 "transmit: at %ld %s->%s mode %d keyid %08x len %d\n", 3560 current_time, ntoa(&rbufp->dstadr->sin), 3561 ntoa(&rbufp->recv_srcadr), xmode, xkeyid, sendlen); 3562 #endif 3563 } 3564 3565 3566 /* 3567 * pool_xmit - resolve hostname or send unicast solicitation for pool. 
3568 */ 3569 static void 3570 pool_xmit( 3571 struct peer *pool /* pool solicitor association */ 3572 ) 3573 { 3574 #ifdef WORKER 3575 struct pkt xpkt; /* transmit packet structure */ 3576 struct addrinfo hints; 3577 int rc; 3578 struct interface * lcladr; 3579 sockaddr_u * rmtadr; 3580 int restrict_mask; 3581 struct peer * p; 3582 l_fp xmt_tx; 3583 3584 if (NULL == pool->ai) { 3585 if (pool->addrs != NULL) { 3586 /* free() is used with copy_addrinfo_list() */ 3587 free(pool->addrs); 3588 pool->addrs = NULL; 3589 } 3590 ZERO(hints); 3591 hints.ai_family = AF(&pool->srcadr); 3592 hints.ai_socktype = SOCK_DGRAM; 3593 hints.ai_protocol = IPPROTO_UDP; 3594 /* ignore getaddrinfo_sometime() errors, we will retry */ 3595 rc = getaddrinfo_sometime( 3596 pool->hostname, 3597 "ntp", 3598 &hints, 3599 0, /* no retry */ 3600 &pool_name_resolved, 3601 (void *)(intptr_t)pool->associd); 3602 if (!rc) 3603 DPRINTF(1, ("pool DNS lookup %s started\n", 3604 pool->hostname)); 3605 else 3606 msyslog(LOG_ERR, 3607 "unable to start pool DNS %s %m", 3608 pool->hostname); 3609 return; 3610 } 3611 3612 do { 3613 /* copy_addrinfo_list ai_addr points to a sockaddr_u */ 3614 rmtadr = (sockaddr_u *)(void *)pool->ai->ai_addr; 3615 pool->ai = pool->ai->ai_next; 3616 p = findexistingpeer(rmtadr, NULL, NULL, MODE_CLIENT, 0); 3617 } while (p != NULL && pool->ai != NULL); 3618 if (p != NULL) 3619 return; /* out of addresses, re-query DNS next poll */ 3620 restrict_mask = restrictions(rmtadr); 3621 if (RES_FLAGS & restrict_mask) 3622 restrict_source(rmtadr, 0, 3623 current_time + POOL_SOLICIT_WINDOW + 1); 3624 lcladr = findinterface(rmtadr); 3625 memset(&xpkt, 0, sizeof(xpkt)); 3626 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, pool->version, 3627 MODE_CLIENT); 3628 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3629 xpkt.ppoll = pool->hpoll; 3630 xpkt.precision = sys_precision; 3631 xpkt.refid = sys_refid; 3632 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3633 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3634 HTONL_FP(&sys_reftime, &xpkt.reftime); 3635 get_systime(&xmt_tx); 3636 pool->aorg = xmt_tx; 3637 HTONL_FP(&xmt_tx, &xpkt.xmt); 3638 sendpkt(rmtadr, lcladr, sys_ttl[pool->ttl], &xpkt, 3639 LEN_PKT_NOMAC); 3640 pool->sent++; 3641 pool->throttle += (1 << pool->minpoll) - 2; 3642 #ifdef DEBUG 3643 if (debug) 3644 printf("transmit: at %ld %s->%s pool\n", 3645 current_time, latoa(lcladr), stoa(rmtadr)); 3646 #endif 3647 msyslog(LOG_INFO, "Soliciting pool server %s", stoa(rmtadr)); 3648 #endif /* WORKER */ 3649 } 3650 3651 3652 #ifdef AUTOKEY 3653 /* 3654 * group_test - test if this is the same group 3655 * 3656 * host assoc return action 3657 * none none 0 mobilize * 3658 * none group 0 mobilize * 3659 * group none 0 mobilize * 3660 * group group 1 mobilize 3661 * group different 1 ignore 3662 * * ignore if notrust 3663 */ 3664 int group_test( 3665 char *grp, 3666 char *ident 3667 ) 3668 { 3669 if (grp == NULL) 3670 return (0); 3671 3672 if (strcmp(grp, sys_groupname) == 0) 3673 return (0); 3674 3675 if (ident == NULL) 3676 return (1); 3677 3678 if (strcmp(grp, ident) == 0) 3679 return (0); 3680 3681 return (1); 3682 } 3683 #endif /* AUTOKEY */ 3684 3685 #ifdef WORKER 3686 void 3687 pool_name_resolved( 3688 int rescode, 3689 int gai_errno, 3690 void * context, 3691 const char * name, 3692 const char * service, 3693 const struct addrinfo * hints, 3694 const struct addrinfo * res 3695 ) 3696 { 3697 struct peer * pool; /* pool solicitor association */ 3698 associd_t assoc; 3699 3700 if (rescode) { 3701 msyslog(LOG_ERR, 3702 
"error resolving pool %s: %s (%d)", 3703 name, gai_strerror(rescode), rescode); 3704 return; 3705 } 3706 3707 assoc = (associd_t)(intptr_t)context; 3708 pool = findpeerbyassoc(assoc); 3709 if (NULL == pool) { 3710 msyslog(LOG_ERR, 3711 "Could not find assoc %u for pool DNS %s", 3712 assoc, name); 3713 return; 3714 } 3715 DPRINTF(1, ("pool DNS %s completed\n", name)); 3716 pool->addrs = copy_addrinfo_list(res); 3717 pool->ai = pool->addrs; 3718 pool_xmit(pool); 3719 3720 } 3721 #endif /* WORKER */ 3722 3723 3724 #ifdef AUTOKEY 3725 /* 3726 * key_expire - purge the key list 3727 */ 3728 void 3729 key_expire( 3730 struct peer *peer /* peer structure pointer */ 3731 ) 3732 { 3733 int i; 3734 3735 if (peer->keylist != NULL) { 3736 for (i = 0; i <= peer->keynumber; i++) 3737 authtrust(peer->keylist[i], 0); 3738 free(peer->keylist); 3739 peer->keylist = NULL; 3740 } 3741 value_free(&peer->sndval); 3742 peer->keynumber = 0; 3743 peer->flags &= ~FLAG_ASSOC; 3744 #ifdef DEBUG 3745 if (debug) 3746 printf("key_expire: at %lu associd %d\n", current_time, 3747 peer->associd); 3748 #endif 3749 } 3750 #endif /* AUTOKEY */ 3751 3752 3753 /* 3754 * local_refid(peer) - check peer refid to avoid selecting peers 3755 * currently synced to this ntpd. 3756 */ 3757 static int 3758 local_refid( 3759 struct peer * p 3760 ) 3761 { 3762 endpt * unicast_ep; 3763 3764 if (p->dstadr != NULL && !(INT_MCASTIF & p->dstadr->flags)) 3765 unicast_ep = p->dstadr; 3766 else 3767 unicast_ep = findinterface(&p->srcadr); 3768 3769 if (unicast_ep != NULL && p->refid == unicast_ep->addr_refid) 3770 return TRUE; 3771 else 3772 return FALSE; 3773 } 3774 3775 3776 /* 3777 * Determine if the peer is unfit for synchronization 3778 * 3779 * A peer is unfit for synchronization if 3780 * > TEST10 bad leap or stratum below floor or at or above ceiling 3781 * > TEST11 root distance exceeded for remote peer 3782 * > TEST12 a direct or indirect synchronization loop would form 3783 * > TEST13 unreachable or noselect 3784 */ 3785 int /* FALSE if fit, TRUE if unfit */ 3786 peer_unfit( 3787 struct peer *peer /* peer structure pointer */ 3788 ) 3789 { 3790 int rval = 0; 3791 3792 /* 3793 * A stratum error occurs if (1) the server has never been 3794 * synchronized, (2) the server stratum is below the floor or 3795 * greater than or equal to the ceiling. 3796 */ 3797 if (peer->leap == LEAP_NOTINSYNC || peer->stratum < sys_floor || 3798 peer->stratum >= sys_ceiling) 3799 rval |= TEST10; /* bad synch or stratum */ 3800 3801 /* 3802 * A distance error for a remote peer occurs if the root 3803 * distance is greater than or equal to the distance threshold 3804 * plus the increment due to one host poll interval. 3805 */ 3806 if (!(peer->flags & FLAG_REFCLOCK) && root_distance(peer) >= 3807 sys_maxdist + clock_phi * ULOGTOD(peer->hpoll)) 3808 rval |= TEST11; /* distance exceeded */ 3809 3810 /* 3811 * A loop error occurs if the remote peer is synchronized to the 3812 * local peer or if the remote peer is synchronized to the same 3813 * server as the local peer but only if the remote peer is 3814 * neither a reference clock nor an orphan. 3815 */ 3816 if (peer->stratum > 1 && local_refid(peer)) 3817 rval |= TEST12; /* synchronization loop */ 3818 3819 /* 3820 * An unreachable error occurs if the server is unreachable or 3821 * the noselect bit is set. 

/*
 * Determine if the peer is unfit for synchronization
 *
 * A peer is unfit for synchronization if
 * > TEST10 bad leap or stratum below floor or at or above ceiling
 * > TEST11 root distance exceeded for remote peer
 * > TEST12 a direct or indirect synchronization loop would form
 * > TEST13 unreachable or noselect
 */
int                             /* FALSE if fit, TRUE if unfit */
peer_unfit(
        struct peer *peer       /* peer structure pointer */
        )
{
        int     rval = 0;

        /*
         * A stratum error occurs if (1) the server has never been
         * synchronized or (2) the server stratum is below the floor or
         * greater than or equal to the ceiling.
         */
        if (peer->leap == LEAP_NOTINSYNC || peer->stratum < sys_floor ||
            peer->stratum >= sys_ceiling)
                rval |= TEST10;         /* bad synch or stratum */

        /*
         * A distance error for a remote peer occurs if the root
         * distance is greater than or equal to the distance threshold
         * plus the increment due to one host poll interval.
         */
        if (!(peer->flags & FLAG_REFCLOCK) && root_distance(peer) >=
            sys_maxdist + clock_phi * ULOGTOD(peer->hpoll))
                rval |= TEST11;         /* distance exceeded */

        /*
         * A loop error occurs if the remote peer is synchronized to
         * the local peer, or to the same server as the local peer, but
         * only if the remote peer is neither a reference clock nor an
         * orphan.
         */
        if (peer->stratum > 1 && local_refid(peer))
                rval |= TEST12;         /* synchronization loop */

        /*
         * An unreachable error occurs if the server is unreachable or
         * the noselect bit is set.
         */
        if (!peer->reach || (peer->flags & FLAG_NOSELECT))
                rval |= TEST13;         /* unreachable */

        peer->flash &= ~PEER_TEST_MASK;
        peer->flash |= rval;
        return (rval);
}


/*
 * Find the precision of this particular machine
 */
#define MINSTEP         20e-9   /* minimum clock increment (s) */
#define MAXSTEP         1       /* maximum clock increment (s) */
#define MINCHANGES      12      /* minimum number of step samples */
#define MAXLOOPS        ((int)(1. / MINSTEP))   /* avoid infinite loop */

/*
 * This routine measures the system precision, defined as the minimum
 * of a sequence of differences between successive readings of the
 * system clock.  However, if a difference is less than MINSTEP, the
 * clock has been read more than once during a clock tick and the
 * difference is ignored.  We set MINSTEP greater than zero in case
 * something happens like a cache miss, and to tolerate underlying
 * system clocks which ensure each reading is strictly greater than
 * prior readings while using an underlying stepping (not interpolated)
 * clock.
 *
 * sys_tick and sys_precision represent the time to read the clock for
 * systems with high-precision clocks, and the tick interval or step
 * size for lower-precision stepping clocks.
 *
 * This routine also measures the time to read the clock on stepping
 * system clocks by counting the number of readings between changes of
 * the underlying clock.  With either type of clock, the minimum time
 * to read the clock is saved as sys_fuzz, and used to ensure the
 * get_systime() readings always increase and are fuzzed below
 * sys_fuzz.
 */
void
measure_precision(void)
{
        /*
         * With sys_fuzz set to zero, get_systime() fuzzing of low bits
         * is effectively disabled.  trunc_os_clock is FALSE to disable
         * get_ostime() simulation of a low-precision system clock.
         */
        set_sys_fuzz(0.);
        trunc_os_clock = FALSE;
        measured_tick = measure_tick_fuzz();
        set_sys_tick_precision(measured_tick);
        msyslog(LOG_INFO, "proto: precision = %.3f usec (%d)",
                sys_tick * 1e6, sys_precision);
        if (sys_fuzz < sys_tick) {
                msyslog(LOG_NOTICE, "proto: fuzz beneath %.3f usec",
                        sys_fuzz * 1e6);
        }
}
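
/*
 * A minimal, self-contained sketch of the same measurement idea using
 * the POSIX clock directly (illustration only, not compiled in; the
 * daemon's measurement uses get_systime() and l_fp arithmetic in
 * measure_tick_fuzz() below, and also counts repeated readings to
 * derive sys_fuzz):
 */
#if 0
#include <time.h>

static double
example_rough_tick(void)
{
        struct timespec prev, cur;
        double          d;
        double          tick = 1.;              /* MAXSTEP */
        int             i;

        clock_gettime(CLOCK_REALTIME, &prev);
        for (i = 0; i < 1000; i++) {
                clock_gettime(CLOCK_REALTIME, &cur);
                d = (cur.tv_sec - prev.tv_sec) +
                    (cur.tv_nsec - prev.tv_nsec) / 1e9;
                prev = cur;
                if (d > 20e-9)                  /* MINSTEP */
                        tick = (d < tick) ? d : tick;
        }
        return tick;
}
#endif
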

/*
 * measure_tick_fuzz()
 *
 * measures the minimum time to read the clock (stored in sys_fuzz)
 * and returns the tick, the larger of the minimum increment observed
 * between successive clock readings and the time to read the clock.
 */
double
measure_tick_fuzz(void)
{
        l_fp    minstep;        /* MINSTEP as l_fp */
        l_fp    val;            /* current seconds fraction */
        l_fp    last;           /* last seconds fraction */
        l_fp    ldiff;          /* val - last */
        double  tick;           /* computed tick value */
        double  diff;
        long    repeats;
        long    max_repeats;
        int     changes;
        int     i;              /* log2 precision */

        tick = MAXSTEP;
        max_repeats = 0;
        repeats = 0;
        changes = 0;
        DTOLFP(MINSTEP, &minstep);
        get_systime(&last);
        for (i = 0; i < MAXLOOPS && changes < MINCHANGES; i++) {
                get_systime(&val);
                ldiff = val;
                L_SUB(&ldiff, &last);
                last = val;
                if (L_ISGT(&ldiff, &minstep)) {
                        max_repeats = max(repeats, max_repeats);
                        repeats = 0;
                        changes++;
                        LFPTOD(&ldiff, diff);
                        tick = min(diff, tick);
                } else {
                        repeats++;
                }
        }
        if (changes < MINCHANGES) {
                msyslog(LOG_ERR,
                        "Fatal error: precision could not be measured (MINSTEP too large?)");
                exit(1);
        }

        if (0 == max_repeats) {
                set_sys_fuzz(tick);
        } else {
                set_sys_fuzz(tick / max_repeats);
        }

        return tick;
}


void
set_sys_tick_precision(
        double tick
        )
{
        int i;

        if (tick > 1.) {
                msyslog(LOG_ERR,
                        "unsupported tick %.3f > 1s ignored", tick);
                return;
        }
        if (tick < measured_tick) {
                msyslog(LOG_ERR,
                        "proto: tick %.3f less than measured tick %.3f, ignored",
                        tick, measured_tick);
                return;
        } else if (tick > measured_tick) {
                trunc_os_clock = TRUE;
                msyslog(LOG_NOTICE,
                        "proto: truncating system clock to multiples of %.9f",
                        tick);
        }
        sys_tick = tick;

        /*
         * Find the nearest power of two.
         */
        for (i = 0; tick <= 1; i--)
                tick *= 2;
        if (tick - 1 > 1 - tick / 2)
                i++;

        sys_precision = (s_char)i;
}
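
/*
 * Worked example (illustration only): a 100 Hz stepping clock has
 * tick = 0.01 s.  The loop above doubles it seven times before it
 * exceeds 1 (0.01 * 2^7 = 1.28), leaving i = -7, and the rounding test
 * does not fire (0.28 <= 0.36), so sys_precision becomes -7, i.e.
 * 2^-7 ~ 7.8 ms, the power of two nearest the measured tick.
 */
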

/*
 * init_proto - initialize the protocol module's data
 */
void
init_proto(void)
{
        l_fp    dummy;
        int     i;

        /*
         * Fill in the sys_* stuff.  Default is don't listen to
         * broadcasting, require authentication.
         */
        sys_leap = LEAP_NOTINSYNC;
        sys_stratum = STRATUM_UNSPEC;
        memcpy(&sys_refid, "INIT", 4);
        sys_peer = NULL;
        sys_rootdelay = 0;
        sys_rootdisp = 0;
        L_CLR(&sys_reftime);
        sys_jitter = 0;
        measure_precision();
        get_systime(&dummy);
        sys_survivors = 0;
        sys_manycastserver = 0;
        sys_bclient = 0;
        sys_bdelay = 0;
        sys_authenticate = 1;
        sys_stattime = current_time;
        orphwait = current_time + sys_orphwait;
        proto_clr_stats();
        for (i = 0; i < MAX_TTL; i++) {
                sys_ttl[i] = (u_char)((i * 256) / MAX_TTL);
                sys_ttlmax = i;
        }
        hardpps_enable = 0;
        stats_control = 1;
}


/*
 * proto_config - configure the protocol module
 */
void
proto_config(
        int     item,
        u_long  value,
        double  dvalue,
        sockaddr_u *svalue
        )
{
        /*
         * Figure out what to change, then do it.
         */
        DPRINTF(2, ("proto_config: code %d value %lu dvalue %lf\n",
                    item, value, dvalue));

        switch (item) {

        /*
         * enable and disable commands - arguments are Boolean.
         */
        case PROTO_AUTHENTICATE: /* authentication (auth) */
                sys_authenticate = value;
                break;

        case PROTO_BROADCLIENT: /* broadcast client (bclient) */
                sys_bclient = (int)value;
                if (sys_bclient == 0)
                        io_unsetbclient();
                else
                        io_setbclient();
                break;

#ifdef REFCLOCK
        case PROTO_CAL:         /* refclock calibrate (calibrate) */
                cal_enable = value;
                break;
#endif /* REFCLOCK */

        case PROTO_KERNEL:      /* kernel discipline (kernel) */
                select_loop(value);
                break;

        case PROTO_MONITOR:     /* monitoring (monitor) */
                if (value)
                        mon_start(MON_ON);
                else
                        mon_stop(MON_ON);
                break;

        case PROTO_NTP:         /* NTP discipline (ntp) */
                ntp_enable = value;
                break;

        case PROTO_MODE7:       /* mode7 management (ntpdc) */
                ntp_mode7 = value;
                break;

        case PROTO_PPS:         /* PPS discipline (pps) */
                hardpps_enable = value;
                break;

        case PROTO_FILEGEN:     /* statistics (stats) */
                stats_control = value;
                break;

        /*
         * tos command - arguments are double, sometimes cast to int
         */
        case PROTO_BEACON:      /* manycast beacon (beacon) */
                sys_beacon = (int)dvalue;
                break;

        case PROTO_BROADDELAY:  /* default broadcast delay (bdelay) */
                sys_bdelay = dvalue;
                break;

        case PROTO_CEILING:     /* stratum ceiling (ceiling) */
                sys_ceiling = (int)dvalue;
                break;

        case PROTO_COHORT:      /* cohort switch (cohort) */
                sys_cohort = (int)dvalue;
                break;

        case PROTO_FLOOR:       /* stratum floor (floor) */
                sys_floor = (int)dvalue;
                break;

        case PROTO_MAXCLOCK:    /* maximum candidates (maxclock) */
                sys_maxclock = (int)dvalue;
                break;

        case PROTO_MAXDIST:     /* select threshold (maxdist) */
                sys_maxdist = dvalue;
                break;

        case PROTO_CALLDELAY:   /* modem call delay (mdelay) */
                break;          /* NOT USED */

        case PROTO_MINCLOCK:    /* minimum candidates (minclock) */
                sys_minclock = (int)dvalue;
                break;

        case PROTO_MINDISP:     /* minimum distance (mindist) */
                sys_mindisp = dvalue;
                break;

        case PROTO_MINSANE:     /* minimum survivors (minsane) */
                sys_minsane = (int)dvalue;
                break;

        case PROTO_ORPHAN:      /* orphan stratum (orphan) */
                sys_orphan = (int)dvalue;
                break;

        case PROTO_ORPHWAIT:    /* orphan wait (orphwait) */
                orphwait -= sys_orphwait;
                sys_orphwait = (int)dvalue;
                orphwait += sys_orphwait;
                break;

        /*
         * Miscellaneous commands
         */
        case PROTO_MULTICAST_ADD: /* add group address */
                if (svalue != NULL)
                        io_multicast_add(svalue);
                sys_bclient = 1;
                break;

        case PROTO_MULTICAST_DEL: /* delete group address */
                if (svalue != NULL)
                        io_multicast_del(svalue);
                break;

        default:
                msyslog(LOG_NOTICE,
                        "proto: unsupported option %d", item);
        }
}


/*
 * proto_clr_stats - clear protocol stat counters
 */
void
proto_clr_stats(void)
{
        sys_stattime = current_time;
        sys_received = 0;
        sys_processed = 0;
        sys_newversion = 0;
        sys_oldversion = 0;
        sys_declined = 0;
        sys_restricted = 0;
        sys_badlength = 0;
        sys_badauth = 0;
        sys_limitrejected = 0;
        sys_kodsent = 0;
}
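
/*
 * Illustrative sketch only (the call shape is an assumption; the real
 * call sites live in the configuration front end): proto_config()
 * takes Boolean options in value, tos-style options in dvalue, and
 * group addresses in svalue.
 */
#if 0
static void
example_proto_config_calls(void)
{
        proto_config(PROTO_MONITOR, 1, 0., NULL);       /* enable monitor */
        proto_config(PROTO_MAXDIST, 0, 1.5, NULL);      /* tos maxdist 1.5 */
}
#endif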