/*
 * ntp_proto.c - NTP version 4 protocol machinery
 *
 * ATTENTION: Get approval from Dave Mills on all changes to this file!
 *
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "ntpd.h"
#include "ntp_stdlib.h"
#include "ntp_unixtime.h"
#include "ntp_control.h"
#include "ntp_string.h"
#include "ntp_leapsec.h"
#include "refidsmear.h"

#include <stdio.h>
#ifdef HAVE_LIBSCF_H
#include <libscf.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/*
 * This macro defines the authentication state. If x is 1 authentication
 * is required; otherwise it is optional.
 */
#define	AUTH(x, y)	((x) ? (y) == AUTH_OK \
			    : (y) == AUTH_OK || (y) == AUTH_NONE)

#define	AUTH_NONE	0	/* authentication not required */
#define	AUTH_OK		1	/* authentication OK */
#define	AUTH_ERROR	2	/* authentication error */
#define	AUTH_CRYPTO	3	/* crypto_NAK */

/*
 * Set up Kiss Code values
 */

enum kiss_codes {
	NOKISS,			/* No Kiss Code */
	RATEKISS,		/* Rate limit Kiss Code */
	DENYKISS,		/* Deny Kiss */
	RSTRKISS,		/* Restricted Kiss */
	XKISS,			/* Experimental Kiss */
	UNKNOWNKISS		/* Unknown Kiss Code */
};

/*
 * traffic shaping parameters
 */
#define	NTP_IBURST	6	/* packets in iburst */
#define	RESP_DELAY	1	/* refclock burst delay (s) */

/*
 * pool soliciting restriction duration (s)
 */
#define	POOL_SOLICIT_WINDOW	8

/*
 * peer_select groups statistics for a peer used by clock_select() and
 * clock_cluster().
 */
typedef struct peer_select_tag {
	struct peer *	peer;
	double		synch;	/* sync distance */
	double		error;	/* jitter */
	double		seljit;	/* selection jitter */
} peer_select;

/*
 * System variables are declared here. Unless specified otherwise, all
 * times are in seconds.
 */
u_char	sys_leap;		/* system leap indicator, use set_sys_leap() to change this */
u_char	xmt_leap;		/* leap indicator sent in client requests, set up by set_sys_leap() */
u_char	sys_stratum;		/* system stratum */
s_char	sys_precision;		/* local clock precision (log2 s) */
double	sys_rootdelay;		/* roundtrip delay to primary source */
double	sys_rootdisp;		/* dispersion to primary source */
u_int32	sys_refid;		/* reference id (network byte order) */
l_fp	sys_reftime;		/* last update time */
struct peer *sys_peer;		/* current peer */

#ifdef LEAP_SMEAR
struct leap_smear_info leap_smear;
#endif
int	leap_sec_in_progress;

/*
 * Rate controls. Leaky buckets are used to throttle the packet
 * transmission rates in order to protect busy servers such as at NIST
 * and USNO. There is a counter for each association and another for KoD
 * packets. The association counter decrements each second, but not
 * below zero. Each time a packet is sent the counter is incremented by
 * a configurable value representing the average interval between
 * packets. A packet is delayed as long as the counter is greater than
 * zero. Note this does not affect the time value computations.
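 *
 * A rough sketch of the scheme just described (illustrative names,
 * not the literal variables used elsewhere in ntpd):
 *
 *	every second:	if (counter > 0) counter--;
 *	on transmit:	counter += headway (average send interval);
 *	gate:		delay the packet while counter > 0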
 */
/*
 * Nonspecified system state variables
 */
int	sys_bclient;		/* broadcast client enable */
double	sys_bdelay;		/* broadcast client default delay */
int	sys_authenticate;	/* require authentication for config */
l_fp	sys_authdelay;		/* authentication delay */
double	sys_offset;		/* current local clock offset */
double	sys_mindisp = MINDISPERSE; /* minimum distance (s) */
double	sys_maxdist = MAXDISTANCE; /* selection threshold */
double	sys_jitter;		/* system jitter */
u_long	sys_epoch;		/* last clock update time */
static	double sys_clockhop;	/* clockhop threshold */
static	int leap_vote_ins;	/* leap consensus for insert */
static	int leap_vote_del;	/* leap consensus for delete */
keyid_t	sys_private;		/* private value for session seed */
int	sys_manycastserver;	/* respond to manycast client pkts */
int	ntp_mode7;		/* respond to ntpdc (mode7) */
int	peer_ntpdate;		/* active peers in ntpdate mode */
int	sys_survivors;		/* truest of the truechimers */
char	*sys_ident = NULL;	/* identity scheme */

/*
 * TOS and multicast mapping stuff
 */
int	sys_floor = 0;		/* cluster stratum floor */
int	sys_ceiling = STRATUM_UNSPEC - 1; /* cluster stratum ceiling */
int	sys_minsane = 1;	/* minimum candidates */
int	sys_minclock = NTP_MINCLOCK; /* minimum candidates */
int	sys_maxclock = NTP_MAXCLOCK; /* maximum candidates */
int	sys_cohort = 0;		/* cohort switch */
int	sys_orphan = STRATUM_UNSPEC + 1; /* orphan stratum */
int	sys_orphwait = NTP_ORPHWAIT; /* orphan wait */
int	sys_beacon = BEACON;	/* manycast beacon interval */
int	sys_ttlmax;		/* max ttl mapping vector index */
u_char	sys_ttl[MAX_TTL];	/* ttl mapping vector */

/*
 * Statistics counters - first the good, then the bad
 */
u_long	sys_stattime;		/* elapsed time */
u_long	sys_received;		/* packets received */
u_long	sys_processed;		/* packets for this host */
u_long	sys_newversion;		/* current version */
u_long	sys_oldversion;		/* old version */
u_long	sys_restricted;		/* access denied */
u_long	sys_badlength;		/* bad length or format */
u_long	sys_badauth;		/* bad authentication */
u_long	sys_declined;		/* declined */
u_long	sys_limitrejected;	/* rate exceeded */
u_long	sys_kodsent;		/* KoD sent */

static	int kiss_code_check(u_char hisleap, u_char hisstratum, u_char hismode, u_int32 refid);
static	double	root_distance	(struct peer *);
static	void	clock_combine	(peer_select *, int, int);
static	void	peer_xmit	(struct peer *);
static	void	fast_xmit	(struct recvbuf *, int, keyid_t, int);
static	void	pool_xmit	(struct peer *);
static	void	clock_update	(struct peer *);
static	void	measure_precision(void);
static	double	measure_tick_fuzz(void);
static	int	local_refid	(struct peer *);
static	int	peer_unfit	(struct peer *);
#ifdef AUTOKEY
static	int	group_test	(char *, char *);
#endif /* AUTOKEY */
#ifdef WORKER
void	pool_name_resolved	(int, int, void *, const char *,
				 const char *, const struct addrinfo *,
				 const struct addrinfo *);
#endif /* WORKER */

void
set_sys_leap(u_char new_sys_leap) {
	sys_leap = new_sys_leap;
	xmt_leap = sys_leap;

	/*
	 * Under certain conditions we send faked leap bits to clients, so
	 * eventually change xmt_leap below, but never change LEAP_NOTINSYNC.
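	 *
	 * In short, as implemented below: while a leap second is
	 * actually in progress we always advertise LEAP_NOTINSYNC, and
	 * when leap smearing is enabled we only ever advertise
	 * LEAP_NOWARNING.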
	 */
	if (xmt_leap != LEAP_NOTINSYNC) {
		if (leap_sec_in_progress) {
			/* always send "not sync" */
			xmt_leap = LEAP_NOTINSYNC;
		}
#ifdef LEAP_SMEAR
		else {
			/*
			 * If leap smear is enabled in general we must never send a leap second warning
			 * to clients, so make sure we only send "in sync".
			 */
			if (leap_smear.enabled)
				xmt_leap = LEAP_NOWARNING;
		}
#endif	/* LEAP_SMEAR */
	}
}

/*
 * Kiss Code check
 */
int
kiss_code_check(u_char hisleap, u_char hisstratum, u_char hismode, u_int32 refid) {

	if (   hismode == MODE_SERVER
	    && hisleap == LEAP_NOTINSYNC
	    && hisstratum == STRATUM_UNSPEC) {
		if (memcmp(&refid, "RATE", 4) == 0) {
			return (RATEKISS);
		} else if (memcmp(&refid, "DENY", 4) == 0) {
			return (DENYKISS);
		} else if (memcmp(&refid, "RSTR", 4) == 0) {
			return (RSTRKISS);
		} else if (memcmp(&refid, "X", 1) == 0) {
			return (XKISS);
		} else {
			return (UNKNOWNKISS);
		}
	} else {
		return (NOKISS);
	}
}
/*
 * transmit - transmit procedure called by poll timeout
 */
void
transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char	hpoll;

	/*
	 * The polling state machine. There are two kinds of machines,
	 * those that never expect a reply (broadcast and manycast
	 * server modes) and those that do (all other modes). The dance
	 * is intricate...
	 */
	hpoll = peer->hpoll;

	/*
	 * In broadcast mode the poll interval is never changed from
	 * minpoll.
	 */
	if (peer->cast_flags & (MDF_BCAST | MDF_MCAST)) {
		peer->outdate = current_time;
		if (sys_leap != LEAP_NOTINSYNC)
			peer_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In manycast mode we start with unity ttl. The ttl is
	 * increased by one for each poll until either sys_maxclock
	 * servers have been found or the maximum ttl is reached. When
	 * sys_maxclock servers are found we stop polling until one or
	 * more servers have timed out or until less than sys_minclock
	 * associations turn up. In this case additional better servers
	 * are dragged in and preempt the existing ones. Once every
	 * sys_beacon seconds we are to transmit unconditionally, but
	 * this code is not quite right -- peer->unreach counts polls
	 * and is being compared with sys_beacon, so the beacons happen
	 * every sys_beacon polls.
	 */
	if (peer->cast_flags & MDF_ACAST) {
		peer->outdate = current_time;
		if (peer->unreach > sys_beacon) {
			peer->unreach = 0;
			peer->ttl = 0;
			peer_xmit(peer);
		} else if (   sys_survivors < sys_minclock
			   || peer_associations < sys_maxclock) {
			if (peer->ttl < (u_int32)sys_ttlmax)
				peer->ttl++;
			peer_xmit(peer);
		}
		peer->unreach++;
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * Pool associations transmit unicast solicitations when there
	 * are less than a hard limit of 2 * sys_maxclock associations,
	 * and either less than sys_minclock survivors or less than
	 * sys_maxclock associations. The hard limit prevents unbounded
	 * growth in associations if the system clock or network quality
	 * result in survivor count dipping below sys_minclock often.
	 * This was observed testing with pool, where sys_maxclock == 12
	 * resulted in 60 associations without the hard limit. A
	 * similar hard limit on manycastclient ephemeral associations
	 * may be appropriate.
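	 * (With the limit in place and sys_maxclock == 12, the check
	 * below stops soliciting once more than 24 associations exist.)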
	 */
	if (peer->cast_flags & MDF_POOL) {
		peer->outdate = current_time;
		if (   (peer_associations <= 2 * sys_maxclock)
		    && (   peer_associations < sys_maxclock
			|| sys_survivors < sys_minclock))
			pool_xmit(peer);
		poll_update(peer, hpoll);
		return;
	}

	/*
	 * In unicast modes the dance is much more intricate. It is
	 * designed to back off whenever possible to minimize network
	 * traffic.
	 */
	if (peer->burst == 0) {
		u_char oreach;

		/*
		 * Update the reachability status. If not heard for
		 * three consecutive polls, stuff infinity in the clock
		 * filter.
		 */
		oreach = peer->reach;
		peer->outdate = current_time;
		peer->unreach++;
		peer->reach <<= 1;
		if (!peer->reach) {

			/*
			 * Here the peer is unreachable. If it was
			 * previously reachable raise a trap. Send a
			 * burst if enabled.
			 */
			clock_filter(peer, 0., 0., MAXDISPERSE);
			if (oreach) {
				peer_unfit(peer);
				report_event(PEVNT_UNREACH, peer, NULL);
			}
			if (   (peer->flags & FLAG_IBURST)
			    && peer->retry == 0)
				peer->retry = NTP_RETRY;
		} else {

			/*
			 * Here the peer is reachable. Send a burst if
			 * enabled and the peer is fit. Reset unreach
			 * for persistent and ephemeral associations.
			 * Unreach is also reset for survivors in
			 * clock_select().
			 */
			hpoll = sys_poll;
			if (!(peer->flags & FLAG_PREEMPT))
				peer->unreach = 0;
			if (   (peer->flags & FLAG_BURST)
			    && peer->retry == 0
			    && !peer_unfit(peer))
				peer->retry = NTP_RETRY;
		}

		/*
		 * Watch for timeout. If ephemeral, toss the rascal;
		 * otherwise, bump the poll interval. Note the
		 * poll_update() routine will clamp it to maxpoll.
		 * If preemptible and we have more peers than maxclock,
		 * and this peer has the minimum score of preemptibles,
		 * demobilize.
		 */
		if (peer->unreach >= NTP_UNREACH) {
			hpoll++;
			/* ephemeral: no FLAG_CONFIG nor FLAG_PREEMPT */
			if (!(peer->flags & (FLAG_CONFIG | FLAG_PREEMPT))) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
			if (   (peer->flags & FLAG_PREEMPT)
			    && (peer_associations > sys_maxclock)
			    && score_all(peer)) {
				report_event(PEVNT_RESTART, peer, "timeout");
				peer_clear(peer, "TIME");
				unpeer(peer);
				return;
			}
		}
	} else {
		peer->burst--;
		if (peer->burst == 0) {

			/*
			 * If ntpdate mode and the clock has not been
			 * set and all peers have completed the burst,
			 * we declare a successful failure.
			 */
			if (mode_ntpdate) {
				peer_ntpdate--;
				if (peer_ntpdate == 0) {
					msyslog(LOG_NOTICE,
					    "ntpd: no servers found");
					if (!msyslog_term)
						printf(
						    "ntpd: no servers found\n");
					exit (0);
				}
			}
		}
	}
	if (peer->retry > 0)
		peer->retry--;

	/*
	 * Do not transmit if in broadcast client mode.
	 */
	if (peer->hmode != MODE_BCLIENT)
		peer_xmit(peer);
	poll_update(peer, hpoll);
}


/*
 * receive - receive procedure called for each packet received
 */
void
receive(
	struct recvbuf *rbufp
	)
{
	register struct peer *peer;	/* peer structure pointer */
	register struct pkt *pkt;	/* receive packet pointer */
	u_char	hisversion;		/* packet version */
	u_char	hisleap;		/* packet leap indicator */
	u_char	hismode;		/* packet mode */
	u_char	hisstratum;		/* packet stratum */
	u_short	restrict_mask;		/* restrict bits */
	int	kissCode = NOKISS;	/* Kiss Code */
	int	has_mac;		/* length of MAC field */
	int	authlen;		/* offset of MAC field */
	int	is_authentic = 0;	/* cryptosum ok */
	int	retcode = AM_NOMATCH;	/* match code */
	keyid_t	skeyid = 0;		/* key IDs */
	u_int32	opcode = 0;		/* extension field opcode */
	sockaddr_u *dstadr_sin;		/* active runway */
	struct peer *peer2;		/* aux peer structure pointer */
	endpt	*match_ep;		/* newpeer() local address */
	l_fp	p_org;			/* origin timestamp */
	l_fp	p_rec;			/* receive timestamp */
	l_fp	p_xmt;			/* transmit timestamp */
#ifdef AUTOKEY
	char	hostname[NTP_MAXSTRLEN + 1];
	char	*groupname = NULL;
	struct autokey *ap;		/* autokey structure pointer */
	int	rval;			/* cookie snatcher */
	keyid_t	pkeyid = 0, tkeyid = 0;	/* key IDs */
#endif	/* AUTOKEY */
#ifdef HAVE_NTP_SIGND
	static unsigned char zero_key[16];
#endif	/* HAVE_NTP_SIGND */

	/*
	 * Monitor the packet and get restrictions. Note that the packet
	 * length for control and private mode packets must be checked
	 * by the service routines. Some restrictions have to be handled
	 * later in order to generate a kiss-o'-death packet.
	 */
	/*
	 * Bogus port check is before anything, since it probably
	 * reveals a clogging attack.
	 */
	sys_received++;
	if (0 == SRCPORT(&rbufp->recv_srcadr)) {
		sys_badlength++;
		return;			/* bogus port */
	}
	restrict_mask = restrictions(&rbufp->recv_srcadr);
	DPRINTF(2, ("receive: at %ld %s<-%s flags %x restrict %03x\n",
		    current_time, stoa(&rbufp->dstadr->sin),
		    stoa(&rbufp->recv_srcadr),
		    rbufp->dstadr->flags, restrict_mask));
	pkt = &rbufp->recv_pkt;
	hisversion = PKT_VERSION(pkt->li_vn_mode);
	hisleap = PKT_LEAP(pkt->li_vn_mode);
	hismode = (int)PKT_MODE(pkt->li_vn_mode);
	hisstratum = PKT_TO_STRATUM(pkt->stratum);
	if (restrict_mask & RES_IGNORE) {
		sys_restricted++;
		return;			/* ignore everything */
	}
	if (hismode == MODE_PRIVATE) {
		if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) {
			sys_restricted++;
			return;		/* no query private */
		}
		process_private(rbufp, ((restrict_mask &
		    RES_NOMODIFY) == 0));
		return;
	}
	if (hismode == MODE_CONTROL) {
		if (restrict_mask & RES_NOQUERY) {
			sys_restricted++;
			return;		/* no query control */
		}
		process_control(rbufp, restrict_mask);
		return;
	}
	if (restrict_mask & RES_DONTSERVE) {
		sys_restricted++;
		return;			/* no time serve */
	}

	/*
	 * This is for testing. If restricted drop ten percent of
	 * surviving packets.
	 */
	if (restrict_mask & RES_FLAKE) {
		if ((double)ntp_random() / 0x7fffffff < .1) {
			sys_restricted++;
			return;		/* no flakeway */
		}
	}

	/*
	 * Version check must be after the query packets, since they
	 * intentionally use an early version.
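	 * (Below: packets at NTP_VERSION are counted as current,
	 * anything from NTP_OLDVERSION up is accepted as a previous
	 * version unless RES_VERSION is set for the source, and
	 * everything else is dropped.)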
	 */
	if (hisversion == NTP_VERSION) {
		sys_newversion++;		/* new version */
	} else if (   !(restrict_mask & RES_VERSION)
		   && hisversion >= NTP_OLDVERSION) {
		sys_oldversion++;		/* previous version */
	} else {
		sys_badlength++;
		return;				/* old version */
	}

	/*
	 * Figure out his mode and validate the packet. This has some
	 * legacy raunch that probably should be removed. In very early
	 * NTP versions mode 0 was equivalent to what later versions
	 * would interpret as client mode.
	 */
	if (hismode == MODE_UNSPEC) {
		if (hisversion == NTP_OLDVERSION) {
			hismode = MODE_CLIENT;
		} else {
			sys_badlength++;
			return;			/* invalid mode */
		}
	}

	/*
	 * Parse the extension field if present. We figure out whether
	 * an extension field is present by measuring the MAC size. If
	 * the number of words following the packet header is 0, no MAC
	 * is present and the packet is not authenticated. If 1, the
	 * packet is a crypto-NAK; if 3, the packet is authenticated
	 * with DES; if 5, the packet is authenticated with MD5; if 6,
	 * the packet is authenticated with SHA. If 2 or 4, the packet
	 * is a runt and discarded forthwith. If greater than 6, an
	 * extension field is present, so we subtract the length of the
	 * field and go around again.
	 */
	authlen = LEN_PKT_NOMAC;
	has_mac = rbufp->recv_length - authlen;
	while (has_mac > 0) {
		u_int32	len;
#ifdef AUTOKEY
		u_int32	hostlen;
		struct exten *ep;
#endif /* AUTOKEY */

		if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) {
			sys_badlength++;
			return;			/* bad length */
		}
		if (has_mac <= (int)MAX_MAC_LEN) {
			skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]);
			break;

		} else {
			opcode = ntohl(((u_int32 *)pkt)[authlen / 4]);
			len = opcode & 0xffff;
			if (   len % 4 != 0
			    || len < 4
			    || (int)len + authlen > rbufp->recv_length) {
				sys_badlength++;
				return;		/* bad length */
			}
#ifdef AUTOKEY
			/*
			 * Extract calling group name for later. If
			 * sys_groupname is non-NULL, there must be
			 * a group name provided to elicit a response.
			 */
			if (   (opcode & 0x3fff0000) == CRYPTO_ASSOC
			    && sys_groupname != NULL) {
				ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4];
				hostlen = ntohl(ep->vallen);
				if (   hostlen >= sizeof(hostname)
				    || hostlen > len -
					offsetof(struct exten, pkt)) {
					sys_badlength++;
					return;	/* bad length */
				}
				memcpy(hostname, &ep->pkt, hostlen);
				hostname[hostlen] = '\0';
				groupname = strchr(hostname, '@');
				if (groupname == NULL) {
					sys_declined++;
					return;
				}
				groupname++;
			}
#endif /* AUTOKEY */
			authlen += len;
			has_mac -= len;
		}
	}

	/*
	 * If has_mac is < 0 we had a malformed packet.
	 */
	if (has_mac < 0) {
		sys_badlength++;
		return;		/* bad length */
	}

	/*
	 * If authentication required, a MAC must be present.
	 */
	if (restrict_mask & RES_DONTTRUST && has_mac == 0) {
		sys_restricted++;
		return;			/* access denied */
	}

	/*
	 * Update the MRU list and finger the cloggers. It can be a
	 * little expensive, so turn it off for production use.
	 * RES_LIMITED and RES_KOD will be cleared in the returned
	 * restrict_mask unless one or both actions are warranted.
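	 * (Below, a packet still marked RES_LIMITED bumps
	 * sys_limitrejected and is then either dropped quietly or, when
	 * RES_KOD is also set and the mode allows it, answered through
	 * fast_xmit() so the sender receives a kiss-o'-death.)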
	 */
	restrict_mask = ntp_monitor(rbufp, restrict_mask);
	if (restrict_mask & RES_LIMITED) {
		sys_limitrejected++;
		if (   !(restrict_mask & RES_KOD)
		    || MODE_BROADCAST == hismode
		    || MODE_SERVER == hismode) {
			if (MODE_SERVER == hismode)
				DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n",
					    stoa(&rbufp->recv_srcadr)));
			return;			/* rate exceeded */
		}
		if (hismode == MODE_CLIENT)
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		else
			fast_xmit(rbufp, MODE_ACTIVE, skeyid,
			    restrict_mask);
		return;				/* rate exceeded */
	}
	restrict_mask &= ~RES_KOD;

	/*
	 * We have tossed out as many buggy packets as possible early in
	 * the game to reduce the exposure to a clogging attack. Now we
	 * have to burn some cycles to find the association and
	 * authenticate the packet if required. Note that we burn only
	 * digest cycles, again to reduce exposure. There may be no
	 * matching association and that's okay.
	 *
	 * More on the autokey mambo. Normally the local interface is
	 * found when the association was mobilized with respect to a
	 * designated remote address. We assume packets arriving from
	 * the remote address arrive via this interface and the local
	 * address used to construct the autokey is the unicast address
	 * of the interface. However, if the sender is a broadcaster,
	 * the interface broadcast address is used instead.
	 * Notwithstanding this technobabble, if the sender is a
	 * multicaster, the broadcast address is null, so we use the
	 * unicast address anyway. Don't ask.
	 */
	peer = findpeer(rbufp, hismode, &retcode);
	dstadr_sin = &rbufp->dstadr->sin;
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);

	/*
	 * Authentication is conditioned by three switches:
	 *
	 * NOPEER  (RES_NOPEER) do not mobilize an association unless
	 *         authenticated
	 * NOTRUST (RES_DONTTRUST) do not allow access unless
	 *         authenticated (implies NOPEER)
	 * enable  (sys_authenticate) master NOPEER switch, by default
	 *         on
	 *
	 * The NOPEER and NOTRUST can be specified on a per-client basis
	 * using the restrict command. The enable switch if on implies
	 * NOPEER for all clients. There are four outcomes:
	 *
	 * NONE    The packet has no MAC.
	 * OK      the packet has a MAC and authentication succeeds
	 * ERROR   the packet has a MAC and authentication fails
	 * CRYPTO  crypto-NAK. The MAC has four octets only.
	 *
	 * Note: The AUTH(x, y) macro is used to filter outcomes. If x
	 * is zero, acceptable outcomes of y are NONE and OK. If x is
	 * one, the only acceptable outcome of y is OK.
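	 *
	 * For example, AUTH(restrict_mask & RES_DONTTRUST, is_authentic)
	 * is satisfied by an unauthenticated packet (AUTH_NONE) only
	 * when the source is not marked NOTRUST, while an authenticated
	 * packet (AUTH_OK) passes in either case.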
	 */

	if (has_mac == 0) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_NONE; /* not required */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d len %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode,
			    authlen);
#endif
	} else if (has_mac == 4) {
		restrict_mask &= ~RES_MSSNTP;
		is_authentic = AUTH_CRYPTO; /* crypto-NAK */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d keyid %08x len %d auth %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, skeyid,
			    authlen + has_mac, is_authentic);
#endif

#ifdef HAVE_NTP_SIGND
	/*
	 * If the signature is 20 bytes long, the last 16 of
	 * which are zero, then this is a Microsoft client
	 * wanting AD-style authentication of the server's
	 * reply.
	 *
	 * This is described in Microsoft's WSPP docs, in MS-SNTP:
	 * http://msdn.microsoft.com/en-us/library/cc212930.aspx
	 */
	} else if (   has_mac == MAX_MD5_LEN
		   && (restrict_mask & RES_MSSNTP)
		   && (retcode == AM_FXMIT || retcode == AM_NEWPASS)
		   && (memcmp(zero_key, (char *)pkt + authlen + 4,
			      MAX_MD5_LEN - 4) == 0)) {
		is_authentic = AUTH_NONE;
#endif /* HAVE_NTP_SIGND */

	} else {
		restrict_mask &= ~RES_MSSNTP;
#ifdef AUTOKEY
		/*
		 * For autokey modes, generate the session key
		 * and install in the key cache. Use the socket
		 * broadcast or unicast address as appropriate.
		 */
		if (crypto_flags && skeyid > NTP_MAXKEY) {

			/*
			 * More on the autokey dance (AKD). A cookie is
			 * constructed from public and private values.
			 * For broadcast packets, the cookie is public
			 * (zero). For packets that match no
			 * association, the cookie is hashed from the
			 * addresses and private value. For server
			 * packets, the cookie was previously obtained
			 * from the server. For symmetric modes, the
			 * cookie was previously constructed using an
			 * agreement protocol; however, should PKI be
			 * unavailable, we construct a fake agreement as
			 * the EXOR of the peer and host cookies.
			 *
			 * hismode	ephemeral	persistent
			 * =======================================
			 * active	0		cookie#
			 * passive	0%		cookie#
			 * client	sys cookie	0%
			 * server	0%		sys cookie
			 * broadcast	0		0
			 *
			 * # if unsync, 0
			 * % can't happen
			 */
			if (has_mac < (int)MAX_MD5_LEN) {
				sys_badauth++;
				return;
			}
			if (hismode == MODE_BROADCAST) {

				/*
				 * For broadcaster, use the interface
				 * broadcast address when available;
				 * otherwise, use the unicast address
				 * found when the association was
				 * mobilized. However, if this is from
				 * the wildcard interface, game over.
				 */
				if (   crypto_flags
				    && rbufp->dstadr ==
				       ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) {
					sys_restricted++;
					return;		/* no wildcard */
				}
				pkeyid = 0;
				if (!SOCK_UNSPEC(&rbufp->dstadr->bcast))
					dstadr_sin =
					    &rbufp->dstadr->bcast;
			} else if (peer == NULL) {
				pkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin, 0,
				    sys_private, 0);
			} else {
				pkeyid = peer->pcookie;
			}

			/*
			 * The session key includes both the public
			 * values and cookie. In case of an extension
			 * field, the cookie used for authentication
			 * purposes is zero. Note the hash is saved for
			 * use later in the autokey mambo.
			 */
			if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) {
				session_key(&rbufp->recv_srcadr,
				    dstadr_sin, skeyid, 0, 2);
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 0);
			} else {
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    skeyid, pkeyid, 2);
			}

		}
#endif	/* AUTOKEY */

		/*
		 * Compute the cryptosum. Note a clogging attack may
		 * succeed in bloating the key cache. If an autokey,
		 * purge it immediately, since we won't be needing it
		 * again. If the packet is authentic, it can mobilize an
		 * association. Note that there is no key zero.
		 */
		if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen,
		    has_mac))
			is_authentic = AUTH_ERROR;
		else
			is_authentic = AUTH_OK;
#ifdef AUTOKEY
		if (crypto_flags && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
#endif	/* AUTOKEY */
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s<-%s mode %d keyid %08x len %d auth %d\n",
			    current_time, stoa(dstadr_sin),
			    stoa(&rbufp->recv_srcadr), hismode, skeyid,
			    authlen + has_mac, is_authentic);
#endif
	}

	/*
	 * The association matching rules are implemented by a set of
	 * routines and an association table. A packet matching an
	 * association is processed by the peer process for that
	 * association. If there are no errors, an ephemeral association
	 * is mobilized: a broadcast packet mobilizes a broadcast client
	 * association; a manycast server packet mobilizes a manycast
	 * client association; a symmetric active packet mobilizes a
	 * symmetric passive association.
	 */
	switch (retcode) {

	/*
	 * This is a client mode packet not matching any association. If
	 * an ordinary client, simply toss a server mode packet back
	 * over the fence. If a manycast client, we have to work a
	 * little harder.
	 */
	case AM_FXMIT:

		/*
		 * If authentication OK, send a server reply; otherwise,
		 * send a crypto-NAK.
		 */
		if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) {
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_SERVER, skeyid,
				    restrict_mask);
			} else if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_SERVER, 0,
				    restrict_mask);
				sys_badauth++;
			} else {
				sys_restricted++;
			}
			return;			/* hooray */
		}

		/*
		 * This must be manycast. Do not respond if not
		 * configured as a manycast server.
		 */
		if (!sys_manycastserver) {
			sys_restricted++;
			return;			/* not enabled */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */

		/*
		 * Do not respond if we are not synchronized or our
		 * stratum is greater than the manycaster or the
		 * manycaster has already synchronized to us.
		 */
		if (   sys_leap == LEAP_NOTINSYNC
		    || sys_stratum >= hisstratum
		    || (!sys_cohort && sys_stratum == hisstratum + 1)
		    || rbufp->dstadr->addr_refid == pkt->refid) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * Respond only if authentication succeeds. Don't do a
		 * crypto-NAK, as that would not be useful.
		 */
		if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic))
			fast_xmit(rbufp, MODE_SERVER, skeyid,
			    restrict_mask);
		return;				/* hooray */

	/*
	 * This is a server mode packet returned in response to a client
	 * mode packet sent to a multicast group address (for
	 * manycastclient) or to a unicast address (for pool). The
	 * origin timestamp is a good nonce to reliably associate the
	 * reply with what was sent. If there is no match, that's
	 * curious and could be an intruder attempting to clog, so we
	 * just ignore it.
	 *
	 * If the packet is authentic and the manycastclient or pool
	 * association is found, we mobilize a client association and
	 * copy pertinent variables from the manycastclient or pool
	 * association to the new client association. If not, just
	 * ignore the packet.
	 *
	 * There is an implosion hazard at the manycast client, since
	 * the manycast servers send the server packet immediately. If
	 * the guy is already here, don't fire up a duplicate.
	 */
	case AM_MANYCAST:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, NULL)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if ((peer2 = findmanycastpeer(rbufp)) == NULL) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH(  (!(peer2->cast_flags & MDF_POOL)
			     && sys_authenticate)
			  || (restrict_mask & (RES_NOPEER |
			      RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
		 */
		if (   hisleap == LEAP_NOTINSYNC
		    || hisstratum < sys_floor
		    || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}
		peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr,
			       MODE_CLIENT, hisversion, peer2->minpoll,
			       peer2->maxpoll, FLAG_PREEMPT |
			       (FLAG_IBURST & peer2->flags), MDF_UCAST |
			       MDF_UCLNT, 0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_declined++;
			return;			/* ignore duplicate */
		}

		/*
		 * After each ephemeral pool association is spun,
		 * accelerate the next poll for the pool solicitor so
		 * the pool will fill promptly.
		 */
		if (peer2->cast_flags & MDF_POOL)
			peer2->nextdate = current_time + 1;

		/*
		 * Further processing of the solicitation response would
		 * simply detect its origin timestamp as bogus for the
		 * brand-new association (it matches the prototype
		 * association) and tinker with peer->nextdate delaying
		 * first sync.
		 */
		return;		/* solicitation response handled */

	/*
	 * This is the first packet received from a broadcast server. If
	 * the packet is authentic and we are enabled as broadcast
	 * client, mobilize a broadcast client association. We don't
	 * kiss any frogs here.
	 */
	case AM_NEWBCL:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (sys_bclient == 0) {
			sys_restricted++;
			return;			/* not enabled */
		}
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {
			sys_restricted++;
			return;			/* access denied */
		}

		/*
		 * Do not respond if unsynchronized or stratum is below
		 * the floor or at or above the ceiling.
		 */
		if (   hisleap == LEAP_NOTINSYNC
		    || hisstratum < sys_floor
		    || hisstratum >= sys_ceiling) {
			sys_declined++;
			return;			/* no help */
		}

#ifdef AUTOKEY
		/*
		 * Do not respond if Autokey and the opcode is not a
		 * CRYPTO_ASSOC response with association ID.
		 */
		if (   crypto_flags && skeyid > NTP_MAXKEY
		    && (opcode & 0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) {
			sys_declined++;
			return;			/* protocol error */
		}
#endif /* AUTOKEY */

		/*
		 * Broadcasts received via a multicast address may
		 * arrive after a unicast volley has begun
		 * with the same remote address. newpeer() will not
		 * find duplicate associations on other local endpoints
		 * if a non-NULL endpoint is supplied. multicastclient
		 * ephemeral associations are unique across all local
		 * endpoints.
		 */
		if (!(INT_MCASTOPEN & rbufp->dstadr->flags))
			match_ep = rbufp->dstadr;
		else
			match_ep = NULL;

		/*
		 * Determine whether to execute the initial volley.
		 */
		if (sys_bdelay != 0) {
#ifdef AUTOKEY
			/*
			 * If a two-way exchange is not possible,
			 * neither is Autokey.
			 */
			if (crypto_flags && skeyid > NTP_MAXKEY) {
				sys_restricted++;
				return;		/* no autokey */
			}
#endif	/* AUTOKEY */

			/*
			 * Do not execute the volley. Start out in
			 * broadcast client mode.
			 */
			peer = newpeer(&rbufp->recv_srcadr, NULL,
			    match_ep, MODE_BCLIENT, hisversion,
			    pkt->ppoll, pkt->ppoll, FLAG_PREEMPT,
			    MDF_BCLNT, 0, skeyid, sys_ident);
			if (NULL == peer) {
				sys_restricted++;
				return;		/* ignore duplicate */

			} else {
				peer->delay = sys_bdelay;
			}
			break;
		}

		/*
		 * Execute the initial volley in order to calibrate the
		 * propagation delay and run the Autokey protocol.
		 *
		 * Note that the minpoll is taken from the broadcast
		 * packet, normally 6 (64 s) and that the poll interval
		 * is fixed at this value.
		 */
		peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep,
		    MODE_CLIENT, hisversion, pkt->ppoll, pkt->ppoll,
		    FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT,
		    0, skeyid, sys_ident);
		if (NULL == peer) {
			sys_restricted++;
			return;			/* ignore duplicate */
		}
#ifdef AUTOKEY
		if (skeyid > NTP_MAXKEY)
			crypto_recv(peer, rbufp);
#endif	/* AUTOKEY */

		return;				/* hooray */

	/*
	 * This is the first packet received from a symmetric active
	 * peer. If the packet is authentic and the first he sent,
	 * mobilize a passive association. If not, kiss the frog.
	 */
	case AM_NEWPASS:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, sys_ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		if (!AUTH(sys_authenticate | (restrict_mask &
		    (RES_NOPEER | RES_DONTTRUST)), is_authentic)) {

			/*
			 * If authenticated but cannot mobilize an
			 * association, send a symmetric passive
			 * response without mobilizing an association.
			 * This is for drat broken Windows clients. See
			 * Microsoft KB 875424 for preferred workaround.
			 */
			if (AUTH(restrict_mask & RES_DONTTRUST,
			    is_authentic)) {
				fast_xmit(rbufp, MODE_PASSIVE, skeyid,
				    restrict_mask);
				return;		/* hooray */
			}
			if (is_authentic == AUTH_ERROR) {
				fast_xmit(rbufp, MODE_ACTIVE, 0,
				    restrict_mask);
				sys_restricted++;
				return;
			}
			/* [Bug 2941]
			 * If we got here, the packet isn't part of an
			 * existing association, it isn't correctly
			 * authenticated, and it didn't meet either of
			 * the previous two special cases so we should
			 * just drop it on the floor. For example,
			 * crypto-NAKs (is_authentic == AUTH_CRYPTO)
			 * will make it this far. This is just
			 * debug-printed and not logged to avoid log
			 * flooding.
			 */
			DPRINTF(1, ("receive: at %ld refusing to mobilize passive association"
				    " with unknown peer %s mode %d keyid %08x len %d auth %d\n",
				    current_time, stoa(&rbufp->recv_srcadr),
				    hismode, skeyid, (authlen + has_mac),
				    is_authentic));
			sys_declined++;
			return;
		}

		/*
		 * Do not respond if synchronized and if stratum is
		 * below the floor or at or above the ceiling. Note,
		 * this allows an unsynchronized peer to synchronize to
		 * us. It would be very strange if he did and then was
		 * nipped, but that could only happen if we were
		 * operating at the top end of the range. It also means
		 * we will spin an ephemeral association in response to
		 * MODE_ACTIVE KoDs, which will time out eventually.
		 */
		if (   hisleap != LEAP_NOTINSYNC
		    && (hisstratum < sys_floor || hisstratum >= sys_ceiling)) {
			sys_declined++;
			return;			/* no help */
		}

		/*
		 * The message is correctly authenticated and allowed.
		 * Mobilize a symmetric passive association.
		 */
		if ((peer = newpeer(&rbufp->recv_srcadr, NULL,
		    rbufp->dstadr, MODE_PASSIVE, hisversion, pkt->ppoll,
		    NTP_MAXDPOLL, 0, MDF_UCAST, 0, skeyid,
		    sys_ident)) == NULL) {
			sys_declined++;
			return;			/* ignore duplicate */
		}
		break;


	/*
	 * Process regular packet. Nothing special.
	 */
	case AM_PROCPKT:

#ifdef AUTOKEY
		/*
		 * Do not respond if not the same group.
		 */
		if (group_test(groupname, peer->ident)) {
			sys_declined++;
			return;
		}
#endif /* AUTOKEY */
		break;

	/*
	 * A passive packet matches a passive association. This is
	 * usually the result of reconfiguring a client on the fly. As
	 * this association might be legitimate and this packet an
	 * attempt to deny service, just ignore it.
	 */
	case AM_ERR:
		sys_declined++;
		return;

	/*
	 * For everything else there is the bit bucket.
	 */
	default:
		sys_declined++;
		return;
	}

#ifdef AUTOKEY
	/*
	 * If the association is configured for Autokey, the packet must
	 * have a public key ID; if not, the packet must have a
	 * symmetric key ID.
	 */
	if (   is_authentic != AUTH_CRYPTO
	    && (   ((peer->flags & FLAG_SKEY) && skeyid <= NTP_MAXKEY)
		|| (!(peer->flags & FLAG_SKEY) && skeyid > NTP_MAXKEY))) {
		sys_badauth++;
		return;
	}
#endif	/* AUTOKEY */
	peer->received++;
	peer->flash &= ~PKT_TEST_MASK;
	if (peer->flags & FLAG_XBOGUS) {
		peer->flags &= ~FLAG_XBOGUS;
		peer->flash |= TEST3;
	}

	/*
	 * Next comes a rigorous schedule of timestamp checking. If the
	 * transmit timestamp is zero, the server has not initialized in
	 * interleaved modes or is horribly broken.
	 */
	if (L_ISZERO(&p_xmt)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * If the transmit timestamp duplicates a previous one, the
	 * packet is a replay. This prevents the bad guys from replaying
	 * the most recent packet, authenticated or not.
	 */
	} else if (L_ISEQU(&peer->xmt, &p_xmt)) {
		peer->flash |= TEST1;		/* duplicate */
		peer->oldpkt++;
		return;

	/*
	 * If this is a broadcast mode packet, skip further checking. If
	 * an initial volley, bail out now and let the client do its
	 * stuff. If the origin timestamp is nonzero, this is an
	 * interleaved broadcast, so restart the protocol.
	 */
	} else if (hismode == MODE_BROADCAST) {
		if (!L_ISZERO(&p_org) && !(peer->flags & FLAG_XB)) {
			peer->flags |= FLAG_XB;
			peer->aorg = p_xmt;
			peer->borg = rbufp->recv_time;
			report_event(PEVNT_XLEAVE, peer, NULL);
			return;
		}

	/*
	 * Check for bogus packet in basic mode. If found, switch to
	 * interleaved mode and resynchronize, but only after confirming
	 * the packet is not bogus in symmetric interleaved mode.
	 *
	 * This could also mean somebody is forging packets claiming to
	 * be from us, attempting to cause our server to KoD us.
	 */
	} else if (peer->flip == 0) {
		if (!L_ISEQU(&p_org, &peer->aorg)) {
			peer->bogusorg++;
			peer->flash |= TEST2;	/* bogus */
			msyslog(LOG_INFO,
			    "receive: Unexpected origin timestamp from %s",
			    ntoa(&peer->srcadr));
			if (  !L_ISZERO(&peer->dst)
			    && L_ISEQU(&p_org, &peer->dst)) {
				peer->flip = 1;
				report_event(PEVNT_XLEAVE, peer, NULL);
			}
			return;			/* Bogus packet, we are done */
		} else {
			L_CLR(&peer->aorg);
		}

	/*
	 * Check for valid nonzero timestamp fields.
	 */
	} else if (L_ISZERO(&p_org) || L_ISZERO(&p_rec) ||
		   L_ISZERO(&peer->dst)) {
		peer->flash |= TEST3;		/* unsynch */

	/*
	 * Check for bogus packet in interleaved symmetric mode. This
	 * can happen if a packet is lost, duplicated or crossed. If
	 * found, flip and resynchronize.
	 */
	} else if (   !L_ISZERO(&peer->dst)
		   && !L_ISEQU(&p_org, &peer->dst)) {
		peer->bogusorg++;
		peer->flags |= FLAG_XBOGUS;
		peer->flash |= TEST2;		/* bogus */
		return;				/* Bogus packet, we are done */
	}

	/*
	 * If this is a crypto_NAK, the server cannot authenticate a
	 * client packet. The server might have just changed keys. Clear
	 * the association and restart the protocol.
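	 *
	 * (As the code below shows, this lights TEST5, counts the bad
	 * authentication, demobilizes a preemptible association, and
	 * for Autokey clears the peer so the protocol starts over.)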
	 */
	if (is_authentic == AUTH_CRYPTO) {
		report_event(PEVNT_AUTH, peer, "crypto_NAK");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (peer->flags & FLAG_PREEMPT) {
			unpeer(peer);
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif	/* AUTOKEY */
		return;

	/*
	 * If the digest fails or it's missing for authenticated
	 * associations, the client cannot authenticate a server
	 * reply to a client packet previously sent. The loopback check
	 * is designed to avoid a bait-and-switch attack, which was
	 * possible in past versions. If symmetric modes, return a
	 * crypto-NAK. The peer should restart the protocol.
	 */
	} else if (!AUTH(peer->keyid || has_mac ||
			 (restrict_mask & RES_DONTTRUST), is_authentic)) {
		report_event(PEVNT_AUTH, peer, "digest");
		peer->flash |= TEST5;		/* bad auth */
		peer->badauth++;
		if (   has_mac
		    && (hismode == MODE_ACTIVE || hismode == MODE_PASSIVE))
			fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask);
		if (peer->flags & FLAG_PREEMPT) {
			unpeer(peer);
			return;
		}
#ifdef AUTOKEY
		if (peer->crypto)
			peer_clear(peer, "AUTH");
#endif	/* AUTOKEY */
		return;
	}

	/*
	 * Update the state variables.
	 */
	if (peer->flip == 0) {
		if (hismode != MODE_BROADCAST)
			peer->rec = p_xmt;
		peer->dst = rbufp->recv_time;
	}
	peer->xmt = p_xmt;

	/*
	 * Set the peer ppoll to the maximum of the packet ppoll and the
	 * peer minpoll. If a kiss-o'-death, set the peer minpoll to
	 * this maximum and advance the headway to give the sender some
	 * headroom. Very intricate.
	 */

	/*
	 * Check for any kiss codes. Note this is only used when a server
	 * responds to a packet request.
	 */

	kissCode = kiss_code_check(hisleap, hisstratum, hismode, pkt->refid);

	/*
	 * Check to see if this is a RATE Kiss Code.
	 * Currently this kiss code will accept whatever poll
	 * rate that the server sends.
	 */
	peer->ppoll = max(peer->minpoll, pkt->ppoll);
	if (kissCode == RATEKISS) {
		peer->selbroken++;	/* Increment the KoD count */
		report_event(PEVNT_RATE, peer, NULL);
		if (pkt->ppoll > peer->minpoll)
			peer->minpoll = peer->ppoll;
		peer->burst = peer->retry = 0;
		peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll);
		poll_update(peer, pkt->ppoll);
		return;				/* kiss-o'-death */
	}
	if (kissCode != NOKISS) {
		peer->selbroken++;	/* Increment the KoD count */
		return;		/* Drop any other kiss code packets */
	}


	/*
	 * That was hard and I am sweaty, but the packet is squeaky
	 * clean. Get on with real work.
	 */
	peer->timereceived = current_time;
	if (is_authentic == AUTH_OK)
		peer->flags |= FLAG_AUTHENTIC;
	else
		peer->flags &= ~FLAG_AUTHENTIC;

#ifdef AUTOKEY
	/*
	 * More autokey dance. The rules of the cha-cha are as follows:
	 *
	 * 1. If there is no key or the key is not auto, do nothing.
	 *
	 * 2. If this packet is in response to the one just previously
	 *    sent or from a broadcast server, do the extension fields.
	 *    Otherwise, assume bogosity and bail out.
	 *
	 * 3. If an extension field contains a verified signature, it is
	 *    self-authenticated and we sit the dance.
	 *
	 * 4. If this is a server reply, check only to see that the
	 *    transmitted key ID matches the received key ID.
	 *
	 * 5. Check to see that one or more hashes of the current key ID
	 *    matches the previous key ID or ultimate original key ID
	 *    obtained from the broadcaster or symmetric peer. If no
	 *    match, sit the dance and call for new autokey values.
	 *
	 * In case of crypto error, fire the orchestra, stop dancing and
	 * restart the protocol.
	 */
	if (peer->flags & FLAG_SKEY) {
		/*
		 * Decrement remaining autokey hashes. This isn't
		 * perfect if a packet is lost, but results in no harm.
		 */
		ap = (struct autokey *)peer->recval.ptr;
		if (ap != NULL) {
			if (ap->seq > 0)
				ap->seq--;
		}
		peer->flash |= TEST8;
		rval = crypto_recv(peer, rbufp);
		if (rval == XEVNT_OK) {
			peer->unreach = 0;
		} else {
			if (rval == XEVNT_ERR) {
				report_event(PEVNT_RESTART, peer,
				    "crypto error");
				peer_clear(peer, "CRYP");
				peer->flash |= TEST9; /* bad crypt */
				if (peer->flags & FLAG_PREEMPT)
					unpeer(peer);
			}
			return;
		}

		/*
		 * If server mode, verify the receive key ID matches
		 * the transmit key ID.
		 */
		if (hismode == MODE_SERVER) {
			if (skeyid == peer->keyid)
				peer->flash &= ~TEST8;

		/*
		 * If an extension field is present, verify only that it
		 * has been correctly signed. We don't need a sequence
		 * check here, but the sequence continues.
		 */
		} else if (!(peer->flash & TEST8)) {
			peer->pkeyid = skeyid;

		/*
		 * Now the fun part. Here, skeyid is the current ID in
		 * the packet, pkeyid is the ID in the last packet and
		 * tkeyid is the hash of skeyid. If the autokey values
		 * have not been received, this is an automatic error.
		 * If so, check that the tkeyid matches pkeyid. If not,
		 * hash tkeyid and try again. If the number of hashes
		 * exceeds the number remaining in the sequence, declare
		 * a successful failure and refresh the autokey values.
		 */
		} else if (ap != NULL) {
			int i;

			for (i = 0; ; i++) {
				if (   tkeyid == peer->pkeyid
				    || tkeyid == ap->key) {
					peer->flash &= ~TEST8;
					peer->pkeyid = skeyid;
					ap->seq -= i;
					break;
				}
				if (i > ap->seq) {
					peer->crypto &=
					    ~CRYPTO_FLAG_AUTO;
					break;
				}
				tkeyid = session_key(
				    &rbufp->recv_srcadr, dstadr_sin,
				    tkeyid, pkeyid, 0);
			}
			if (peer->flash & TEST8)
				report_event(PEVNT_AUTH, peer, "keylist");
		}
		if (!(peer->crypto & CRYPTO_FLAG_PROV))	/* test 9 */
			peer->flash |= TEST8;	/* bad autokey */

		/*
		 * The maximum lifetime of the protocol is about one
		 * week before restarting the Autokey protocol to
		 * refresh certificates and leapseconds values.
		 */
		if (current_time > peer->refresh) {
			report_event(PEVNT_RESTART, peer,
			    "crypto refresh");
			peer_clear(peer, "TIME");
			return;
		}
	}
#endif	/* AUTOKEY */

	/*
	 * The dance is complete and the flash bits have been lit. Toss
	 * the packet over the fence for processing, which may light up
	 * more flashers.
	 */
	process_packet(peer, pkt, rbufp->recv_length);

	/*
	 * In interleaved mode update the state variables. Also adjust the
	 * transmit phase to avoid crossover.
	 */
	if (peer->flip != 0) {
		peer->rec = p_rec;
		peer->dst = rbufp->recv_time;
		if (peer->nextdate - current_time < (1U << min(peer->ppoll,
		    peer->hpoll)) / 2)
			peer->nextdate++;
		else
			peer->nextdate--;
	}
}


/*
 * process_packet - Packet Procedure, a la Section 3.4.4 of the
 * specification. Or almost, at least. If we're in here we have a
 * reasonable expectation that we will be having a long term
 * relationship with this host.
 */
void
process_packet(
	register struct peer *peer,
	register struct pkt *pkt,
	u_int	len
	)
{
	double	t34, t21;
	double	p_offset, p_del, p_disp;
	l_fp	p_rec, p_xmt, p_org, p_reftime, ci;
	u_char	pmode, pleap, pversion, pstratum;
	char	statstr[NTP_MAXSTRLEN];
#ifdef ASSYM
	int	itemp;
	double	etemp, ftemp, td;
#endif /* ASSYM */

	sys_processed++;
	peer->processed++;
	p_del = FPTOD(NTOHS_FP(pkt->rootdelay));
	p_offset = 0;
	p_disp = FPTOD(NTOHS_FP(pkt->rootdisp));
	NTOHL_FP(&pkt->reftime, &p_reftime);
	NTOHL_FP(&pkt->org, &p_org);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);
	pmode = PKT_MODE(pkt->li_vn_mode);
	pleap = PKT_LEAP(pkt->li_vn_mode);
	pversion = PKT_VERSION(pkt->li_vn_mode);
	pstratum = PKT_TO_STRATUM(pkt->stratum);

	/*
	 * Capture the header values in the client/peer association.
	 */
	record_raw_stats(&peer->srcadr, peer->dstadr ?
	    &peer->dstadr->sin : NULL,
	    &p_org, &p_rec, &p_xmt, &peer->dst,
	    pleap, pversion, pmode, pstratum, pkt->ppoll, pkt->precision,
	    p_del, p_disp, pkt->refid);
	peer->leap = pleap;
	peer->stratum = min(pstratum, STRATUM_UNSPEC);
	peer->pmode = pmode;
	peer->precision = pkt->precision;
	peer->rootdelay = p_del;
	peer->rootdisp = p_disp;
	peer->refid = pkt->refid;		/* network byte order */
	peer->reftime = p_reftime;

	/*
	 * First, if either burst mode is armed, enable the burst.
	 * Compute the headway for the next packet and delay if
	 * necessary to avoid exceeding the threshold.
	 */
	if (peer->retry > 0) {
		peer->retry = 0;
		if (peer->reach)
			peer->burst = min(1 << (peer->hpoll -
			    peer->minpoll), NTP_SHIFT) - 1;
		else
			peer->burst = NTP_IBURST - 1;
		if (peer->burst > 0)
			peer->nextdate = current_time;
	}
	poll_update(peer, peer->hpoll);

	/*
	 * Verify the server is synchronized; that is, the leap bits,
	 * stratum and root distance are valid.
	 */
	if (   pleap == LEAP_NOTINSYNC		/* test 6 */
	    || pstratum < sys_floor || pstratum >= sys_ceiling)
		peer->flash |= TEST6;		/* bad synch or strat */
	if (p_del / 2 + p_disp >= MAXDISPERSE)	/* test 7 */
		peer->flash |= TEST7;		/* bad header */

	/*
	 * If any tests fail at this point, the packet is discarded.
	 * Note that some flashers may have already been set in the
	 * receive() routine.
	 */
	if (peer->flash & PKT_TEST_MASK) {
		peer->seldisptoolarge++;
#ifdef DEBUG
		if (debug)
			printf("packet: flash header %04x\n",
			    peer->flash);
#endif
		return;
	}

	/*
	 * If the peer was previously unreachable, raise a trap. In any
	 * case, mark it reachable.
	 */
	if (!peer->reach) {
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach |= 1;

	/*
	 * For a client/server association, calculate the clock offset,
	 * roundtrip delay and dispersion. The equations are reordered
	 * from the spec for more efficient use of temporaries. For a
	 * broadcast association, offset the last measurement by the
	 * computed delay during the client/server volley. Note the
	 * computation of dispersion includes the system precision plus
	 * that due to the frequency error since the origin time.
	 *
	 * It is very important to respect the hazards of overflow. The
	 * only permitted operation on raw timestamps is subtraction,
	 * where the result is a signed quantity spanning from 68 years
	 * in the past to 68 years in the future. To avoid loss of
	 * precision, these calculations are done using 64-bit integer
	 * arithmetic. However, the offset and delay calculations are
	 * sums and differences of these first-order differences, which
	 * if done using 64-bit integer arithmetic, would be valid over
	 * only half that span. Since the typical first-order
	 * differences are usually very small, they are converted to 64-
	 * bit doubles and all remaining calculations done in floating-
	 * double arithmetic. This preserves the accuracy while
	 * retaining the 68-year span.
	 *
	 * There are three interleaving schemes, basic, interleaved
	 * symmetric and interleaved broadcast. The timestamps are
	 * idiosyncratically different. See the onwire briefing/white
	 * paper at www.eecis.udel.edu/~mills for details.
	 *
	 * Interleaved symmetric mode
	 * t1 = peer->aorg/borg, t2 = peer->rec, t3 = p_xmt,
	 * t4 = peer->dst
	 */
	if (peer->flip != 0) {
		ci = p_xmt;				/* t3 - t4 */
		L_SUB(&ci, &peer->dst);
		LFPTOD(&ci, t34);
		ci = p_rec;				/* t2 - t1 */
		if (peer->flip > 0)
			L_SUB(&ci, &peer->borg);
		else
			L_SUB(&ci, &peer->aorg);
		LFPTOD(&ci, t21);
		p_del = t21 - t34;
		p_offset = (t21 + t34) / 2.;
		if (p_del < 0 || p_del > 1.) {
			snprintf(statstr, sizeof(statstr),
			    "t21 %.6f t34 %.6f", t21, t34);
			report_event(PEVNT_XERR, peer, statstr);
			return;
		}

	/*
	 * Broadcast modes
	 */
	} else if (peer->pmode == MODE_BROADCAST) {

		/*
		 * Interleaved broadcast mode. Use interleaved timestamps.
		 * t1 = peer->borg, t2 = p_org, t3 = p_org, t4 = aorg
		 */
		if (peer->flags & FLAG_XB) {
			ci = p_org;			/* delay */
			L_SUB(&ci, &peer->aorg);
			LFPTOD(&ci, t34);
			ci = p_org;			/* t2 - t1 */
			L_SUB(&ci, &peer->borg);
			LFPTOD(&ci, t21);
			peer->aorg = p_xmt;
			peer->borg = peer->dst;
			if (t34 < 0 || t34 > 1.) {
				snprintf(statstr, sizeof(statstr),
				    "offset %.6f delay %.6f", t21, t34);
				report_event(PEVNT_XERR, peer, statstr);
				return;
			}
			p_offset = t21;
			peer->xleave = t34;

		/*
		 * Basic broadcast - use direct timestamps.
		 * t3 = p_xmt, t4 = peer->dst
		 */
		} else {
			ci = p_xmt;			/* t3 - t4 */
			L_SUB(&ci, &peer->dst);
			LFPTOD(&ci, t34);
			p_offset = t34;
		}

		/*
		 * When calibration is complete and the clock is
		 * synchronized, the bias is calculated as the difference
		 * between the unicast timestamp and the broadcast
		 * timestamp. This works for both basic and interleaved
		 * modes.
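		 *
		 * (Sketch of the arithmetic below: the unicast volley
		 * left its offset in peer->offset, the broadcast offset
		 * just computed is p_offset, so the broadcast delay is
		 * taken as delay = 2 * |peer->offset - p_offset| and
		 * half of it is added back to the broadcast offset.)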
		 */
		if (FLAG_BC_VOL & peer->flags) {
			peer->flags &= ~FLAG_BC_VOL;
			peer->delay = fabs(peer->offset - p_offset) * 2;
		}
		p_del = peer->delay;
		p_offset += p_del / 2;


	/*
	 * Basic mode, otherwise known as the old fashioned way.
	 *
	 * t1 = p_org, t2 = p_rec, t3 = p_xmt, t4 = peer->dst
	 */
	} else {
		ci = p_xmt;				/* t3 - t4 */
		L_SUB(&ci, &peer->dst);
		LFPTOD(&ci, t34);
		ci = p_rec;				/* t2 - t1 */
		L_SUB(&ci, &p_org);
		LFPTOD(&ci, t21);
		p_del = fabs(t21 - t34);
		p_offset = (t21 + t34) / 2.;
	}
	p_del = max(p_del, LOGTOD(sys_precision));
	p_disp = LOGTOD(sys_precision) + LOGTOD(peer->precision) +
	    clock_phi * p_del;

#if ASSYM
	/*
	 * This code calculates the outbound and inbound data rates by
	 * measuring the differences between timestamps at different
	 * packet lengths. This is helpful in cases of large asymmetric
	 * delays commonly experienced on deep space communication
	 * links.
	 */
	if (peer->t21_last > 0 && peer->t34_bytes > 0) {
		itemp = peer->t21_bytes - peer->t21_last;
		if (itemp > 25) {
			etemp = t21 - peer->t21;
			if (fabs(etemp) > 1e-6) {
				ftemp = itemp / etemp;
				if (ftemp > 1000.)
					peer->r21 = ftemp;
			}
		}
		itemp = len - peer->t34_bytes;
		if (itemp > 25) {
			etemp = -t34 - peer->t34;
			if (fabs(etemp) > 1e-6) {
				ftemp = itemp / etemp;
				if (ftemp > 1000.)
					peer->r34 = ftemp;
			}
		}
	}

	/*
	 * The following section compensates for different data rates on
	 * the outbound (d21) and inbound (t34) directions. To do this,
	 * it finds t such that r21 * t - r34 * (d - t) = 0, where d is
	 * the roundtrip delay. Then it calculates the correction as a
	 * fraction of d.
	 */
	peer->t21 = t21;
	peer->t21_last = peer->t21_bytes;
	peer->t34 = -t34;
	peer->t34_bytes = len;
#ifdef DEBUG
	if (debug > 1)
		printf("packet: t21 %.9lf %d t34 %.9lf %d\n", peer->t21,
		    peer->t21_bytes, peer->t34, peer->t34_bytes);
#endif
	if (peer->r21 > 0 && peer->r34 > 0 && p_del > 0) {
		if (peer->pmode != MODE_BROADCAST)
			td = (peer->r34 / (peer->r21 + peer->r34) -
			    .5) * p_del;
		else
			td = 0;

		/*
		 * Unfortunately, in many cases the errors are
		 * unacceptable, so for the present the rates are not
		 * used. In future, we might find conditions where the
		 * calculations are useful, so this should be considered
		 * a work in progress.
		 */
		t21 -= td;
		t34 -= td;
#ifdef DEBUG
		if (debug > 1)
			printf("packet: del %.6lf r21 %.1lf r34 %.1lf %.6lf\n",
			    p_del, peer->r21 / 1e3, peer->r34 / 1e3,
			    td);
#endif
	}
#endif /* ASSYM */

	/*
	 * That was awesome. Now hand off to the clock filter.
	 */
	clock_filter(peer, p_offset + peer->bias, p_del, p_disp);

	/*
	 * If we are in broadcast calibrate mode, return to broadcast
	 * client mode when the client is fit and the autokey dance is
	 * complete.
1917 */ 1918 if ( (FLAG_BC_VOL & peer->flags) 1919 && MODE_CLIENT == peer->hmode 1920 && !(TEST11 & peer_unfit(peer))) { /* distance exceeded */ 1921 #ifdef AUTOKEY 1922 if (peer->flags & FLAG_SKEY) { 1923 if (!(~peer->crypto & CRYPTO_FLAG_ALL)) 1924 peer->hmode = MODE_BCLIENT; 1925 } else { 1926 peer->hmode = MODE_BCLIENT; 1927 } 1928 #else /* !AUTOKEY follows */ 1929 peer->hmode = MODE_BCLIENT; 1930 #endif /* !AUTOKEY */ 1931 } 1932 } 1933 1934 1935 /* 1936 * clock_update - Called at system process update intervals. 1937 */ 1938 static void 1939 clock_update( 1940 struct peer *peer /* peer structure pointer */ 1941 ) 1942 { 1943 double dtemp; 1944 l_fp now; 1945 #ifdef HAVE_LIBSCF_H 1946 char *fmri; 1947 #endif /* HAVE_LIBSCF_H */ 1948 1949 /* 1950 * Update the system state variables. We do this very carefully, 1951 * as the poll interval might need to be clamped differently. 1952 */ 1953 sys_peer = peer; 1954 sys_epoch = peer->epoch; 1955 if (sys_poll < peer->minpoll) 1956 sys_poll = peer->minpoll; 1957 if (sys_poll > peer->maxpoll) 1958 sys_poll = peer->maxpoll; 1959 poll_update(peer, sys_poll); 1960 sys_stratum = min(peer->stratum + 1, STRATUM_UNSPEC); 1961 if ( peer->stratum == STRATUM_REFCLOCK 1962 || peer->stratum == STRATUM_UNSPEC) 1963 sys_refid = peer->refid; 1964 else 1965 sys_refid = addr2refid(&peer->srcadr); 1966 /* 1967 * Root Dispersion (E) is defined (in RFC 5905) as: 1968 * 1969 * E = p.epsilon_r + p.epsilon + p.psi + PHI*(s.t - p.t) + |THETA| 1970 * 1971 * where: 1972 * p.epsilon_r is the PollProc's root dispersion 1973 * p.epsilon is the PollProc's dispersion 1974 * p.psi is the PollProc's jitter 1975 * THETA is the combined offset 1976 * 1977 * NB: Think Hard about where these numbers come from and 1978 * what they mean. When did peer->update happen? Has anything 1979 * interesting happened since then? What values are the most 1980 * defensible? Why? 1981 * 1982 * DLM thinks this equation is probably the best of all worse choices. 1983 */ 1984 dtemp = peer->rootdisp 1985 + peer->disp 1986 + sys_jitter 1987 + clock_phi * (current_time - peer->update) 1988 + fabs(sys_offset); 1989 1990 if (dtemp > sys_mindisp) 1991 sys_rootdisp = dtemp; 1992 else 1993 sys_rootdisp = sys_mindisp; 1994 sys_rootdelay = peer->delay + peer->rootdelay; 1995 sys_reftime = peer->dst; 1996 1997 #ifdef DEBUG 1998 if (debug) 1999 printf( 2000 "clock_update: at %lu sample %lu associd %d\n", 2001 current_time, peer->epoch, peer->associd); 2002 #endif 2003 2004 /* 2005 * Comes now the moment of truth. Crank the clock discipline and 2006 * see what comes out. 2007 */ 2008 switch (local_clock(peer, sys_offset)) { 2009 2010 /* 2011 * Clock exceeds panic threshold. Life as we know it ends. 2012 */ 2013 case -1: 2014 #ifdef HAVE_LIBSCF_H 2015 /* 2016 * For Solaris enter the maintenance mode. 2017 */ 2018 if ((fmri = getenv("SMF_FMRI")) != NULL) { 2019 if (smf_maintain_instance(fmri, 0) < 0) { 2020 printf("smf_maintain_instance: %s\n", 2021 scf_strerror(scf_error())); 2022 exit(1); 2023 } 2024 /* 2025 * Sleep until SMF kills us. 2026 */ 2027 for (;;) 2028 pause(); 2029 } 2030 #endif /* HAVE_LIBSCF_H */ 2031 exit (-1); 2032 /* not reached */ 2033 2034 /* 2035 * Clock was stepped. Flush all time values of all peers. 
2036 */ 2037 case 2: 2038 clear_all(); 2039 set_sys_leap(LEAP_NOTINSYNC); 2040 sys_stratum = STRATUM_UNSPEC; 2041 memcpy(&sys_refid, "STEP", 4); 2042 sys_rootdelay = 0; 2043 sys_rootdisp = 0; 2044 L_CLR(&sys_reftime); 2045 sys_jitter = LOGTOD(sys_precision); 2046 leapsec_reset_frame(); 2047 break; 2048 2049 /* 2050 * Clock was slewed. Handle the leapsecond stuff. 2051 */ 2052 case 1: 2053 2054 /* 2055 * If this is the first time the clock is set, reset the 2056 * leap bits. If crypto, the timer will goose the setup 2057 * process. 2058 */ 2059 if (sys_leap == LEAP_NOTINSYNC) { 2060 set_sys_leap(LEAP_NOWARNING); 2061 #ifdef AUTOKEY 2062 if (crypto_flags) 2063 crypto_update(); 2064 #endif /* AUTOKEY */ 2065 /* 2066 * If our parent process is waiting for the 2067 * first clock sync, send them home satisfied. 2068 */ 2069 #ifdef HAVE_WORKING_FORK 2070 if (waitsync_fd_to_close != -1) { 2071 close(waitsync_fd_to_close); 2072 waitsync_fd_to_close = -1; 2073 DPRINTF(1, ("notified parent --wait-sync is done\n")); 2074 } 2075 #endif /* HAVE_WORKING_FORK */ 2076 2077 } 2078 2079 /* 2080 * If there is no leap second pending and the number of 2081 * survivor leap bits is greater than half the number of 2082 * survivors, try to schedule a leap for the end of the 2083 * current month. (This only works if no leap second for 2084 * that range is in the table, so doing this more than 2085 * once is mostly harmless.) 2086 */ 2087 if (leapsec == LSPROX_NOWARN) { 2088 if ( leap_vote_ins > leap_vote_del 2089 && leap_vote_ins > sys_survivors / 2) { 2090 get_systime(&now); 2091 leapsec_add_dyn(TRUE, now.l_ui, NULL); 2092 } 2093 if ( leap_vote_del > leap_vote_ins 2094 && leap_vote_del > sys_survivors / 2) { 2095 get_systime(&now); 2096 leapsec_add_dyn(FALSE, now.l_ui, NULL); 2097 } 2098 } 2099 break; 2100 2101 /* 2102 * Popcorn spike or step threshold exceeded. Pretend it never 2103 * happened. 2104 */ 2105 default: 2106 break; 2107 } 2108 } 2109 2110 2111 /* 2112 * poll_update - update peer poll interval 2113 */ 2114 void 2115 poll_update( 2116 struct peer *peer, /* peer structure pointer */ 2117 u_char mpoll 2118 ) 2119 { 2120 u_long next, utemp; 2121 u_char hpoll; 2122 2123 /* 2124 * This routine figures out when the next poll should be sent. 2125 * That turns out to be wickedly complicated. One problem is 2126 * that sometimes the time for the next poll is in the past when 2127 * the poll interval is reduced. We watch out for races here 2128 * between the receive process and the poll process. 2129 * 2130 * Clamp the poll interval between minpoll and maxpoll. 2131 */ 2132 hpoll = max(min(peer->maxpoll, mpoll), peer->minpoll); 2133 2134 #ifdef AUTOKEY 2135 /* 2136 * If during the crypto protocol the poll interval has changed, 2137 * the lifetimes in the key list are probably bogus. Purge the 2138 * the key list and regenerate it later. 2139 */ 2140 if ((peer->flags & FLAG_SKEY) && hpoll != peer->hpoll) 2141 key_expire(peer); 2142 #endif /* AUTOKEY */ 2143 peer->hpoll = hpoll; 2144 2145 /* 2146 * There are three variables important for poll scheduling, the 2147 * current time (current_time), next scheduled time (nextdate) 2148 * and the earliest time (utemp). The earliest time is 2 s 2149 * seconds, but could be more due to rate management. When 2150 * sending in a burst, use the earliest time. When not in a 2151 * burst but with a reply pending, send at the earliest time 2152 * unless the next scheduled time has not advanced. 
This can 2153 * only happen if multiple replies are pending in the same 2154 * response interval. Otherwise, send at the later of the next 2155 * scheduled time and the earliest time. 2156 * 2157 * Now we figure out if there is an override. If a burst is in 2158 * progress and we get called from the receive process, just 2159 * slink away. If called from the poll process, delay 1 s for a 2160 * reference clock, otherwise 2 s. 2161 */ 2162 utemp = current_time + max(peer->throttle - (NTP_SHIFT - 1) * 2163 (1 << peer->minpoll), ntp_minpkt); 2164 if (peer->burst > 0) { 2165 if (peer->nextdate > current_time) 2166 return; 2167 #ifdef REFCLOCK 2168 else if (peer->flags & FLAG_REFCLOCK) 2169 peer->nextdate = current_time + RESP_DELAY; 2170 #endif /* REFCLOCK */ 2171 else 2172 peer->nextdate = utemp; 2173 2174 #ifdef AUTOKEY 2175 /* 2176 * If a burst is not in progress and a crypto response message 2177 * is pending, delay 2 s, but only if this is a new interval. 2178 */ 2179 } else if (peer->cmmd != NULL) { 2180 if (peer->nextdate > current_time) { 2181 if (peer->nextdate + ntp_minpkt != utemp) 2182 peer->nextdate = utemp; 2183 } else { 2184 peer->nextdate = utemp; 2185 } 2186 #endif /* AUTOKEY */ 2187 2188 /* 2189 * The ordinary case. If a retry, use minpoll; if unreachable, 2190 * use host poll; otherwise, use the minimum of host and peer 2191 * polls; In other words, oversampling is okay but 2192 * understampling is evil. Use the maximum of this value and the 2193 * headway. If the average headway is greater than the headway 2194 * threshold, increase the headway by the minimum interval. 2195 */ 2196 } else { 2197 if (peer->retry > 0) 2198 hpoll = peer->minpoll; 2199 else if (!(peer->reach)) 2200 hpoll = peer->hpoll; 2201 else 2202 hpoll = min(peer->ppoll, peer->hpoll); 2203 #ifdef REFCLOCK 2204 if (peer->flags & FLAG_REFCLOCK) 2205 next = 1 << hpoll; 2206 else 2207 #endif /* REFCLOCK */ 2208 next = ((0x1000UL | (ntp_random() & 0x0ff)) << 2209 hpoll) >> 12; 2210 next += peer->outdate; 2211 if (next > utemp) 2212 peer->nextdate = next; 2213 else 2214 peer->nextdate = utemp; 2215 if (peer->throttle > (1 << peer->minpoll)) 2216 peer->nextdate += ntp_minpkt; 2217 } 2218 DPRINTF(2, ("poll_update: at %lu %s poll %d burst %d retry %d head %d early %lu next %lu\n", 2219 current_time, ntoa(&peer->srcadr), peer->hpoll, 2220 peer->burst, peer->retry, peer->throttle, 2221 utemp - current_time, peer->nextdate - 2222 current_time)); 2223 } 2224 2225 2226 /* 2227 * peer_clear - clear peer filter registers. See Section 3.4.8 of the 2228 * spec. 2229 */ 2230 void 2231 peer_clear( 2232 struct peer *peer, /* peer structure */ 2233 const char *ident /* tally lights */ 2234 ) 2235 { 2236 u_char u; 2237 2238 #ifdef AUTOKEY 2239 /* 2240 * If cryptographic credentials have been acquired, toss them to 2241 * Valhalla. Note that autokeys are ephemeral, in that they are 2242 * tossed immediately upon use. Therefore, the keylist can be 2243 * purged anytime without needing to preserve random keys. Note 2244 * that, if the peer is purged, the cryptographic variables are 2245 * purged, too. This makes it much harder to sneak in some 2246 * unauthenticated data in the clock filter. 
2247 */ 2248 key_expire(peer); 2249 if (peer->iffval != NULL) 2250 BN_free(peer->iffval); 2251 value_free(&peer->cookval); 2252 value_free(&peer->recval); 2253 value_free(&peer->encrypt); 2254 value_free(&peer->sndval); 2255 if (peer->cmmd != NULL) 2256 free(peer->cmmd); 2257 if (peer->subject != NULL) 2258 free(peer->subject); 2259 if (peer->issuer != NULL) 2260 free(peer->issuer); 2261 #endif /* AUTOKEY */ 2262 2263 /* 2264 * Clear all values, including the optional crypto values above. 2265 */ 2266 memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO(peer)); 2267 peer->ppoll = peer->maxpoll; 2268 peer->hpoll = peer->minpoll; 2269 peer->disp = MAXDISPERSE; 2270 peer->flash = peer_unfit(peer); 2271 peer->jitter = LOGTOD(sys_precision); 2272 2273 /* 2274 * If interleave mode, initialize the alternate origin switch. 2275 */ 2276 if (peer->flags & FLAG_XLEAVE) 2277 peer->flip = 1; 2278 for (u = 0; u < NTP_SHIFT; u++) { 2279 peer->filter_order[u] = u; 2280 peer->filter_disp[u] = MAXDISPERSE; 2281 } 2282 #ifdef REFCLOCK 2283 if (!(peer->flags & FLAG_REFCLOCK)) { 2284 #endif 2285 peer->leap = LEAP_NOTINSYNC; 2286 peer->stratum = STRATUM_UNSPEC; 2287 memcpy(&peer->refid, ident, 4); 2288 #ifdef REFCLOCK 2289 } 2290 #endif 2291 2292 /* 2293 * During initialization use the association count to spread out 2294 * the polls at one-second intervals. Passive associations' 2295 * first poll is delayed by the "discard minimum" to avoid rate 2296 * limiting. Other post-startup new or cleared associations 2297 * randomize the first poll over the minimum poll interval to 2298 * avoid implosion. 2299 */ 2300 peer->nextdate = peer->update = peer->outdate = current_time; 2301 if (initializing) { 2302 peer->nextdate += peer_associations; 2303 } else if (MODE_PASSIVE == peer->hmode) { 2304 peer->nextdate += ntp_minpkt; 2305 } else { 2306 peer->nextdate += ntp_random() % peer->minpoll; 2307 } 2308 #ifdef AUTOKEY 2309 peer->refresh = current_time + (1 << NTP_REFRESH); 2310 #endif /* AUTOKEY */ 2311 #ifdef DEBUG 2312 if (debug) 2313 printf( 2314 "peer_clear: at %ld next %ld associd %d refid %s\n", 2315 current_time, peer->nextdate, peer->associd, 2316 ident); 2317 #endif 2318 } 2319 2320 2321 /* 2322 * clock_filter - add incoming clock sample to filter register and run 2323 * the filter procedure to find the best sample. 2324 */ 2325 void 2326 clock_filter( 2327 struct peer *peer, /* peer structure pointer */ 2328 double sample_offset, /* clock offset */ 2329 double sample_delay, /* roundtrip delay */ 2330 double sample_disp /* dispersion */ 2331 ) 2332 { 2333 double dst[NTP_SHIFT]; /* distance vector */ 2334 int ord[NTP_SHIFT]; /* index vector */ 2335 int i, j, k, m; 2336 double dtemp, etemp; 2337 char tbuf[80]; 2338 2339 /* 2340 * A sample consists of the offset, delay, dispersion and epoch 2341 * of arrival. The offset and delay are determined by the on- 2342 * wire protocol. The dispersion grows from the last outbound 2343 * packet to the arrival of this one increased by the sum of the 2344 * peer precision and the system precision as required by the 2345 * error budget. First, shift the new arrival into the shift 2346 * register discarding the oldest one. 
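 *
 * (Worked example, assuming the customary 15 PPM frequency
 * tolerance for clock_phi: if 64 s have elapsed since the previous
 * update, the aging pass below adds 15e-6 * 64, about 0.96 ms, of
 * dispersion to each stored sample.)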
2347 */ 2348 j = peer->filter_nextpt; 2349 peer->filter_offset[j] = sample_offset; 2350 peer->filter_delay[j] = sample_delay; 2351 peer->filter_disp[j] = sample_disp; 2352 peer->filter_epoch[j] = current_time; 2353 j = (j + 1) % NTP_SHIFT; 2354 peer->filter_nextpt = j; 2355 2356 /* 2357 * Update dispersions since the last update and at the same 2358 * time initialize the distance and index lists. Since samples 2359 * become increasingly uncorrelated beyond the Allan intercept, 2360 * only under exceptional cases will an older sample be used. 2361 * Therefore, the distance list uses a compound metric. If the 2362 * dispersion is greater than the maximum dispersion, clamp the 2363 * distance at that value. If the time since the last update is 2364 * less than the Allan intercept use the delay; otherwise, use 2365 * the sum of the delay and dispersion. 2366 */ 2367 dtemp = clock_phi * (current_time - peer->update); 2368 peer->update = current_time; 2369 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2370 if (i != 0) 2371 peer->filter_disp[j] += dtemp; 2372 if (peer->filter_disp[j] >= MAXDISPERSE) { 2373 peer->filter_disp[j] = MAXDISPERSE; 2374 dst[i] = MAXDISPERSE; 2375 } else if (peer->update - peer->filter_epoch[j] > 2376 (u_long)ULOGTOD(allan_xpt)) { 2377 dst[i] = peer->filter_delay[j] + 2378 peer->filter_disp[j]; 2379 } else { 2380 dst[i] = peer->filter_delay[j]; 2381 } 2382 ord[i] = j; 2383 j = (j + 1) % NTP_SHIFT; 2384 } 2385 2386 /* 2387 * If the clock has stabilized, sort the samples by distance. 2388 */ 2389 if (freq_cnt == 0) { 2390 for (i = 1; i < NTP_SHIFT; i++) { 2391 for (j = 0; j < i; j++) { 2392 if (dst[j] > dst[i]) { 2393 k = ord[j]; 2394 ord[j] = ord[i]; 2395 ord[i] = k; 2396 etemp = dst[j]; 2397 dst[j] = dst[i]; 2398 dst[i] = etemp; 2399 } 2400 } 2401 } 2402 } 2403 2404 /* 2405 * Copy the index list to the association structure so ntpq 2406 * can see it later. Prune the distance list to leave only 2407 * samples less than the maximum dispersion, which disfavors 2408 * uncorrelated samples older than the Allan intercept. To 2409 * further improve the jitter estimate, of the remainder leave 2410 * only samples less than the maximum distance, but keep at 2411 * least two samples for jitter calculation. 2412 */ 2413 m = 0; 2414 for (i = 0; i < NTP_SHIFT; i++) { 2415 peer->filter_order[i] = (u_char) ord[i]; 2416 if ( dst[i] >= MAXDISPERSE 2417 || (m >= 2 && dst[i] >= sys_maxdist)) 2418 continue; 2419 m++; 2420 } 2421 2422 /* 2423 * Compute the dispersion and jitter. The dispersion is weighted 2424 * exponentially by NTP_FWEIGHT (0.5) so it is normalized close 2425 * to 1.0. The jitter is the RMS differences relative to the 2426 * lowest delay sample. 2427 */ 2428 peer->disp = peer->jitter = 0; 2429 k = ord[0]; 2430 for (i = NTP_SHIFT - 1; i >= 0; i--) { 2431 j = ord[i]; 2432 peer->disp = NTP_FWEIGHT * (peer->disp + 2433 peer->filter_disp[j]); 2434 if (i < m) 2435 peer->jitter += DIFF(peer->filter_offset[j], 2436 peer->filter_offset[k]); 2437 } 2438 2439 /* 2440 * If no acceptable samples remain in the shift register, 2441 * quietly tiptoe home leaving only the dispersion. Otherwise, 2442 * save the offset, delay and jitter. Note the jitter must not 2443 * be less than the precision. 
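 *
 * (Sketch of the arithmetic, in the code's own terms:
 *
 *   jitter = max(sqrt(sum_{i < m} (offset[ord[i]] - offset[ord[0]])^2
 *                     / (m - 1)), 2^sys_precision)
 *
 * an RMS spread of the surviving offsets about the lowest-delay
 * sample, never reported smaller than the clock reading precision.)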
2444 */ 2445 if (m == 0) { 2446 clock_select(); 2447 return; 2448 } 2449 etemp = fabs(peer->offset - peer->filter_offset[k]); 2450 peer->offset = peer->filter_offset[k]; 2451 peer->delay = peer->filter_delay[k]; 2452 if (m > 1) 2453 peer->jitter /= m - 1; 2454 peer->jitter = max(SQRT(peer->jitter), LOGTOD(sys_precision)); 2455 2456 /* 2457 * If the the new sample and the current sample are both valid 2458 * and the difference between their offsets exceeds CLOCK_SGATE 2459 * (3) times the jitter and the interval between them is less 2460 * than twice the host poll interval, consider the new sample 2461 * a popcorn spike and ignore it. 2462 */ 2463 if ( peer->disp < sys_maxdist 2464 && peer->filter_disp[k] < sys_maxdist 2465 && etemp > CLOCK_SGATE * peer->jitter 2466 && peer->filter_epoch[k] - peer->epoch 2467 < 2. * ULOGTOD(peer->hpoll)) { 2468 snprintf(tbuf, sizeof(tbuf), "%.6f s", etemp); 2469 report_event(PEVNT_POPCORN, peer, tbuf); 2470 return; 2471 } 2472 2473 /* 2474 * A new minimum sample is useful only if it is later than the 2475 * last one used. In this design the maximum lifetime of any 2476 * sample is not greater than eight times the poll interval, so 2477 * the maximum interval between minimum samples is eight 2478 * packets. 2479 */ 2480 if (peer->filter_epoch[k] <= peer->epoch) { 2481 #if DEBUG 2482 if (debug > 1) 2483 printf("clock_filter: old sample %lu\n", current_time - 2484 peer->filter_epoch[k]); 2485 #endif 2486 return; 2487 } 2488 peer->epoch = peer->filter_epoch[k]; 2489 2490 /* 2491 * The mitigated sample statistics are saved for later 2492 * processing. If not synchronized or not in a burst, tickle the 2493 * clock select algorithm. 2494 */ 2495 record_peer_stats(&peer->srcadr, ctlpeerstatus(peer), 2496 peer->offset, peer->delay, peer->disp, peer->jitter); 2497 #ifdef DEBUG 2498 if (debug) 2499 printf( 2500 "clock_filter: n %d off %.6f del %.6f dsp %.6f jit %.6f\n", 2501 m, peer->offset, peer->delay, peer->disp, 2502 peer->jitter); 2503 #endif 2504 if (peer->burst == 0 || sys_leap == LEAP_NOTINSYNC) 2505 clock_select(); 2506 } 2507 2508 2509 /* 2510 * clock_select - find the pick-of-the-litter clock 2511 * 2512 * LOCKCLOCK: (1) If the local clock is the prefer peer, it will always 2513 * be enabled, even if declared falseticker, (2) only the prefer peer 2514 * can be selected as the system peer, (3) if the external source is 2515 * down, the system leap bits are set to 11 and the stratum set to 2516 * infinity. 2517 */ 2518 void 2519 clock_select(void) 2520 { 2521 struct peer *peer; 2522 int i, j, k, n; 2523 int nlist, nl2; 2524 int allow; 2525 int speer; 2526 double d, e, f, g; 2527 double high, low; 2528 double speermet; 2529 double orphmet = 2.0 * U_INT32_MAX; /* 2x is greater than */ 2530 struct endpoint endp; 2531 struct peer *osys_peer; 2532 struct peer *sys_prefer = NULL; /* prefer peer */ 2533 struct peer *typesystem = NULL; 2534 struct peer *typeorphan = NULL; 2535 #ifdef REFCLOCK 2536 struct peer *typeacts = NULL; 2537 struct peer *typelocal = NULL; 2538 struct peer *typepps = NULL; 2539 #endif /* REFCLOCK */ 2540 static struct endpoint *endpoint = NULL; 2541 static int *indx = NULL; 2542 static peer_select *peers = NULL; 2543 static u_int endpoint_size = 0; 2544 static u_int peers_size = 0; 2545 static u_int indx_size = 0; 2546 size_t octets; 2547 2548 /* 2549 * Initialize and create endpoint, index and peer lists big 2550 * enough to handle all associations. 
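 *
 * (Rough sizing note: each candidate contributes one peer_select
 * slot plus a lower and an upper interval endpoint, so for n
 * associations the single block carved out below holds about 2n
 * endpoints, n peer slots and 2n index entries.)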
2551 */ 2552 osys_peer = sys_peer; 2553 sys_survivors = 0; 2554 #ifdef LOCKCLOCK 2555 set_sys_leap(LEAP_NOTINSYNC); 2556 sys_stratum = STRATUM_UNSPEC; 2557 memcpy(&sys_refid, "DOWN", 4); 2558 #endif /* LOCKCLOCK */ 2559 2560 /* 2561 * Allocate dynamic space depending on the number of 2562 * associations. 2563 */ 2564 nlist = 1; 2565 for (peer = peer_list; peer != NULL; peer = peer->p_link) 2566 nlist++; 2567 endpoint_size = ALIGNED_SIZE(nlist * 2 * sizeof(*endpoint)); 2568 peers_size = ALIGNED_SIZE(nlist * sizeof(*peers)); 2569 indx_size = ALIGNED_SIZE(nlist * 2 * sizeof(*indx)); 2570 octets = endpoint_size + peers_size + indx_size; 2571 endpoint = erealloc(endpoint, octets); 2572 peers = INC_ALIGNED_PTR(endpoint, endpoint_size); 2573 indx = INC_ALIGNED_PTR(peers, peers_size); 2574 2575 /* 2576 * Initially, we populate the island with all the rifraff peers 2577 * that happen to be lying around. Those with seriously 2578 * defective clocks are immediately booted off the island. Then, 2579 * the falsetickers are culled and put to sea. The truechimers 2580 * remaining are subject to repeated rounds where the most 2581 * unpopular at each round is kicked off. When the population 2582 * has dwindled to sys_minclock, the survivors split a million 2583 * bucks and collectively crank the chimes. 2584 */ 2585 nlist = nl2 = 0; /* none yet */ 2586 for (peer = peer_list; peer != NULL; peer = peer->p_link) { 2587 peer->new_status = CTL_PST_SEL_REJECT; 2588 2589 /* 2590 * Leave the island immediately if the peer is 2591 * unfit to synchronize. 2592 */ 2593 if (peer_unfit(peer)) 2594 continue; 2595 2596 /* 2597 * If this peer is an orphan parent, elect the 2598 * one with the lowest metric defined as the 2599 * IPv4 address or the first 64 bits of the 2600 * hashed IPv6 address. To ensure convergence 2601 * on the same selected orphan, consider as 2602 * well that this system may have the lowest 2603 * metric and be the orphan parent. If this 2604 * system wins, sys_peer will be NULL to trigger 2605 * orphan mode in timer(). 2606 */ 2607 if (peer->stratum == sys_orphan) { 2608 u_int32 localmet; 2609 u_int32 peermet; 2610 2611 if (peer->dstadr != NULL) 2612 localmet = ntohl(peer->dstadr->addr_refid); 2613 else 2614 localmet = U_INT32_MAX; 2615 peermet = ntohl(addr2refid(&peer->srcadr)); 2616 if (peermet < localmet && peermet < orphmet) { 2617 typeorphan = peer; 2618 orphmet = peermet; 2619 } 2620 continue; 2621 } 2622 2623 /* 2624 * If this peer could have the orphan parent 2625 * as a synchronization ancestor, exclude it 2626 * from selection to avoid forming a 2627 * synchronization loop within the orphan mesh, 2628 * triggering stratum climb to infinity 2629 * instability. Peers at stratum higher than 2630 * the orphan stratum could have the orphan 2631 * parent in ancestry so are excluded. 2632 * See http://bugs.ntp.org/2050 2633 */ 2634 if (peer->stratum > sys_orphan) 2635 continue; 2636 #ifdef REFCLOCK 2637 /* 2638 * The following are special cases. We deal 2639 * with them later. 2640 */ 2641 if (!(peer->flags & FLAG_PREFER)) { 2642 switch (peer->refclktype) { 2643 case REFCLK_LOCALCLOCK: 2644 if ( current_time > orphwait 2645 && typelocal == NULL) 2646 typelocal = peer; 2647 continue; 2648 2649 case REFCLK_ACTS: 2650 if ( current_time > orphwait 2651 && typeacts == NULL) 2652 typeacts = peer; 2653 continue; 2654 } 2655 } 2656 #endif /* REFCLOCK */ 2657 2658 /* 2659 * If we get this far, the peer can stay on the 2660 * island, but does not yet have the immunity 2661 * idol. 
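 *
 * (Made-up example of the endpoint bookkeeping below: a candidate
 * with offset +2 ms and root distance 5 ms contributes the
 * correctness interval [-3 ms, +7 ms], that is, a lower endpoint at
 * offset - distance and an upper endpoint at offset + distance.)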
2662 */ 2663 peer->new_status = CTL_PST_SEL_SANE; 2664 f = root_distance(peer); 2665 peers[nlist].peer = peer; 2666 peers[nlist].error = peer->jitter; 2667 peers[nlist].synch = f; 2668 nlist++; 2669 2670 /* 2671 * Insert each interval endpoint on the unsorted 2672 * endpoint[] list. 2673 */ 2674 e = peer->offset; 2675 endpoint[nl2].type = -1; /* lower end */ 2676 endpoint[nl2].val = e - f; 2677 nl2++; 2678 endpoint[nl2].type = 1; /* upper end */ 2679 endpoint[nl2].val = e + f; 2680 nl2++; 2681 } 2682 /* 2683 * Construct sorted indx[] of endpoint[] indexes ordered by 2684 * offset. 2685 */ 2686 for (i = 0; i < nl2; i++) 2687 indx[i] = i; 2688 for (i = 0; i < nl2; i++) { 2689 endp = endpoint[indx[i]]; 2690 e = endp.val; 2691 k = i; 2692 for (j = i + 1; j < nl2; j++) { 2693 endp = endpoint[indx[j]]; 2694 if (endp.val < e) { 2695 e = endp.val; 2696 k = j; 2697 } 2698 } 2699 if (k != i) { 2700 j = indx[k]; 2701 indx[k] = indx[i]; 2702 indx[i] = j; 2703 } 2704 } 2705 for (i = 0; i < nl2; i++) 2706 DPRINTF(3, ("select: endpoint %2d %.6f\n", 2707 endpoint[indx[i]].type, endpoint[indx[i]].val)); 2708 2709 /* 2710 * This is the actual algorithm that cleaves the truechimers 2711 * from the falsetickers. The original algorithm was described 2712 * in Keith Marzullo's dissertation, but has been modified for 2713 * better accuracy. 2714 * 2715 * Briefly put, we first assume there are no falsetickers, then 2716 * scan the candidate list first from the low end upwards and 2717 * then from the high end downwards. The scans stop when the 2718 * number of intersections equals the number of candidates less 2719 * the number of falsetickers. If this doesn't happen for a 2720 * given number of falsetickers, we bump the number of 2721 * falsetickers and try again. If the number of falsetickers 2722 * becomes equal to or greater than half the number of 2723 * candidates, the Albanians have won the Byzantine wars and 2724 * correct synchronization is not possible. 2725 * 2726 * Here, nlist is the number of candidates and allow is the 2727 * number of falsetickers. Upon exit, the truechimers are the 2728 * survivors with offsets not less than low and not greater than 2729 * high. There may be none of them. 2730 */ 2731 low = 1e9; 2732 high = -1e9; 2733 for (allow = 0; 2 * allow < nlist; allow++) { 2734 2735 /* 2736 * Bound the interval (low, high) as the smallest 2737 * interval containing points from the most sources. 2738 */ 2739 n = 0; 2740 for (i = 0; i < nl2; i++) { 2741 low = endpoint[indx[i]].val; 2742 n -= endpoint[indx[i]].type; 2743 if (n >= nlist - allow) 2744 break; 2745 } 2746 n = 0; 2747 for (j = nl2 - 1; j >= 0; j--) { 2748 high = endpoint[indx[j]].val; 2749 n += endpoint[indx[j]].type; 2750 if (n >= nlist - allow) 2751 break; 2752 } 2753 2754 /* 2755 * If an interval containing truechimers is found, stop. 2756 * If not, increase the number of falsetickers and go 2757 * around again. 2758 */ 2759 if (high > low) 2760 break; 2761 } 2762 2763 /* 2764 * Clustering algorithm. Whittle candidate list of falsetickers, 2765 * who leave the island immediately. The TRUE peer is always a 2766 * truechimer. We must leave at least one peer to collect the 2767 * million bucks. 2768 * 2769 * We assert the correct time is contained in the interval, but 2770 * the best offset estimate for the interval might not be 2771 * contained in the interval. For this purpose, a truechimer is 2772 * defined as the midpoint of an interval that overlaps the 2773 * intersection interval. 
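 *
 * (Made-up example: three candidates advertise the intervals
 * [1, 5], [2, 6] and [10, 14] ms. With allow = 0 no point is common
 * to all three, so allow is bumped to 1; the two scans then stop at
 * low = 2 ms and high = 5 ms, the first two candidates overlap that
 * intersection and survive, and the third is put to sea as a
 * falseticker.)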
2774 */ 2775 j = 0; 2776 for (i = 0; i < nlist; i++) { 2777 double h; 2778 2779 peer = peers[i].peer; 2780 h = peers[i].synch; 2781 if (( high <= low 2782 || peer->offset + h < low 2783 || peer->offset - h > high 2784 ) && !(peer->flags & FLAG_TRUE)) 2785 continue; 2786 2787 #ifdef REFCLOCK 2788 /* 2789 * Eligible PPS peers must survive the intersection 2790 * algorithm. Use the first one found, but don't 2791 * include any of them in the cluster population. 2792 */ 2793 if (peer->flags & FLAG_PPS) { 2794 if (typepps == NULL) 2795 typepps = peer; 2796 if (!(peer->flags & FLAG_TSTAMP_PPS)) 2797 continue; 2798 } 2799 #endif /* REFCLOCK */ 2800 2801 if (j != i) 2802 peers[j] = peers[i]; 2803 j++; 2804 } 2805 nlist = j; 2806 2807 /* 2808 * If no survivors remain at this point, check if the modem 2809 * driver, local driver or orphan parent in that order. If so, 2810 * nominate the first one found as the only survivor. 2811 * Otherwise, give up and leave the island to the rats. 2812 */ 2813 if (nlist == 0) { 2814 peers[0].error = 0; 2815 peers[0].synch = sys_mindisp; 2816 #ifdef REFCLOCK 2817 if (typeacts != NULL) { 2818 peers[0].peer = typeacts; 2819 nlist = 1; 2820 } else if (typelocal != NULL) { 2821 peers[0].peer = typelocal; 2822 nlist = 1; 2823 } else 2824 #endif /* REFCLOCK */ 2825 if (typeorphan != NULL) { 2826 peers[0].peer = typeorphan; 2827 nlist = 1; 2828 } 2829 } 2830 2831 /* 2832 * Mark the candidates at this point as truechimers. 2833 */ 2834 for (i = 0; i < nlist; i++) { 2835 peers[i].peer->new_status = CTL_PST_SEL_SELCAND; 2836 DPRINTF(2, ("select: survivor %s %f\n", 2837 stoa(&peers[i].peer->srcadr), peers[i].synch)); 2838 } 2839 2840 /* 2841 * Now, vote outliers off the island by select jitter weighted 2842 * by root distance. Continue voting as long as there are more 2843 * than sys_minclock survivors and the select jitter of the peer 2844 * with the worst metric is greater than the minimum peer 2845 * jitter. Stop if we are about to discard a TRUE or PREFER 2846 * peer, who of course have the immunity idol. 2847 */ 2848 while (1) { 2849 d = 1e9; 2850 e = -1e9; 2851 g = 0; 2852 k = 0; 2853 for (i = 0; i < nlist; i++) { 2854 if (peers[i].error < d) 2855 d = peers[i].error; 2856 peers[i].seljit = 0; 2857 if (nlist > 1) { 2858 f = 0; 2859 for (j = 0; j < nlist; j++) 2860 f += DIFF(peers[j].peer->offset, 2861 peers[i].peer->offset); 2862 peers[i].seljit = SQRT(f / (nlist - 1)); 2863 } 2864 if (peers[i].seljit * peers[i].synch > e) { 2865 g = peers[i].seljit; 2866 e = peers[i].seljit * peers[i].synch; 2867 k = i; 2868 } 2869 } 2870 g = max(g, LOGTOD(sys_precision)); 2871 if ( nlist <= max(1, sys_minclock) 2872 || g <= d 2873 || ((FLAG_TRUE | FLAG_PREFER) & peers[k].peer->flags)) 2874 break; 2875 2876 DPRINTF(3, ("select: drop %s seljit %.6f jit %.6f\n", 2877 ntoa(&peers[k].peer->srcadr), g, d)); 2878 if (nlist > sys_maxclock) 2879 peers[k].peer->new_status = CTL_PST_SEL_EXCESS; 2880 for (j = k + 1; j < nlist; j++) 2881 peers[j - 1] = peers[j]; 2882 nlist--; 2883 } 2884 2885 /* 2886 * What remains is a list usually not greater than sys_minclock 2887 * peers. Note that unsynchronized peers cannot survive this 2888 * far. Count and mark these survivors. 2889 * 2890 * While at it, count the number of leap warning bits found. 2891 * This will be used later to vote the system leap warning bit. 2892 * If a leap warning bit is found on a reference clock, the vote 2893 * is always won. 
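 *
 * (Illustration: with five survivors, at least three must announce
 * an impending insertion before clock_update() will arm one, since
 * the vote has to exceed sys_survivors / 2; a single reference
 * clock announcing the leap sets the vote to the full survivor
 * count and wins outright.)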
2894 * 2895 * Choose the system peer using a hybrid metric composed of the 2896 * selection jitter scaled by the root distance augmented by 2897 * stratum scaled by sys_mindisp (.001 by default). The goal of 2898 * the small stratum factor is to avoid clockhop between a 2899 * reference clock and a network peer which has a refclock and 2900 * is using an older ntpd, which does not floor sys_rootdisp at 2901 * sys_mindisp. 2902 * 2903 * In contrast, ntpd 4.2.6 and earlier used stratum primarily 2904 * in selecting the system peer, using a weight of 1 second of 2905 * additional root distance per stratum. This heavy bias is no 2906 * longer appropriate, as the scaled root distance provides a 2907 * more rational metric carrying the cumulative error budget. 2908 */ 2909 e = 1e9; 2910 speer = 0; 2911 leap_vote_ins = 0; 2912 leap_vote_del = 0; 2913 for (i = 0; i < nlist; i++) { 2914 peer = peers[i].peer; 2915 peer->unreach = 0; 2916 peer->new_status = CTL_PST_SEL_SYNCCAND; 2917 sys_survivors++; 2918 if (peer->leap == LEAP_ADDSECOND) { 2919 if (peer->flags & FLAG_REFCLOCK) 2920 leap_vote_ins = nlist; 2921 else if (leap_vote_ins < nlist) 2922 leap_vote_ins++; 2923 } 2924 if (peer->leap == LEAP_DELSECOND) { 2925 if (peer->flags & FLAG_REFCLOCK) 2926 leap_vote_del = nlist; 2927 else if (leap_vote_del < nlist) 2928 leap_vote_del++; 2929 } 2930 if (peer->flags & FLAG_PREFER) 2931 sys_prefer = peer; 2932 speermet = peers[i].seljit * peers[i].synch + 2933 peer->stratum * sys_mindisp; 2934 if (speermet < e) { 2935 e = speermet; 2936 speer = i; 2937 } 2938 } 2939 2940 /* 2941 * Unless there are at least sys_misane survivors, leave the 2942 * building dark. Otherwise, do a clockhop dance. Ordinarily, 2943 * use the selected survivor speer. However, if the current 2944 * system peer is not speer, stay with the current system peer 2945 * as long as it doesn't get too old or too ugly. 2946 */ 2947 if (nlist > 0 && nlist >= sys_minsane) { 2948 double x; 2949 2950 typesystem = peers[speer].peer; 2951 if (osys_peer == NULL || osys_peer == typesystem) { 2952 sys_clockhop = 0; 2953 } else if ((x = fabs(typesystem->offset - 2954 osys_peer->offset)) < sys_mindisp) { 2955 if (sys_clockhop == 0) 2956 sys_clockhop = sys_mindisp; 2957 else 2958 sys_clockhop *= .5; 2959 DPRINTF(1, ("select: clockhop %d %.6f %.6f\n", 2960 j, x, sys_clockhop)); 2961 if (fabs(x) < sys_clockhop) 2962 typesystem = osys_peer; 2963 else 2964 sys_clockhop = 0; 2965 } else { 2966 sys_clockhop = 0; 2967 } 2968 } 2969 2970 /* 2971 * Mitigation rules of the game. We have the pick of the 2972 * litter in typesystem if any survivors are left. If 2973 * there is a prefer peer, use its offset and jitter. 2974 * Otherwise, use the combined offset and jitter of all kitters. 2975 */ 2976 if (typesystem != NULL) { 2977 if (sys_prefer == NULL) { 2978 typesystem->new_status = CTL_PST_SEL_SYSPEER; 2979 clock_combine(peers, sys_survivors, speer); 2980 } else { 2981 typesystem = sys_prefer; 2982 sys_clockhop = 0; 2983 typesystem->new_status = CTL_PST_SEL_SYSPEER; 2984 sys_offset = typesystem->offset; 2985 sys_jitter = typesystem->jitter; 2986 } 2987 DPRINTF(1, ("select: combine offset %.9f jitter %.9f\n", 2988 sys_offset, sys_jitter)); 2989 } 2990 #ifdef REFCLOCK 2991 /* 2992 * If a PPS driver is lit and the combined offset is less than 2993 * 0.4 s, select the driver as the PPS peer and use its offset 2994 * and jitter. However, if this is the atom driver, use it only 2995 * if there is a prefer peer or there are no survivors and none 2996 * are required. 
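 *
 * (Roughly speaking, the 0.4 s guard below reflects that a PPS
 * pulse only marks the top of a second; it can steer the clock only
 * once the offset is already well inside +/-0.5 s, so anything
 * larger has to be worked off by an ordinary peer first.)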
2997 */ 2998 if ( typepps != NULL 2999 && fabs(sys_offset) < 0.4 3000 && ( typepps->refclktype != REFCLK_ATOM_PPS 3001 || ( typepps->refclktype == REFCLK_ATOM_PPS 3002 && ( sys_prefer != NULL 3003 || (typesystem == NULL && sys_minsane == 0))))) { 3004 typesystem = typepps; 3005 sys_clockhop = 0; 3006 typesystem->new_status = CTL_PST_SEL_PPS; 3007 sys_offset = typesystem->offset; 3008 sys_jitter = typesystem->jitter; 3009 DPRINTF(1, ("select: pps offset %.9f jitter %.9f\n", 3010 sys_offset, sys_jitter)); 3011 } 3012 #endif /* REFCLOCK */ 3013 3014 /* 3015 * If there are no survivors at this point, there is no 3016 * system peer. If so and this is an old update, keep the 3017 * current statistics, but do not update the clock. 3018 */ 3019 if (typesystem == NULL) { 3020 if (osys_peer != NULL) { 3021 if (sys_orphwait > 0) 3022 orphwait = current_time + sys_orphwait; 3023 report_event(EVNT_NOPEER, NULL, NULL); 3024 } 3025 sys_peer = NULL; 3026 for (peer = peer_list; peer != NULL; peer = peer->p_link) 3027 peer->status = peer->new_status; 3028 return; 3029 } 3030 3031 /* 3032 * Do not use old data, as this may mess up the clock discipline 3033 * stability. 3034 */ 3035 if (typesystem->epoch <= sys_epoch) 3036 return; 3037 3038 /* 3039 * We have found the alpha male. Wind the clock. 3040 */ 3041 if (osys_peer != typesystem) 3042 report_event(PEVNT_NEWPEER, typesystem, NULL); 3043 for (peer = peer_list; peer != NULL; peer = peer->p_link) 3044 peer->status = peer->new_status; 3045 clock_update(typesystem); 3046 } 3047 3048 3049 static void 3050 clock_combine( 3051 peer_select * peers, /* survivor list */ 3052 int npeers, /* number of survivors */ 3053 int syspeer /* index of sys.peer */ 3054 ) 3055 { 3056 int i; 3057 double x, y, z, w; 3058 3059 y = z = w = 0; 3060 for (i = 0; i < npeers; i++) { 3061 x = 1. / peers[i].synch; 3062 y += x; 3063 z += x * peers[i].peer->offset; 3064 w += x * DIFF(peers[i].peer->offset, 3065 peers[syspeer].peer->offset); 3066 } 3067 sys_offset = z / y; 3068 sys_jitter = SQRT(w / y + SQUARE(peers[syspeer].seljit)); 3069 } 3070 3071 3072 /* 3073 * root_distance - compute synchronization distance from peer to root 3074 */ 3075 static double 3076 root_distance( 3077 struct peer *peer /* peer structure pointer */ 3078 ) 3079 { 3080 double dtemp; 3081 3082 /* 3083 * Root Distance (LAMBDA) is defined as: 3084 * (delta + DELTA)/2 + epsilon + EPSILON + phi 3085 * 3086 * where: 3087 * delta is the round-trip delay 3088 * DELTA is the root delay 3089 * epsilon is the remote server precision + local precision 3090 * + (15 usec each second) 3091 * EPSILON is the root dispersion 3092 * phi is the peer jitter statistic 3093 * 3094 * NB: Think hard about why we are using these values, and what 3095 * the alternatives are, and the various pros/cons. 3096 * 3097 * DLM thinks these are probably the best choices from any of the 3098 * other worse choices. 3099 */ 3100 dtemp = (peer->delay + peer->rootdelay) / 2 3101 + LOGTOD(peer->precision) 3102 + LOGTOD(sys_precision) 3103 + clock_phi * (current_time - peer->update) 3104 + peer->rootdisp 3105 + peer->jitter; 3106 /* 3107 * Careful squeak here. The value returned must be greater than 3108 * the minimum root dispersion in order to avoid clockhop with 3109 * highly precise reference clocks. Note that the root distance 3110 * cannot exceed the sys_maxdist, as this is the cutoff by the 3111 * selection algorithm. 
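 *
 * (Worked example with made-up figures: delay 20 ms, root delay
 * 10 ms, both precisions near 2^-20 s, 64 s since the last update
 * at 15 PPM, root dispersion 5 ms and jitter 0.5 ms give
 * 0.015 + 0.000002 + 0.00096 + 0.005 + 0.0005, about 21.5 ms,
 * comfortably under the nominal 1.5 s selection threshold and above
 * the floor applied below.)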
3112 */ 3113 if (dtemp < sys_mindisp) 3114 dtemp = sys_mindisp; 3115 return (dtemp); 3116 } 3117 3118 3119 /* 3120 * peer_xmit - send packet for persistent association. 3121 */ 3122 static void 3123 peer_xmit( 3124 struct peer *peer /* peer structure pointer */ 3125 ) 3126 { 3127 struct pkt xpkt; /* transmit packet */ 3128 size_t sendlen, authlen; 3129 keyid_t xkeyid = 0; /* transmit key ID */ 3130 l_fp xmt_tx, xmt_ty; 3131 3132 if (!peer->dstadr) /* drop peers without interface */ 3133 return; 3134 3135 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, peer->version, 3136 peer->hmode); 3137 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3138 xpkt.ppoll = peer->hpoll; 3139 xpkt.precision = sys_precision; 3140 xpkt.refid = sys_refid; 3141 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3142 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3143 HTONL_FP(&sys_reftime, &xpkt.reftime); 3144 HTONL_FP(&peer->rec, &xpkt.org); 3145 HTONL_FP(&peer->dst, &xpkt.rec); 3146 3147 /* 3148 * If the received packet contains a MAC, the transmitted packet 3149 * is authenticated and contains a MAC. If not, the transmitted 3150 * packet is not authenticated. 3151 * 3152 * It is most important when autokey is in use that the local 3153 * interface IP address be known before the first packet is 3154 * sent. Otherwise, it is not possible to compute a correct MAC 3155 * the recipient will accept. Thus, the I/O semantics have to do 3156 * a little more work. In particular, the wildcard interface 3157 * might not be usable. 3158 */ 3159 sendlen = LEN_PKT_NOMAC; 3160 #ifdef AUTOKEY 3161 if (!(peer->flags & FLAG_SKEY) && peer->keyid == 0) { 3162 #else /* !AUTOKEY follows */ 3163 if (peer->keyid == 0) { 3164 #endif /* !AUTOKEY */ 3165 3166 /* 3167 * Transmit a-priori timestamps 3168 */ 3169 get_systime(&xmt_tx); 3170 if (peer->flip == 0) { /* basic mode */ 3171 peer->aorg = xmt_tx; 3172 HTONL_FP(&xmt_tx, &xpkt.xmt); 3173 } else { /* interleaved modes */ 3174 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3175 HTONL_FP(&xmt_tx, &xpkt.xmt); 3176 if (peer->flip > 0) 3177 HTONL_FP(&peer->borg, 3178 &xpkt.org); 3179 else 3180 HTONL_FP(&peer->aorg, 3181 &xpkt.org); 3182 } else { /* symmetric */ 3183 if (peer->flip > 0) 3184 HTONL_FP(&peer->borg, 3185 &xpkt.xmt); 3186 else 3187 HTONL_FP(&peer->aorg, 3188 &xpkt.xmt); 3189 } 3190 } 3191 peer->t21_bytes = sendlen; 3192 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], 3193 &xpkt, sendlen); 3194 peer->sent++; 3195 peer->throttle += (1 << peer->minpoll) - 2; 3196 3197 /* 3198 * Capture a-posteriori timestamps 3199 */ 3200 get_systime(&xmt_ty); 3201 if (peer->flip != 0) { /* interleaved modes */ 3202 if (peer->flip > 0) 3203 peer->aorg = xmt_ty; 3204 else 3205 peer->borg = xmt_ty; 3206 peer->flip = -peer->flip; 3207 } 3208 L_SUB(&xmt_ty, &xmt_tx); 3209 LFPTOD(&xmt_ty, peer->xleave); 3210 #ifdef DEBUG 3211 if (debug) 3212 printf("transmit: at %ld %s->%s mode %d len %zu\n", 3213 current_time, peer->dstadr ? 3214 stoa(&peer->dstadr->sin) : "-", 3215 stoa(&peer->srcadr), peer->hmode, sendlen); 3216 #endif 3217 return; 3218 } 3219 3220 /* 3221 * Authentication is enabled, so the transmitted packet must be 3222 * authenticated. If autokey is enabled, fuss with the various 3223 * modes; otherwise, symmetric key cryptography is used. 
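 *
 * (For the symmetric-key path further below, the MAC appended by
 * authencrypt() is a 4-octet key ID followed by the digest,
 * typically 16 octets for MD5 or 20 for SHA1, so sendlen grows by
 * 20 or 24 octets beyond LEN_PKT_NOMAC.)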
3224 */ 3225 #ifdef AUTOKEY 3226 if (peer->flags & FLAG_SKEY) { 3227 struct exten *exten; /* extension field */ 3228 3229 /* 3230 * The Public Key Dance (PKD): Cryptographic credentials 3231 * are contained in extension fields, each including a 3232 * 4-octet length/code word followed by a 4-octet 3233 * association ID and optional additional data. Optional 3234 * data includes a 4-octet data length field followed by 3235 * the data itself. Request messages are sent from a 3236 * configured association; response messages can be sent 3237 * from a configured association or can take the fast 3238 * path without ever matching an association. Response 3239 * messages have the same code as the request, but have 3240 * a response bit and possibly an error bit set. In this 3241 * implementation, a message may contain no more than 3242 * one command and one or more responses. 3243 * 3244 * Cryptographic session keys include both a public and 3245 * a private componet. Request and response messages 3246 * using extension fields are always sent with the 3247 * private component set to zero. Packets without 3248 * extension fields indlude the private component when 3249 * the session key is generated. 3250 */ 3251 while (1) { 3252 3253 /* 3254 * Allocate and initialize a keylist if not 3255 * already done. Then, use the list in inverse 3256 * order, discarding keys once used. Keep the 3257 * latest key around until the next one, so 3258 * clients can use client/server packets to 3259 * compute propagation delay. 3260 * 3261 * Note that once a key is used from the list, 3262 * it is retained in the key cache until the 3263 * next key is used. This is to allow a client 3264 * to retrieve the encrypted session key 3265 * identifier to verify authenticity. 3266 * 3267 * If for some reason a key is no longer in the 3268 * key cache, a birthday has happened or the key 3269 * has expired, so the pseudo-random sequence is 3270 * broken. In that case, purge the keylist and 3271 * regenerate it. 3272 */ 3273 if (peer->keynumber == 0) 3274 make_keylist(peer, peer->dstadr); 3275 else 3276 peer->keynumber--; 3277 xkeyid = peer->keylist[peer->keynumber]; 3278 if (authistrusted(xkeyid)) 3279 break; 3280 else 3281 key_expire(peer); 3282 } 3283 peer->keyid = xkeyid; 3284 exten = NULL; 3285 switch (peer->hmode) { 3286 3287 /* 3288 * In broadcast server mode the autokey values are 3289 * required by the broadcast clients. Push them when a 3290 * new keylist is generated; otherwise, push the 3291 * association message so the client can request them at 3292 * other times. 3293 */ 3294 case MODE_BROADCAST: 3295 if (peer->flags & FLAG_ASSOC) 3296 exten = crypto_args(peer, CRYPTO_AUTO | 3297 CRYPTO_RESP, peer->associd, NULL); 3298 else 3299 exten = crypto_args(peer, CRYPTO_ASSOC | 3300 CRYPTO_RESP, peer->associd, NULL); 3301 break; 3302 3303 /* 3304 * In symmetric modes the parameter, certificate, 3305 * identity, cookie and autokey exchanges are 3306 * required. The leapsecond exchange is optional. But, a 3307 * peer will not believe the other peer until the other 3308 * peer has synchronized, so the certificate exchange 3309 * might loop until then. If a peer finds a broken 3310 * autokey sequence, it uses the autokey exchange to 3311 * retrieve the autokey values. In any case, if a new 3312 * keylist is generated, the autokey values are pushed. 3313 */ 3314 case MODE_ACTIVE: 3315 case MODE_PASSIVE: 3316 3317 /* 3318 * Parameter, certificate and identity. 
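 *
 * (Roughly, the if/else ladder below runs one exchange per packet:
 * ASSOC, then CERT, then the identity scheme (IFF/GQ/MV), then,
 * once both sides report synchronization, COOK and AUTO, and
 * finally SIGN and LEAP; each poll re-evaluates from the top, so a
 * missing prerequisite is simply retried at the earliest incomplete
 * step.)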
3319 */ 3320 if (!peer->crypto) 3321 exten = crypto_args(peer, CRYPTO_ASSOC, 3322 peer->associd, hostval.ptr); 3323 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3324 exten = crypto_args(peer, CRYPTO_CERT, 3325 peer->associd, peer->issuer); 3326 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3327 exten = crypto_args(peer, 3328 crypto_ident(peer), peer->associd, 3329 NULL); 3330 3331 /* 3332 * Cookie and autokey. We request the cookie 3333 * only when the this peer and the other peer 3334 * are synchronized. But, this peer needs the 3335 * autokey values when the cookie is zero. Any 3336 * time we regenerate the key list, we offer the 3337 * autokey values without being asked. If for 3338 * some reason either peer finds a broken 3339 * autokey sequence, the autokey exchange is 3340 * used to retrieve the autokey values. 3341 */ 3342 else if ( sys_leap != LEAP_NOTINSYNC 3343 && peer->leap != LEAP_NOTINSYNC 3344 && !(peer->crypto & CRYPTO_FLAG_COOK)) 3345 exten = crypto_args(peer, CRYPTO_COOK, 3346 peer->associd, NULL); 3347 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3348 exten = crypto_args(peer, CRYPTO_AUTO, 3349 peer->associd, NULL); 3350 else if ( peer->flags & FLAG_ASSOC 3351 && peer->crypto & CRYPTO_FLAG_SIGN) 3352 exten = crypto_args(peer, CRYPTO_AUTO | 3353 CRYPTO_RESP, peer->assoc, NULL); 3354 3355 /* 3356 * Wait for clock sync, then sign the 3357 * certificate and retrieve the leapsecond 3358 * values. 3359 */ 3360 else if (sys_leap == LEAP_NOTINSYNC) 3361 break; 3362 3363 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3364 exten = crypto_args(peer, CRYPTO_SIGN, 3365 peer->associd, hostval.ptr); 3366 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3367 exten = crypto_args(peer, CRYPTO_LEAP, 3368 peer->associd, NULL); 3369 break; 3370 3371 /* 3372 * In client mode the parameter, certificate, identity, 3373 * cookie and sign exchanges are required. The 3374 * leapsecond exchange is optional. If broadcast client 3375 * mode the same exchanges are required, except that the 3376 * autokey exchange is substitutes for the cookie 3377 * exchange, since the cookie is always zero. If the 3378 * broadcast client finds a broken autokey sequence, it 3379 * uses the autokey exchange to retrieve the autokey 3380 * values. 3381 */ 3382 case MODE_CLIENT: 3383 3384 /* 3385 * Parameter, certificate and identity. 3386 */ 3387 if (!peer->crypto) 3388 exten = crypto_args(peer, CRYPTO_ASSOC, 3389 peer->associd, hostval.ptr); 3390 else if (!(peer->crypto & CRYPTO_FLAG_CERT)) 3391 exten = crypto_args(peer, CRYPTO_CERT, 3392 peer->associd, peer->issuer); 3393 else if (!(peer->crypto & CRYPTO_FLAG_VRFY)) 3394 exten = crypto_args(peer, 3395 crypto_ident(peer), peer->associd, 3396 NULL); 3397 3398 /* 3399 * Cookie and autokey. These are requests, but 3400 * we use the peer association ID with autokey 3401 * rather than our own. 3402 */ 3403 else if (!(peer->crypto & CRYPTO_FLAG_COOK)) 3404 exten = crypto_args(peer, CRYPTO_COOK, 3405 peer->associd, NULL); 3406 else if (!(peer->crypto & CRYPTO_FLAG_AUTO)) 3407 exten = crypto_args(peer, CRYPTO_AUTO, 3408 peer->assoc, NULL); 3409 3410 /* 3411 * Wait for clock sync, then sign the 3412 * certificate and retrieve the leapsecond 3413 * values. 
3414 */ 3415 else if (sys_leap == LEAP_NOTINSYNC) 3416 break; 3417 3418 else if (!(peer->crypto & CRYPTO_FLAG_SIGN)) 3419 exten = crypto_args(peer, CRYPTO_SIGN, 3420 peer->associd, hostval.ptr); 3421 else if (!(peer->crypto & CRYPTO_FLAG_LEAP)) 3422 exten = crypto_args(peer, CRYPTO_LEAP, 3423 peer->associd, NULL); 3424 break; 3425 } 3426 3427 /* 3428 * Add a queued extension field if present. This is 3429 * always a request message, so the reply ID is already 3430 * in the message. If an error occurs, the error bit is 3431 * lit in the response. 3432 */ 3433 if (peer->cmmd != NULL) { 3434 u_int32 temp32; 3435 3436 temp32 = CRYPTO_RESP; 3437 peer->cmmd->opcode |= htonl(temp32); 3438 sendlen += crypto_xmit(peer, &xpkt, NULL, 3439 sendlen, peer->cmmd, 0); 3440 free(peer->cmmd); 3441 peer->cmmd = NULL; 3442 } 3443 3444 /* 3445 * Add an extension field created above. All but the 3446 * autokey response message are request messages. 3447 */ 3448 if (exten != NULL) { 3449 if (exten->opcode != 0) 3450 sendlen += crypto_xmit(peer, &xpkt, 3451 NULL, sendlen, exten, 0); 3452 free(exten); 3453 } 3454 3455 /* 3456 * Calculate the next session key. Since extension 3457 * fields are present, the cookie value is zero. 3458 */ 3459 if (sendlen > (int)LEN_PKT_NOMAC) { 3460 session_key(&peer->dstadr->sin, &peer->srcadr, 3461 xkeyid, 0, 2); 3462 } 3463 } 3464 #endif /* AUTOKEY */ 3465 3466 /* 3467 * Transmit a-priori timestamps 3468 */ 3469 get_systime(&xmt_tx); 3470 if (peer->flip == 0) { /* basic mode */ 3471 peer->aorg = xmt_tx; 3472 HTONL_FP(&xmt_tx, &xpkt.xmt); 3473 } else { /* interleaved modes */ 3474 if (peer->hmode == MODE_BROADCAST) { /* bcst */ 3475 HTONL_FP(&xmt_tx, &xpkt.xmt); 3476 if (peer->flip > 0) 3477 HTONL_FP(&peer->borg, &xpkt.org); 3478 else 3479 HTONL_FP(&peer->aorg, &xpkt.org); 3480 } else { /* symmetric */ 3481 if (peer->flip > 0) 3482 HTONL_FP(&peer->borg, &xpkt.xmt); 3483 else 3484 HTONL_FP(&peer->aorg, &xpkt.xmt); 3485 } 3486 } 3487 xkeyid = peer->keyid; 3488 authlen = authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3489 if (authlen == 0) { 3490 report_event(PEVNT_AUTH, peer, "no key"); 3491 peer->flash |= TEST5; /* auth error */ 3492 peer->badauth++; 3493 return; 3494 } 3495 sendlen += authlen; 3496 #ifdef AUTOKEY 3497 if (xkeyid > NTP_MAXKEY) 3498 authtrust(xkeyid, 0); 3499 #endif /* AUTOKEY */ 3500 if (sendlen > sizeof(xpkt)) { 3501 msyslog(LOG_ERR, "proto: buffer overflow %zu", sendlen); 3502 exit (-1); 3503 } 3504 peer->t21_bytes = sendlen; 3505 sendpkt(&peer->srcadr, peer->dstadr, sys_ttl[peer->ttl], &xpkt, 3506 sendlen); 3507 peer->sent++; 3508 peer->throttle += (1 << peer->minpoll) - 2; 3509 3510 /* 3511 * Capture a-posteriori timestamps 3512 */ 3513 get_systime(&xmt_ty); 3514 if (peer->flip != 0) { /* interleaved modes */ 3515 if (peer->flip > 0) 3516 peer->aorg = xmt_ty; 3517 else 3518 peer->borg = xmt_ty; 3519 peer->flip = -peer->flip; 3520 } 3521 L_SUB(&xmt_ty, &xmt_tx); 3522 LFPTOD(&xmt_ty, peer->xleave); 3523 #ifdef AUTOKEY 3524 #ifdef DEBUG 3525 if (debug) 3526 printf("transmit: at %ld %s->%s mode %d keyid %08x len %zu index %d\n", 3527 current_time, latoa(peer->dstadr), 3528 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen, 3529 peer->keynumber); 3530 #endif 3531 #else /* !AUTOKEY follows */ 3532 #ifdef DEBUG 3533 if (debug) 3534 printf("transmit: at %ld %s->%s mode %d keyid %08x len %d\n", 3535 current_time, peer->dstadr ? 
3536 ntoa(&peer->dstadr->sin) : "-", 3537 ntoa(&peer->srcadr), peer->hmode, xkeyid, sendlen); 3538 #endif 3539 #endif /* !AUTOKEY */ 3540 } 3541 3542 3543 #ifdef LEAP_SMEAR 3544 3545 static void 3546 leap_smear_add_offs(l_fp *t, l_fp *t_recv) { 3547 L_ADD(t, &leap_smear.offset); 3548 } 3549 3550 #endif /* LEAP_SMEAR */ 3551 3552 3553 /* 3554 * fast_xmit - Send packet for nonpersistent association. Note that 3555 * neither the source or destination can be a broadcast address. 3556 */ 3557 static void 3558 fast_xmit( 3559 struct recvbuf *rbufp, /* receive packet pointer */ 3560 int xmode, /* receive mode */ 3561 keyid_t xkeyid, /* transmit key ID */ 3562 int flags /* restrict mask */ 3563 ) 3564 { 3565 struct pkt xpkt; /* transmit packet structure */ 3566 struct pkt *rpkt; /* receive packet structure */ 3567 l_fp xmt_tx, xmt_ty; 3568 int sendlen; 3569 #ifdef AUTOKEY 3570 u_int32 temp32; 3571 #endif 3572 3573 /* 3574 * Initialize transmit packet header fields from the receive 3575 * buffer provided. We leave the fields intact as received, but 3576 * set the peer poll at the maximum of the receive peer poll and 3577 * the system minimum poll (ntp_minpoll). This is for KoD rate 3578 * control and not strictly specification compliant, but doesn't 3579 * break anything. 3580 * 3581 * If the gazinta was from a multicast address, the gazoutta 3582 * must go out another way. 3583 */ 3584 rpkt = &rbufp->recv_pkt; 3585 if (rbufp->dstadr->flags & INT_MCASTOPEN) 3586 rbufp->dstadr = findinterface(&rbufp->recv_srcadr); 3587 3588 /* 3589 * If this is a kiss-o'-death (KoD) packet, show leap 3590 * unsynchronized, stratum zero, reference ID the four-character 3591 * kiss code and system root delay. Note we don't reveal the 3592 * local time, so these packets can't be used for 3593 * synchronization. 3594 */ 3595 if (flags & RES_KOD) { 3596 sys_kodsent++; 3597 xpkt.li_vn_mode = PKT_LI_VN_MODE(LEAP_NOTINSYNC, 3598 PKT_VERSION(rpkt->li_vn_mode), xmode); 3599 xpkt.stratum = STRATUM_PKT_UNSPEC; 3600 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3601 xpkt.precision = rpkt->precision; 3602 memcpy(&xpkt.refid, "RATE", 4); 3603 xpkt.rootdelay = rpkt->rootdelay; 3604 xpkt.rootdisp = rpkt->rootdisp; 3605 xpkt.reftime = rpkt->reftime; 3606 xpkt.org = rpkt->xmt; 3607 xpkt.rec = rpkt->xmt; 3608 xpkt.xmt = rpkt->xmt; 3609 3610 /* 3611 * This is a normal packet. Use the system variables. 3612 */ 3613 } else { 3614 #ifdef LEAP_SMEAR 3615 /* 3616 * Make copies of the variables which can be affected by smearing. 3617 */ 3618 l_fp this_ref_time; 3619 l_fp this_recv_time; 3620 #endif 3621 3622 /* 3623 * If we are inside the leap smear interval we add the current smear offset to 3624 * the packet receive time, to the packet transmit time, and eventually to the 3625 * reftime to make sure the reftime isn't later than the transmit/receive times. 
3626 */ 3627 xpkt.li_vn_mode = PKT_LI_VN_MODE(xmt_leap, 3628 PKT_VERSION(rpkt->li_vn_mode), xmode); 3629 3630 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3631 xpkt.ppoll = max(rpkt->ppoll, ntp_minpoll); 3632 xpkt.precision = sys_precision; 3633 xpkt.refid = sys_refid; 3634 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3635 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3636 3637 #ifdef LEAP_SMEAR 3638 this_ref_time = sys_reftime; 3639 if (leap_smear.in_progress) { 3640 leap_smear_add_offs(&this_ref_time, NULL); 3641 xpkt.refid = convertLFPToRefID(leap_smear.offset); 3642 DPRINTF(2, ("fast_xmit: leap_smear.in_progress: refid %8x, smear %s\n", 3643 ntohl(xpkt.refid), 3644 lfptoa(&leap_smear.offset, 8) 3645 )); 3646 } 3647 HTONL_FP(&this_ref_time, &xpkt.reftime); 3648 #else 3649 HTONL_FP(&sys_reftime, &xpkt.reftime); 3650 #endif 3651 3652 xpkt.org = rpkt->xmt; 3653 3654 #ifdef LEAP_SMEAR 3655 this_recv_time = rbufp->recv_time; 3656 if (leap_smear.in_progress) 3657 leap_smear_add_offs(&this_recv_time, NULL); 3658 HTONL_FP(&this_recv_time, &xpkt.rec); 3659 #else 3660 HTONL_FP(&rbufp->recv_time, &xpkt.rec); 3661 #endif 3662 3663 get_systime(&xmt_tx); 3664 #ifdef LEAP_SMEAR 3665 if (leap_smear.in_progress) 3666 leap_smear_add_offs(&xmt_tx, &this_recv_time); 3667 #endif 3668 HTONL_FP(&xmt_tx, &xpkt.xmt); 3669 } 3670 3671 #ifdef HAVE_NTP_SIGND 3672 if (flags & RES_MSSNTP) { 3673 send_via_ntp_signd(rbufp, xmode, xkeyid, flags, &xpkt); 3674 return; 3675 } 3676 #endif /* HAVE_NTP_SIGND */ 3677 3678 /* 3679 * If the received packet contains a MAC, the transmitted packet 3680 * is authenticated and contains a MAC. If not, the transmitted 3681 * packet is not authenticated. 3682 */ 3683 sendlen = LEN_PKT_NOMAC; 3684 if (rbufp->recv_length == sendlen) { 3685 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, 3686 sendlen); 3687 #ifdef DEBUG 3688 if (debug) 3689 printf( 3690 "transmit: at %ld %s->%s mode %d len %d\n", 3691 current_time, stoa(&rbufp->dstadr->sin), 3692 stoa(&rbufp->recv_srcadr), xmode, sendlen); 3693 #endif 3694 return; 3695 } 3696 3697 /* 3698 * The received packet contains a MAC, so the transmitted packet 3699 * must be authenticated. For symmetric key cryptography, use 3700 * the predefined and trusted symmetric keys to generate the 3701 * cryptosum. For autokey cryptography, use the server private 3702 * value to generate the cookie, which is unique for every 3703 * source-destination-key ID combination. 3704 */ 3705 #ifdef AUTOKEY 3706 if (xkeyid > NTP_MAXKEY) { 3707 keyid_t cookie; 3708 3709 /* 3710 * The only way to get here is a reply to a legitimate 3711 * client request message, so the mode must be 3712 * MODE_SERVER. If an extension field is present, there 3713 * can be only one and that must be a command. Do what 3714 * needs, but with private value of zero so the poor 3715 * jerk can decode it. If no extension field is present, 3716 * use the cookie to generate the session key. 
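 *
 * (The cookie computed below comes from session_key() over the
 * client and server addresses, a zero key ID and the server's
 * private value sys_private, so it is unique for each
 * source/destination pair yet can be regenerated on demand without
 * keeping per-client state.)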
3717 */ 3718 cookie = session_key(&rbufp->recv_srcadr, 3719 &rbufp->dstadr->sin, 0, sys_private, 0); 3720 if (rbufp->recv_length > sendlen + (int)MAX_MAC_LEN) { 3721 session_key(&rbufp->dstadr->sin, 3722 &rbufp->recv_srcadr, xkeyid, 0, 2); 3723 temp32 = CRYPTO_RESP; 3724 rpkt->exten[0] |= htonl(temp32); 3725 sendlen += crypto_xmit(NULL, &xpkt, rbufp, 3726 sendlen, (struct exten *)rpkt->exten, 3727 cookie); 3728 } else { 3729 session_key(&rbufp->dstadr->sin, 3730 &rbufp->recv_srcadr, xkeyid, cookie, 2); 3731 } 3732 } 3733 #endif /* AUTOKEY */ 3734 get_systime(&xmt_tx); 3735 sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen); 3736 #ifdef AUTOKEY 3737 if (xkeyid > NTP_MAXKEY) 3738 authtrust(xkeyid, 0); 3739 #endif /* AUTOKEY */ 3740 sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, 0, &xpkt, sendlen); 3741 get_systime(&xmt_ty); 3742 L_SUB(&xmt_ty, &xmt_tx); 3743 sys_authdelay = xmt_ty; 3744 #ifdef DEBUG 3745 if (debug) 3746 printf( 3747 "transmit: at %ld %s->%s mode %d keyid %08x len %d\n", 3748 current_time, ntoa(&rbufp->dstadr->sin), 3749 ntoa(&rbufp->recv_srcadr), xmode, xkeyid, sendlen); 3750 #endif 3751 } 3752 3753 3754 /* 3755 * pool_xmit - resolve hostname or send unicast solicitation for pool. 3756 */ 3757 static void 3758 pool_xmit( 3759 struct peer *pool /* pool solicitor association */ 3760 ) 3761 { 3762 #ifdef WORKER 3763 struct pkt xpkt; /* transmit packet structure */ 3764 struct addrinfo hints; 3765 int rc; 3766 struct interface * lcladr; 3767 sockaddr_u * rmtadr; 3768 int restrict_mask; 3769 struct peer * p; 3770 l_fp xmt_tx; 3771 3772 if (NULL == pool->ai) { 3773 if (pool->addrs != NULL) { 3774 /* free() is used with copy_addrinfo_list() */ 3775 free(pool->addrs); 3776 pool->addrs = NULL; 3777 } 3778 ZERO(hints); 3779 hints.ai_family = AF(&pool->srcadr); 3780 hints.ai_socktype = SOCK_DGRAM; 3781 hints.ai_protocol = IPPROTO_UDP; 3782 /* ignore getaddrinfo_sometime() errors, we will retry */ 3783 rc = getaddrinfo_sometime( 3784 pool->hostname, 3785 "ntp", 3786 &hints, 3787 0, /* no retry */ 3788 &pool_name_resolved, 3789 (void *)(intptr_t)pool->associd); 3790 if (!rc) 3791 DPRINTF(1, ("pool DNS lookup %s started\n", 3792 pool->hostname)); 3793 else 3794 msyslog(LOG_ERR, 3795 "unable to start pool DNS %s: %m", 3796 pool->hostname); 3797 return; 3798 } 3799 3800 do { 3801 /* copy_addrinfo_list ai_addr points to a sockaddr_u */ 3802 rmtadr = (sockaddr_u *)(void *)pool->ai->ai_addr; 3803 pool->ai = pool->ai->ai_next; 3804 p = findexistingpeer(rmtadr, NULL, NULL, MODE_CLIENT, 0); 3805 } while (p != NULL && pool->ai != NULL); 3806 if (p != NULL) 3807 return; /* out of addresses, re-query DNS next poll */ 3808 restrict_mask = restrictions(rmtadr); 3809 if (RES_FLAGS & restrict_mask) 3810 restrict_source(rmtadr, 0, 3811 current_time + POOL_SOLICIT_WINDOW + 1); 3812 lcladr = findinterface(rmtadr); 3813 memset(&xpkt, 0, sizeof(xpkt)); 3814 xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap, pool->version, 3815 MODE_CLIENT); 3816 xpkt.stratum = STRATUM_TO_PKT(sys_stratum); 3817 xpkt.ppoll = pool->hpoll; 3818 xpkt.precision = sys_precision; 3819 xpkt.refid = sys_refid; 3820 xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay)); 3821 xpkt.rootdisp = HTONS_FP(DTOUFP(sys_rootdisp)); 3822 HTONL_FP(&sys_reftime, &xpkt.reftime); 3823 get_systime(&xmt_tx); 3824 pool->aorg = xmt_tx; 3825 HTONL_FP(&xmt_tx, &xpkt.xmt); 3826 sendpkt(rmtadr, lcladr, sys_ttl[pool->ttl], &xpkt, 3827 LEN_PKT_NOMAC); 3828 pool->sent++; 3829 pool->throttle += (1 << pool->minpoll) - 2; 3830 #ifdef DEBUG 3831 if (debug) 3832 
#ifdef DEBUG
	if (debug)
		printf("transmit: at %ld %s->%s pool\n",
		    current_time, latoa(lcladr), stoa(rmtadr));
#endif
	msyslog(LOG_INFO, "Soliciting pool server %s", stoa(rmtadr));
#endif	/* WORKER */
}


#ifdef AUTOKEY
/*
 * group_test - test if this is the same group
 *
 * host		assoc		return		action
 * none		none		0		mobilize *
 * none		group		0		mobilize *
 * group	none		0		mobilize *
 * group	group		1		mobilize
 * group	different	1		ignore
 *
 * * ignore if notrust
 */
int group_test(
	char	*grp,
	char	*ident
	)
{
	if (grp == NULL)
		return (0);

	if (strcmp(grp, sys_groupname) == 0)
		return (0);

	if (ident == NULL)
		return (1);

	if (strcmp(grp, ident) == 0)
		return (0);

	return (1);
}
#endif /* AUTOKEY */

#ifdef WORKER
void
pool_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	res
	)
{
	struct peer *	pool;	/* pool solicitor association */
	associd_t	assoc;

	if (rescode) {
		msyslog(LOG_ERR,
			"error resolving pool %s: %s (%d)",
			name, gai_strerror(rescode), rescode);
		return;
	}

	assoc = (associd_t)(intptr_t)context;
	pool = findpeerbyassoc(assoc);
	if (NULL == pool) {
		msyslog(LOG_ERR,
			"Could not find assoc %u for pool DNS %s",
			assoc, name);
		return;
	}
	DPRINTF(1, ("pool DNS %s completed\n", name));
	pool->addrs = copy_addrinfo_list(res);
	pool->ai = pool->addrs;
	pool_xmit(pool);
}
#endif	/* WORKER */


#ifdef AUTOKEY
/*
 * key_expire - purge the key list
 */
void
key_expire(
	struct peer *peer	/* peer structure pointer */
	)
{
	int i;

	if (peer->keylist != NULL) {
		for (i = 0; i <= peer->keynumber; i++)
			authtrust(peer->keylist[i], 0);
		free(peer->keylist);
		peer->keylist = NULL;
	}
	value_free(&peer->sndval);
	peer->keynumber = 0;
	peer->flags &= ~FLAG_ASSOC;
#ifdef DEBUG
	if (debug)
		printf("key_expire: at %lu associd %d\n", current_time,
		    peer->associd);
#endif
}
#endif	/* AUTOKEY */

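
/*
 * Worked example for the fitness checks below (illustrative, using
 * hypothetical addresses and the usual compiled-in defaults):
 *
 * - Loop check (TEST12): if this ntpd answers clients from 192.0.2.1
 *   and a candidate peer advertises refid 192.0.2.1, that peer is
 *   directly or indirectly following us; local_refid() detects the
 *   match and peer_unfit() flags TEST12 rather than close the loop.
 *
 * - Distance check (TEST11): with sys_maxdist at its default of 1.5 s
 *   and clock_phi of 15e-6, a peer polled every 2^6 = 64 s is declared
 *   unfit once its root distance reaches 1.5 + 15e-6 * 64 ~= 1.501 s.
 */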

/*
 * local_refid(peer) - check peer refid to avoid selecting peers
 *		       currently synced to this ntpd.
 */
static int
local_refid(
	struct peer *	p
	)
{
	endpt *	unicast_ep;

	if (p->dstadr != NULL && !(INT_MCASTIF & p->dstadr->flags))
		unicast_ep = p->dstadr;
	else
		unicast_ep = findinterface(&p->srcadr);

	if (unicast_ep != NULL && p->refid == unicast_ep->addr_refid)
		return TRUE;
	else
		return FALSE;
}


/*
 * Determine if the peer is unfit for synchronization
 *
 * A peer is unfit for synchronization if
 * > TEST10 bad leap or stratum below floor or at or above ceiling
 * > TEST11 root distance exceeded for remote peer
 * > TEST12 a direct or indirect synchronization loop would form
 * > TEST13 unreachable or noselect
 */
int				/* FALSE if fit, TRUE if unfit */
peer_unfit(
	struct peer *peer	/* peer structure pointer */
	)
{
	int	rval = 0;

	/*
	 * A stratum error occurs if (1) the server has never been
	 * synchronized, or (2) the server stratum is below the floor
	 * or greater than or equal to the ceiling.
	 */
	if (   peer->leap == LEAP_NOTINSYNC
	    || peer->stratum < sys_floor
	    || peer->stratum >= sys_ceiling)
		rval |= TEST10;		/* bad synch or stratum */

	/*
	 * A distance error for a remote peer occurs if the root
	 * distance is greater than or equal to the distance threshold
	 * plus the increment due to one host poll interval.
	 */
	if (   !(peer->flags & FLAG_REFCLOCK)
	    && root_distance(peer) >= sys_maxdist
				      + clock_phi * ULOGTOD(peer->hpoll))
		rval |= TEST11;		/* distance exceeded */

	/*
	 * A loop error occurs if the remote peer is synchronized to the
	 * local peer or if the remote peer is synchronized to the same
	 * server as the local peer, but only if the remote peer is
	 * neither a reference clock nor an orphan.
	 */
	if (peer->stratum > 1 && local_refid(peer))
		rval |= TEST12;		/* synchronization loop */

	/*
	 * An unreachable error occurs if the server is unreachable or
	 * the noselect bit is set.
	 */
	if (!peer->reach || (peer->flags & FLAG_NOSELECT))
		rval |= TEST13;		/* unreachable */

	peer->flash &= ~PEER_TEST_MASK;
	peer->flash |= rval;
	return (rval);
}


/*
 * Find the precision of this particular machine
 */
#define MINSTEP		20e-9	/* minimum clock increment (s) */
#define MAXSTEP		1	/* maximum clock increment (s) */
#define MINCHANGES	12	/* minimum number of step samples */
#define MAXLOOPS	((int)(1. / MINSTEP))	/* avoid infinite loop */

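/*
 * Worked example (illustrative): with MINSTEP = 20e-9 the sampling
 * loop in measure_tick_fuzz() below is bounded by MAXLOOPS =
 * (int)(1. / 20e-9) = 50,000,000 iterations, so a clock that never
 * appears to advance cannot hang the measurement; it is reported as
 * a fatal error instead.
 */
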
/*
 * This routine measures the system precision, defined as the minimum
 * of a sequence of differences between successive readings of the
 * system clock. However, if a difference is less than MINSTEP, the
 * clock has been read more than once during a clock tick and the
 * difference is ignored. We set MINSTEP greater than zero in case
 * something happens like a cache miss, and to tolerate underlying
 * system clocks which ensure each reading is strictly greater than
 * prior readings while using an underlying stepping (not
 * interpolated) clock.
 *
 * sys_tick and sys_precision represent the time to read the clock for
 * systems with high-precision clocks, and the tick interval or step
 * size for lower-precision stepping clocks.
 *
 * This routine also measures the time to read the clock on stepping
 * system clocks by counting the number of readings between changes of
 * the underlying clock. With either type of clock, the minimum time
 * to read the clock is saved as sys_fuzz, and used to ensure the
 * get_systime() readings always increase and are fuzzed below
 * sys_fuzz.
 */
void
measure_precision(void)
{
	/*
	 * With sys_fuzz set to zero, get_systime() fuzzing of low bits
	 * is effectively disabled. trunc_os_clock is FALSE to disable
	 * get_ostime() simulation of a low-precision system clock.
	 */
	set_sys_fuzz(0.);
	trunc_os_clock = FALSE;
	measured_tick = measure_tick_fuzz();
	set_sys_tick_precision(measured_tick);
	msyslog(LOG_INFO, "proto: precision = %.3f usec (%d)",
		sys_tick * 1e6, sys_precision);
	if (sys_fuzz < sys_tick) {
		msyslog(LOG_NOTICE, "proto: fuzz beneath %.3f usec",
			sys_fuzz * 1e6);
	}
}


/*
 * measure_tick_fuzz()
 *
 * Measures the minimum time to read the clock (stored in sys_fuzz)
 * and returns the tick, the larger of the minimum increment observed
 * between successive clock readings and the time to read the clock.
 */
double
measure_tick_fuzz(void)
{
	l_fp	minstep;	/* MINSTEP as l_fp */
	l_fp	val;		/* current seconds fraction */
	l_fp	last;		/* last seconds fraction */
	l_fp	ldiff;		/* val - last */
	double	tick;		/* computed tick value */
	double	diff;
	long	repeats;
	long	max_repeats;
	int	changes;
	int	i;		/* loop counter */

	tick = MAXSTEP;
	max_repeats = 0;
	repeats = 0;
	changes = 0;
	DTOLFP(MINSTEP, &minstep);
	get_systime(&last);
	for (i = 0; i < MAXLOOPS && changes < MINCHANGES; i++) {
		get_systime(&val);
		ldiff = val;
		L_SUB(&ldiff, &last);
		last = val;
		if (L_ISGT(&ldiff, &minstep)) {
			max_repeats = max(repeats, max_repeats);
			repeats = 0;
			changes++;
			LFPTOD(&ldiff, diff);
			tick = min(diff, tick);
		} else {
			repeats++;
		}
	}
	if (changes < MINCHANGES) {
		msyslog(LOG_ERR, "Fatal error: precision could not be measured (MINSTEP too large?)");
		exit(1);
	}

	if (0 == max_repeats) {
		set_sys_fuzz(tick);
	} else {
		set_sys_fuzz(tick / max_repeats);
	}

	return tick;
}


void
set_sys_tick_precision(
	double tick
	)
{
	int i;

	if (tick > 1.) {
		msyslog(LOG_ERR,
			"unsupported tick %.3f > 1s ignored", tick);
		return;
	}
	if (tick < measured_tick) {
		msyslog(LOG_ERR,
			"proto: tick %.3f less than measured tick %.3f, ignored",
			tick, measured_tick);
		return;
	} else if (tick > measured_tick) {
		trunc_os_clock = TRUE;
		msyslog(LOG_NOTICE,
			"proto: truncating system clock to multiples of %.9f",
			tick);
	}
	sys_tick = tick;

	/*
	 * Find the nearest power of two.
	 */
	for (i = 0; tick <= 1; i--)
		tick *= 2;
	if (tick - 1 > 1 - tick / 2)
		i++;

	sys_precision = (s_char)i;
}

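/*
 * Worked example (illustrative): a measured tick of 1 usec is doubled
 * 20 times before exceeding 1 (1e-6 * 2^20 ~= 1.049), leaving i at
 * -20; since 1.049 is closer to 1 than to 2, no rounding adjustment
 * is made and sys_precision becomes -20, i.e. 2^-20 s ~= 0.95 usec.
 * This matches the familiar "precision = 1.000 usec (-20)" log line
 * from measure_precision() above.
 */
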
/*
 * init_proto - initialize the protocol module's data
 */
void
init_proto(void)
{
	l_fp	dummy;
	int	i;

	/*
	 * Fill in the sys_* stuff. Default is don't listen to
	 * broadcasting, require authentication.
	 */
	set_sys_leap(LEAP_NOTINSYNC);
	sys_stratum = STRATUM_UNSPEC;
	memcpy(&sys_refid, "INIT", 4);
	sys_peer = NULL;
	sys_rootdelay = 0;
	sys_rootdisp = 0;
	L_CLR(&sys_reftime);
	sys_jitter = 0;
	measure_precision();
	get_systime(&dummy);
	sys_survivors = 0;
	sys_manycastserver = 0;
	sys_bclient = 0;
	sys_bdelay = 0;
	sys_authenticate = 1;
	sys_stattime = current_time;
	orphwait = current_time + sys_orphwait;
	proto_clr_stats();
	for (i = 0; i < MAX_TTL; i++) {
		sys_ttl[i] = (u_char)((i * 256) / MAX_TTL);
		sys_ttlmax = i;
	}
	hardpps_enable = 0;
	stats_control = 1;
}


/*
 * proto_config - configure the protocol module
 */
void
proto_config(
	int	item,
	u_long	value,
	double	dvalue,
	sockaddr_u *svalue
	)
{
	/*
	 * Figure out what he wants to change, then do it
	 */
	DPRINTF(2, ("proto_config: code %d value %lu dvalue %lf\n",
		    item, value, dvalue));

	switch (item) {

	/*
	 * enable and disable commands - arguments are Boolean.
	 */
	case PROTO_AUTHENTICATE: /* authentication (auth) */
		sys_authenticate = value;
		break;

	case PROTO_BROADCLIENT: /* broadcast client (bclient) */
		sys_bclient = (int)value;
		if (sys_bclient == 0)
			io_unsetbclient();
		else
			io_setbclient();
		break;

#ifdef REFCLOCK
	case PROTO_CAL:		/* refclock calibrate (calibrate) */
		cal_enable = value;
		break;
#endif /* REFCLOCK */

	case PROTO_KERNEL:	/* kernel discipline (kernel) */
		select_loop(value);
		break;

	case PROTO_MONITOR:	/* monitoring (monitor) */
		if (value)
			mon_start(MON_ON);
		else {
			mon_stop(MON_ON);
			if (mon_enabled)
				msyslog(LOG_WARNING,
					"restrict: 'monitor' cannot be disabled while 'limited' is enabled");
		}
		break;

	case PROTO_NTP:		/* NTP discipline (ntp) */
		ntp_enable = value;
		break;

	case PROTO_MODE7:	/* mode7 management (ntpdc) */
		ntp_mode7 = value;
		break;

	case PROTO_PPS:		/* PPS discipline (pps) */
		hardpps_enable = value;
		break;

	case PROTO_FILEGEN:	/* statistics (stats) */
		stats_control = value;
		break;

	/*
	 * tos command - arguments are double, sometimes cast to int
	 */
	case PROTO_BEACON:	/* manycast beacon (beacon) */
		sys_beacon = (int)dvalue;
		break;

	case PROTO_BROADDELAY:	/* default broadcast delay (bdelay) */
		sys_bdelay = dvalue;
		break;

	case PROTO_CEILING:	/* stratum ceiling (ceiling) */
		sys_ceiling = (int)dvalue;
		break;

	case PROTO_COHORT:	/* cohort switch (cohort) */
		sys_cohort = (int)dvalue;
		break;

	case PROTO_FLOOR:	/* stratum floor (floor) */
		sys_floor = (int)dvalue;
		break;

	case PROTO_MAXCLOCK:	/* maximum candidates (maxclock) */
		sys_maxclock = (int)dvalue;
		break;

	case PROTO_MAXDIST:	/* select threshold (maxdist) */
		sys_maxdist = dvalue;
		break;

	case PROTO_CALLDELAY:	/* modem call delay (mdelay) */
		break;		/* NOT USED */

	case PROTO_MINCLOCK:	/* minimum candidates (minclock) */
		sys_minclock = (int)dvalue;
		break;

	case PROTO_MINDISP:	/* minimum distance (mindist) */
		sys_mindisp = dvalue;
		break;
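	/*
	 * Illustrative note (hedged, based on how the tos handlers
	 * above and below use their arguments): the "tos" options are
	 * delivered in dvalue; a setting such as "tos mindist 0.05
	 * minsane 2" would land here as PROTO_MINDISP with dvalue 0.05
	 * and PROTO_MINSANE with dvalue 2, the latter truncated to an
	 * int by the cast.
	 */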

	case PROTO_MINSANE:	/* minimum survivors (minsane) */
		sys_minsane = (int)dvalue;
		break;

	case PROTO_ORPHAN:	/* orphan stratum (orphan) */
		sys_orphan = (int)dvalue;
		break;

	case PROTO_ORPHWAIT:	/* orphan wait (orphwait) */
		orphwait -= sys_orphwait;
		sys_orphwait = (int)dvalue;
		orphwait += sys_orphwait;
		break;

	/*
	 * Miscellaneous commands
	 */
	case PROTO_MULTICAST_ADD: /* add group address */
		if (svalue != NULL)
			io_multicast_add(svalue);
		sys_bclient = 1;
		break;

	case PROTO_MULTICAST_DEL: /* delete group address */
		if (svalue != NULL)
			io_multicast_del(svalue);
		break;

	default:
		msyslog(LOG_NOTICE,
			"proto: unsupported option %d", item);
	}
}


/*
 * proto_clr_stats - clear protocol stat counters
 */
void
proto_clr_stats(void)
{
	sys_stattime = current_time;
	sys_received = 0;
	sys_processed = 0;
	sys_newversion = 0;
	sys_oldversion = 0;
	sys_declined = 0;
	sys_restricted = 0;
	sys_badlength = 0;
	sys_badauth = 0;
	sys_limitrejected = 0;
	sys_kodsent = 0;
}
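

/*
 * The block below is an illustrative, stand-alone sketch of the
 * precision-measurement technique used by measure_tick_fuzz() and
 * set_sys_tick_precision() above.  It is fenced out with #if 0: it is
 * not part of ntpd, uses clock_gettime() rather than get_systime(),
 * and omits the sys_fuzz (repeat-counting) half of the measurement.
 */
#if 0
#include <stdio.h>
#include <time.h>

#define SKETCH_MINSTEP		20e-9	/* ignore differences below this (s) */
#define SKETCH_MINCHANGES	12	/* step samples required */
#define SKETCH_MAXLOOPS		((int)(1. / SKETCH_MINSTEP))

static double
ts_sub(const struct timespec *a, const struct timespec *b)
{
	/* difference a - b in seconds, computed field by field so the
	 * full-epoch magnitude does not eat the nanosecond resolution */
	return (double)(a->tv_sec - b->tv_sec)
	    + (double)(a->tv_nsec - b->tv_nsec) * 1e-9;
}

int
main(void)
{
	struct timespec	last, now;
	double		diff, tick, measured;
	int		changes, i, prec;

	tick = 1.;			/* analogous to MAXSTEP */
	changes = 0;
	clock_gettime(CLOCK_REALTIME, &last);
	for (i = 0; i < SKETCH_MAXLOOPS && changes < SKETCH_MINCHANGES; i++) {
		clock_gettime(CLOCK_REALTIME, &now);
		diff = ts_sub(&now, &last);
		last = now;
		if (diff > SKETCH_MINSTEP) {	/* a genuine clock step */
			changes++;
			if (diff < tick)
				tick = diff;
		}
	}
	if (changes < SKETCH_MINCHANGES) {
		fprintf(stderr, "could not measure precision\n");
		return 1;
	}

	/*
	 * Round to the nearest power of two, mirroring the loop in
	 * set_sys_tick_precision() above.
	 */
	measured = tick;
	for (prec = 0; tick <= 1; prec--)
		tick *= 2;
	if (tick - 1 > 1 - tick / 2)
		prec++;

	printf("measured tick = %.3f usec, log2 precision = %d\n",
	    measured * 1e6, prec);
	return 0;
}
#endif	/* illustrative sketch */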