/*
 * ntp_proto.c - NTP version 4 protocol machinery
 *
 * $FreeBSD$
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>
#include <sys/time.h>

#include "ntpd.h"
#include "ntp_stdlib.h"
#include "ntp_unixtime.h"
#include "ntp_control.h"
#include "ntp_string.h"

#if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
#include "ntp_refclock.h"
#endif

#if defined(__FreeBSD__) && __FreeBSD__ >= 3
#include <sys/sysctl.h>
#endif

/*
 * System variables are declared here. See Section 3.2 of the
 * specification.
 */
u_char	sys_leap;		/* system leap indicator */
u_char	sys_stratum;		/* stratum of system */
s_char	sys_precision;		/* local clock precision */
double	sys_rootdelay;		/* distance to current sync source */
double	sys_rootdispersion;	/* dispersion of system clock */
u_int32	sys_refid;		/* reference source for local clock */
static double sys_offset;	/* current local clock offset */
l_fp	sys_reftime;		/* time we were last updated */
struct peer *sys_peer;		/* our current peer */
u_long	sys_automax;		/* maximum session key lifetime */

/*
 * Nonspecified system state variables.
 */
int	sys_bclient;		/* we set our time to broadcasts */
double	sys_bdelay;		/* broadcast client default delay */
int	sys_authenticate;	/* require authentication for config */
l_fp	sys_authdelay;		/* authentication delay */
static	u_long sys_authdly[2];	/* authentication delay shift reg */
static	u_char leap_consensus;	/* consensus of survivor leap bits */
static	double sys_maxd;	/* select error (squares) */
static	double sys_epsil;	/* system error (squares) */
u_long	sys_private;		/* private value for session seed */
int	sys_manycastserver;	/* 1 => respond to manycast client pkts */

/*
 * Statistics counters
 */
u_long	sys_stattime;		/* time when we started recording */
u_long	sys_badstratum;		/* packets with invalid stratum */
u_long	sys_oldversionpkt;	/* old version packets received */
u_long	sys_newversionpkt;	/* new version packets received */
u_long	sys_unknownversion;	/* don't know version packets */
u_long	sys_badlength;		/* packets with bad length */
u_long	sys_processed;		/* packets processed */
u_long	sys_badauth;		/* packets dropped because of auth */
u_long	sys_limitrejected;	/* pkts rejected due to client count per net */

static	double	root_distance	P((struct peer *));
static	double	clock_combine	P((struct peer **, int));
static	void	peer_xmit	P((struct peer *));
static	void	fast_xmit	P((struct recvbuf *, int, u_long));
static	void	clock_update	P((void));
#ifdef MD5
static	void	make_keylist	P((struct peer *));
#endif /* MD5 */

/*
 * transmit - Transmit Procedure. See Section 3.4.2 of the
 *	specification.
 */
void
transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	int hpoll;

	hpoll = peer->hpoll;
	if (peer->burst == 0) {
		u_char oreach;

		/*
		 * Determine reachability and diddle things if we
		 * haven't heard from the host for a while. If the peer
		 * is not configured and not likely to stay around,
		 * we exhaust it.
		 */
		oreach = peer->reach;
		if (oreach & 0x01)
			peer->valid++;
		if (oreach & 0x80)
			peer->valid--;
		if (!(peer->flags & FLAG_CONFIG) &&
		    peer->valid > NTP_SHIFT / 2 && (peer->reach & 0x80) &&
		    peer->status < CTL_PST_SEL_SYNCCAND)
			peer->reach = 0;
		peer->reach <<= 1;
		if (peer->reach == 0) {

			/*
			 * If this is an unconfigured association and
			 * has become unreachable, demobilize it.
			 */
			if (oreach != 0) {
				report_event(EVNT_UNREACH, peer);
				peer->timereachable = current_time;
				peer_clear(peer);
				if (!(peer->flags & FLAG_CONFIG)) {
					unpeer(peer);
					return;
				}
			}

			/*
			 * We would like to respond quickly when the
			 * peer comes back to life. If the probes since
			 * becoming unreachable are less than
			 * NTP_UNREACH, clamp the poll interval to the
			 * minimum. In order to minimize the network
			 * traffic, the interval gradually ramps up to
			 * the maximum after that.
			 */
			peer->ppoll = peer->maxpoll;
			if (peer->unreach < NTP_UNREACH) {
				if (peer->hmode == MODE_CLIENT)
					peer->unreach++;
				hpoll = peer->minpoll;
			} else {
				hpoll++;
			}
			if (peer->flags & FLAG_BURST)
				peer->burst = 2;

		} else {

			/*
			 * Here the peer is reachable. If there is no
			 * system peer or if the stratum of the system
			 * peer is greater than this peer, clamp the
			 * poll interval to the minimum. If less than
			 * two samples are in the reachability register,
			 * reduce the interval; if more than six samples
			 * are in the register, increase the interval.
			 */
			peer->unreach = 0;
			if (sys_peer == 0)
				hpoll = peer->minpoll;
			else if (sys_peer->stratum > peer->stratum)
				hpoll = peer->minpoll;
			if ((peer->reach & 0x03) == 0) {
				clock_filter(peer, 0., 0., MAXDISPERSE);
				clock_select();
			}
			if (peer->valid <= 2)
				hpoll--;
			else if (peer->valid >= NTP_SHIFT - 2)
				hpoll++;
			if (peer->flags & FLAG_BURST)
				peer->burst = NTP_SHIFT;
		}
	} else {
		peer->burst--;
		if (peer->burst == 0) {
			if (peer->flags & FLAG_MCAST2) {
				peer->flags &= ~FLAG_BURST;
				peer->hmode = MODE_BCLIENT;
			}
			clock_select();
			poll_update(peer, hpoll);
			return;
		}
	}

	/*
	 * We need to be very careful about honking uncivilized time. If
	 * not operating in broadcast mode, honk in all except broadcast
	 * client mode. If operating in broadcast mode and synchronized
	 * to a real source, honk except when the peer is the local-
	 * clock driver and the prefer flag is not set. In other words,
	 * in broadcast mode we never honk unless known to be
	 * synchronized to real time.
	 */
	if (peer->hmode != MODE_BROADCAST) {
		if (peer->hmode != MODE_BCLIENT)
			peer_xmit(peer);
	} else if (sys_peer != 0 && sys_leap != LEAP_NOTINSYNC) {
		if (!(sys_peer->refclktype == REFCLK_LOCALCLOCK &&
		    !(sys_peer->flags & FLAG_PREFER)))
			peer_xmit(peer);
	}
	peer->outdate = current_time;
	poll_update(peer, hpoll);
}

/*
 * receive - Receive Procedure. See section 3.4.3 in the specification.
 */
void
receive(
	struct recvbuf *rbufp
	)
{
	register struct peer *peer;
	register struct pkt *pkt;
	int hismode;
	int oflags;
	int restrict_mask;
	int has_mac;			/* has MAC field */
	int authlen;			/* length of MAC field */
	int is_authentic;		/* cryptosum ok */
	int is_mystic;			/* session key exists */
	int is_error;			/* parse error */
	/* u_long pkeyid; */
	u_long skeyid, tkeyid;
	struct peer *peer2;
	int retcode = AM_NOMATCH;

	/*
	 * Monitor the packet and get restrictions
	 */
	ntp_monitor(rbufp);
	restrict_mask = restrictions(&rbufp->recv_srcadr);
#ifdef DEBUG
	if (debug > 1)
		printf("receive: from %s restrict %02x\n",
		    ntoa(&rbufp->recv_srcadr), restrict_mask);
#endif
	if (restrict_mask & RES_IGNORE)
		return;

	/*
	 * Discard packets with invalid version number.
	 */
	pkt = &rbufp->recv_pkt;
	if (PKT_VERSION(pkt->li_vn_mode) >= NTP_VERSION)
		sys_newversionpkt++;
	else if (PKT_VERSION(pkt->li_vn_mode) >= NTP_OLDVERSION)
		sys_oldversionpkt++;
	else {
		sys_unknownversion++;
		return;
	}

	/*
	 * Restrict control/private mode packets. Note that packet
	 * length has to be checked in the control/private mode protocol
	 * module.
	 */
	if (PKT_MODE(pkt->li_vn_mode) == MODE_PRIVATE) {
		if (restrict_mask & RES_NOQUERY)
			return;
		process_private(rbufp, ((restrict_mask & RES_NOMODIFY) ==
		    0));
		return;
	}
	if (PKT_MODE(pkt->li_vn_mode) == MODE_CONTROL) {
		if (restrict_mask & RES_NOQUERY)
			return;
		process_control(rbufp, restrict_mask);
		return;
	}

	/*
	 * Restrict revenue packets.
	 */
	if (restrict_mask & RES_DONTSERVE)
		return;

	/*
	 * See if we only accept a limited number of clients from the net
	 * this guy is from. Note: the flag is determined dynamically
	 * within restrictions()
	 */
	if (restrict_mask & RES_LIMITED) {
		sys_limitrejected++;
		return;
	}

	/*
	 * If we are not a broadcast client, ignore broadcast packets.
	 */
	if ((PKT_MODE(pkt->li_vn_mode) == MODE_BROADCAST && !sys_bclient))
		return;

	/*
	 * This is really awfully ugly. We figure out whether an extension
	 * field is present and then measure the MAC size. If the number
	 * of words following the packet header is less than or equal to
	 * 5, no extension field is present and these words constitute the
	 * MAC. If the number of words is greater than 5, an extension
	 * field is present and the first word contains the length of
	 * the extension field and the MAC follows that.
	 */
	has_mac = 0;
	/* pkeyid = 0; */
	skeyid = tkeyid = 0;
	authlen = LEN_PKT_NOMAC;
	has_mac = rbufp->recv_length - authlen;
	if (has_mac <= 5 * sizeof(u_int32)) {
		skeyid = (u_long)ntohl(pkt->keyid1) & 0xffffffff;
	} else {
		authlen += (u_long)ntohl(pkt->keyid1) & 0xffffffff;
		has_mac = rbufp->recv_length - authlen;
		if (authlen <= 0) {
			sys_badlength++;
			return;
		}

		/*
		 * Note that keyid3 is actually the key ident of the
		 * MAC itself.
		 */
		/* pkeyid = (u_long)ntohl(pkt->keyid2) & 0xffffffff; */
		skeyid = tkeyid = (u_long)ntohl(pkt->keyid3) & 0xffffffff;
	}

	/*
	 * Figure out his mode and validate it.
	 */
	hismode = (int)PKT_MODE(pkt->li_vn_mode);
	if (PKT_VERSION(pkt->li_vn_mode) == NTP_OLDVERSION && hismode ==
	    0) {
		/*
		 * Easy. If it is from the NTP port it is
		 * a symmetric active association, else a client.
		 */
		if (SRCPORT(&rbufp->recv_srcadr) == NTP_PORT)
			hismode = MODE_ACTIVE;
		else
			hismode = MODE_CLIENT;
	} else {
		if (hismode != MODE_ACTIVE && hismode != MODE_PASSIVE &&
		    hismode != MODE_SERVER && hismode != MODE_CLIENT &&
		    hismode != MODE_BROADCAST)
			return;
	}

	/*
	 * If he included a MAC field, decrypt it to see if it is
	 * authentic.
	 */
	is_authentic = is_mystic = 0;
	if (has_mac == 0) {
#ifdef DEBUG
		if (debug)
			printf("receive: at %ld from %s mode %d\n",
			    current_time, ntoa(&rbufp->recv_srcadr),
			    hismode);
#endif
	} else {
		is_mystic = authistrusted(skeyid);
#ifdef MD5
		if (skeyid > NTP_MAXKEY && !is_mystic) {

			/*
			 * For multicast mode, generate the session key
			 * and install in the key cache. For client mode,
			 * generate the session key for the unicast
			 * address. For server mode, the session key should
			 * already be in the key cache, since it was
			 * generated when the last request was sent.
			 */
			if (hismode == MODE_BROADCAST) {
				tkeyid = session_key(
				    ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
				    ntohl(rbufp->dstadr->bcast.sin_addr.s_addr),
				    skeyid, (u_long)(4 * (1 << pkt->ppoll)));
			} else if (hismode != MODE_SERVER) {
				tkeyid = session_key(
				    ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
				    ntohl(rbufp->dstadr->sin.sin_addr.s_addr),
				    skeyid, (u_long)(4 * (1 << pkt->ppoll)));
			}

		}
#endif /* MD5 */

		/*
		 * Compute the cryptosum. Note a clogging attack may
		 * succeed in bloating the key cache.
		 */
		if (authdecrypt(skeyid, (u_int32 *)pkt, authlen, has_mac))
			is_authentic = 1;
		else
			sys_badauth++;
#ifdef DEBUG
		if (debug)
			printf(
			    "receive: at %ld %s mode %d keyid %08lx mac %d auth %d\n",
			    current_time, ntoa(&rbufp->recv_srcadr),
			    hismode, skeyid, has_mac, is_authentic);
#endif
	}

	/*
	 * Find the peer. This will return a null if this guy isn't in
	 * the database.
	 */
	peer = findpeer(&rbufp->recv_srcadr, rbufp->dstadr, rbufp->fd,
	    hismode, &retcode);
	/*
	 * The new association matching rules are driven by a table specified
	 * in ntp.h. We have replaced the *default* behaviour of replying
	 * to bogus packets in server mode in this version.
	 * A packet must now match an association in order to be processed.
	 * In the event that no association exists, then an association is
	 * mobilized if need be. Two different associations can be mobilized:
	 * a) passive associations
	 * b) client associations due to broadcasts or manycasts.
	 */
	is_error = 0;
	switch (retcode) {
	case AM_FXMIT:
		/*
		 * If the client is configured purely as a broadcast client and
		 * not as a manycast server, it has no business being a server.
		 * Simply go home. Otherwise, send a MODE_SERVER response and go
		 * home. Note that we don't do an authentication check here,
		 * since we can't set the system clock; but, we do set the
		 * key ID to zero to tell the caller about this.
		 */
		if (!sys_bclient || sys_manycastserver) {
			if (is_authentic)
				fast_xmit(rbufp, MODE_SERVER, skeyid);
			else
				fast_xmit(rbufp, MODE_SERVER, 0);
		}

		/*
		 * We can't get here if an association is mobilized, so just
		 * toss the key, if appropriate.
		 */
		if (!is_mystic && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
		return;

	case AM_MANYCAST:
		/*
		 * This could be in response to a multicast packet sent by
		 * the "manycast" mode association. Find the peer based on the
		 * originate timestamp in the packet. Note that we don't
		 * mobilize a new association unless the packet is properly
		 * authenticated. The response must be properly authenticated,
		 * and it's darn funny if the manycaster isn't around now.
		 */
		if ((sys_authenticate && !is_authentic)) {
			is_error = 1;
			break;
		}
		peer2 = (struct peer *)findmanycastpeer(&pkt->org);
		if (peer2 == 0) {
			is_error = 1;
			break;
		}

		/*
		 * Create a new association and copy the peer variables to it.
		 * If something goes wrong, carefully pry the new association
		 * away and return its marbles to the candy store.
		 */
		peer = newpeer(&rbufp->recv_srcadr,
		    rbufp->dstadr, MODE_CLIENT, PKT_VERSION(pkt->li_vn_mode),
		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
		if (peer == 0) {
			is_error = 1;
			break;
		}
		peer_config_manycast(peer2, peer);
		break;

	case AM_ERR:
		/*
		 * Something bad happened. Dirty floor will be mopped by the
		 * code at the end of this adventure.
		 */
		is_error = 1;
		break;

	case AM_NEWPASS:
		/*
		 * Okay, we're going to keep him around. Allocate him some
		 * memory. But don't do that unless the packet is properly
		 * authenticated.
		 */
		if ((sys_authenticate && !is_authentic)) {
			is_error = 1;
			break;
		}
		peer = newpeer(&rbufp->recv_srcadr,
		    rbufp->dstadr, MODE_PASSIVE, PKT_VERSION(pkt->li_vn_mode),
		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
		break;

	case AM_NEWBCL:
		/*
		 * Broadcast client being set up now. Do this only if the
		 * packet is properly authenticated.
		 */
		if ((restrict_mask & RES_NOPEER) || !sys_bclient ||
		    (sys_authenticate && !is_authentic)) {
			is_error = 1;
			break;
		}
		peer = newpeer(&rbufp->recv_srcadr,
		    rbufp->dstadr, MODE_MCLIENT, PKT_VERSION(pkt->li_vn_mode),
		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
		if (peer == 0)
			break;
		peer->flags |= FLAG_MCAST1 | FLAG_MCAST2 | FLAG_BURST;
		peer->hmode = MODE_CLIENT;
		break;

	case AM_POSSBCL:
	case AM_PROCPKT:
		/*
		 * It seems like it is okay to process the packet now.
		 */
		break;

	default:
		/*
		 * Shouldn't be getting here, but simply return anyway!
		 */
		is_error = 1;
	}
	if (is_error) {

		/*
		 * Error stub. If we get here, something broke. We scuttle
		 * the autokey if necessary and sink the ship. This can
		 * occur only upon mobilization, so we can throw the
		 * structure away without fear of breaking anything.
		 */
		if (!is_mystic && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
		if (peer != 0)
			if (!(peer->flags & FLAG_CONFIG))
				unpeer(peer);
#ifdef DEBUG
		if (debug)
			printf("match error code %d assoc %d\n", retcode,
			    peer_associations);
#endif
		return;
	}

	/*
	 * If the peer isn't configured, set his keyid and authenable
	 * status based on the packet.
	 */
	oflags = peer->flags;
	peer->timereceived = current_time;
	if (!(peer->flags & FLAG_CONFIG) && has_mac) {
		peer->flags |= FLAG_AUTHENABLE;
		if (skeyid > NTP_MAXKEY) {
			if (peer->flags & FLAG_MCAST2)
				peer->keyid = skeyid;
			else
				peer->flags |= FLAG_SKEY;
		}
	}

	/*
	 * Determine if this guy is basically trustable. If not, flush
	 * the bugger. If this is the first packet that is authenticated,
	 * flush the clock filter. This is to foil clogging attacks that
	 * might starve the poor dear.
	 */
	peer->flash = 0;
	if (is_authentic)
		peer->flags |= FLAG_AUTHENTIC;
	else
		peer->flags &= ~FLAG_AUTHENTIC;
	if (peer->hmode == MODE_BROADCAST && (restrict_mask & RES_DONTTRUST))
		peer->flash |= TEST10;		/* access denied */
	if (peer->flags & FLAG_AUTHENABLE) {
		if (!(peer->flags & FLAG_AUTHENTIC))
			peer->flash |= TEST5;	/* authentication failed */
		else if (skeyid == 0)
			peer->flash |= TEST9;	/* peer not authenticated */
		else if (!(oflags & FLAG_AUTHENABLE)) {
			peer_clear(peer);
			report_event(EVNT_PEERAUTH, peer);
		}
	}
	if ((peer->flash & ~(u_int)TEST9) != 0) {

		/*
		 * The packet is bogus, so we throw it away before becoming
		 * a denial-of-service hazard. We don't throw the current
		 * association away if it is configured or if it has prior
		 * reachable friends.
		 */
		if (!is_mystic && skeyid > NTP_MAXKEY)
			authtrust(skeyid, 0);
		if (!(peer->flags & FLAG_CONFIG) && peer->reach == 0)
			unpeer(peer);
#ifdef DEBUG
		if (debug)
			printf(
			    "invalid packet 0x%02x code %d assoc %d\n",
			    peer->flash, retcode, peer_associations);
#endif
		return;
	}

#ifdef MD5
	/*
	 * The autokey dance. The cha-cha requires that the hash of the
	 * current session key matches the previous key identifier. Heaps
	 * of trouble if the steps falter.
	 */
	if (skeyid > NTP_MAXKEY) {
		int i;

		/*
		 * In the case of a new autokey, verify the hash matches
		 * one of the previous four hashes. If not, raise the
		 * authentication flasher and hope the next one works.
		 */
		if (hismode == MODE_SERVER) {
			peer->pkeyid = peer->keyid;
		} else if (peer->flags & FLAG_MCAST2) {
			if (peer->pkeyid > NTP_MAXKEY)
				authtrust(peer->pkeyid, 0);
			for (i = 0; i < 4 && tkeyid != peer->pkeyid; i++) {
				tkeyid = session_key(
				    ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
				    ntohl(rbufp->dstadr->bcast.sin_addr.s_addr),
				    tkeyid, 0);
			}
		} else {
			if (peer->pkeyid > NTP_MAXKEY)
				authtrust(peer->pkeyid, 0);
			for (i = 0; i < 4 && tkeyid != peer->pkeyid; i++) {
				tkeyid = session_key(
				    ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
				    ntohl(rbufp->dstadr->sin.sin_addr.s_addr),
				    tkeyid, 0);
			}
		}
#ifdef XXX	/* temp until certificate code is implemented */
		if (tkeyid != peer->pkeyid)
			peer->flash |= TEST9;	/* peer not authentic */
#endif
		peer->pkeyid = skeyid;
	}
#endif /* MD5 */

	/*
	 * Gawdz, it's come to this. Process the dang packet. If something
	 * breaks and the association doesn't deserve to live, toss it.
	 * Be careful in active mode and return a packet anyway.
	 */
	process_packet(peer, pkt, &(rbufp->recv_time));
	if (!(peer->flags & FLAG_CONFIG) && peer->reach == 0) {
		if (peer->hmode == MODE_PASSIVE) {
			if (is_authentic)
				fast_xmit(rbufp, MODE_PASSIVE, skeyid);
			else
				fast_xmit(rbufp, MODE_PASSIVE, 0);
		}
		unpeer(peer);
	}
}


/*
 * process_packet - Packet Procedure, a la Section 3.4.4 of the
 *	specification. Or almost, at least. If we're in here we have a
 *	reasonable expectation that we will be having a long term
 *	relationship with this host.
 */
int
process_packet(
	register struct peer *peer,
	register struct pkt *pkt,
	l_fp *recv_ts
	)
{
	l_fp t10, t23;
	double p_offset, p_del, p_disp;
	double dtemp;
	l_fp p_rec, p_xmt, p_org, p_reftime;
	l_fp ci;
	int pmode;

	/*
	 * Swap header fields and keep the books.
	 */
	sys_processed++;
	peer->processed++;
	p_del = FPTOD(NTOHS_FP(pkt->rootdelay));
	p_disp = FPTOD(NTOHS_FP(pkt->rootdispersion));
	NTOHL_FP(&pkt->reftime, &p_reftime);
	NTOHL_FP(&pkt->rec, &p_rec);
	NTOHL_FP(&pkt->xmt, &p_xmt);
	if (PKT_MODE(pkt->li_vn_mode) != MODE_BROADCAST)
		NTOHL_FP(&pkt->org, &p_org);
	else
		p_org = peer->rec;
	peer->rec = *recv_ts;
	peer->ppoll = pkt->ppoll;
	pmode = PKT_MODE(pkt->li_vn_mode);

	/*
	 * Test for old or duplicate packets (tests 1 through 3).
	 */
	if (L_ISHIS(&peer->org, &p_xmt))	/* count old packets */
		peer->oldpkt++;
	if (L_ISEQU(&peer->org, &p_xmt))	/* test 1 */
		peer->flash |= TEST1;		/* duplicate packet */
	if (PKT_MODE(pkt->li_vn_mode) != MODE_BROADCAST) {
		if (!L_ISEQU(&peer->xmt, &p_org)) {	/* test 2 */
			peer->bogusorg++;
			peer->flash |= TEST2;	/* bogus packet */
		}
		if (L_ISZERO(&p_rec) || L_ISZERO(&p_org))
			peer->flash |= TEST3;	/* unsynchronized */
	} else {
		if (L_ISZERO(&p_org))
			peer->flash |= TEST3;	/* unsynchronized */
	}
	peer->org = p_xmt;

	/*
	 * Test for valid header (tests 5 through 10)
	 */
	ci = p_xmt;
	L_SUB(&ci, &p_reftime);
	LFPTOD(&ci, dtemp);
	if (PKT_LEAP(pkt->li_vn_mode) == LEAP_NOTINSYNC ||	/* test 6 */
	    PKT_TO_STRATUM(pkt->stratum) >= NTP_MAXSTRATUM ||
	    dtemp < 0)
		peer->flash |= TEST6;	/* peer clock unsynchronized */
	if (!(peer->flags & FLAG_CONFIG) && sys_peer != 0) {	/* test 7 */
		if (PKT_TO_STRATUM(pkt->stratum) > sys_stratum) {
			peer->flash |= TEST7;	/* peer stratum too high */
			sys_badstratum++;
		}
	}
	if (fabs(p_del) >= MAXDISPERSE		/* test 8 */
	    || p_disp >= MAXDISPERSE)
		peer->flash |= TEST8;	/* delay/dispersion too high */

	/*
	 * If the packet header is invalid (tests 5 through 10), exit.
	 * XXX we let TEST9 sneak by until the certificate code is
	 * implemented, but only to mobilize the association.
	 */
	if (peer->flash & (TEST5 | TEST6 | TEST7 | TEST8 | TEST10)) {
#ifdef DEBUG
		if (debug)
			printf(
			    "invalid packet header 0x%02x mode %d\n",
			    peer->flash, pmode);
#endif
		return (0);
	}

	/*
	 * Valid header; update our state.
	 */
	record_raw_stats(&peer->srcadr, &peer->dstadr->sin,
	    &p_org, &p_rec, &p_xmt, &peer->rec);

	peer->leap = PKT_LEAP(pkt->li_vn_mode);
	peer->pmode = pmode;		/* unspec */
	peer->stratum = PKT_TO_STRATUM(pkt->stratum);
	peer->precision = pkt->precision;
	peer->rootdelay = p_del;
	peer->rootdispersion = p_disp;
	peer->refid = pkt->refid;
	peer->reftime = p_reftime;
	if (peer->reach == 0) {
		report_event(EVNT_REACH, peer);
		peer->timereachable = current_time;
	}
	peer->reach |= 1;
	poll_update(peer, peer->hpoll);

	/*
	 * If running in a client/server association, calculate the
	 * clock offset c, roundtrip delay d and dispersion e. We use
	 * the equations (reordered from those in the spec). Note that,
	 * in a broadcast association, org has been set to the time of
	 * last reception. Note the computation of dispersion includes
	 * the system precision plus that due to the frequency error
	 * since the originate time.
	 *
	 * c = ((t2 - t3) + (t1 - t0)) / 2
	 * d = (t2 - t3) - (t1 - t0)
	 * e = (org - rec) (seconds only)
	 */
	t10 = p_xmt;			/* compute t1 - t0 */
	L_SUB(&t10, &peer->rec);
	t23 = p_rec;			/* compute t2 - t3 */
	L_SUB(&t23, &p_org);
	ci = t10;
	p_disp = CLOCK_PHI * (peer->rec.l_ui - p_org.l_ui);

	/*
	 * If running in a broadcast association, the clock offset is (t1
	 * - t0) corrected by the one-way delay, but we can't measure
	 * that directly; therefore, we start up in client/server mode,
	 * calculate the clock offset, using the engineered refinement
	 * algorithms, while also receiving broadcasts. When a broadcast
	 * is received in client/server mode, we calculate a correction
	 * factor to use after switching back to broadcast mode. We know
	 * NTP_SKEWFACTOR == 16, which accounts for the simplified ei
	 * calculation.
	 *
	 * If FLAG_MCAST2 is set, we are a broadcast/multicast client.
	 * If FLAG_MCAST1 is set, we haven't calculated the propagation
	 * delay. If hmode is MODE_CLIENT, we haven't set the local
	 * clock in client/server mode. Initially, we come up
	 * MODE_CLIENT. When the clock is first updated and FLAG_MCAST2
	 * is set, we switch from MODE_CLIENT to MODE_BCLIENT.
	 */
	if (pmode == MODE_BROADCAST) {
		if (peer->flags & FLAG_MCAST1) {
			if (peer->hmode == MODE_BCLIENT)
				peer->flags &= ~FLAG_MCAST1;
			LFPTOD(&ci, p_offset);
			peer->estbdelay = peer->offset - p_offset;
			return (1);

		}
		DTOLFP(peer->estbdelay, &t10);
		L_ADD(&ci, &t10);
		p_del = peer->delay;
	} else {
		L_ADD(&ci, &t23);
		L_RSHIFT(&ci);
		L_SUB(&t23, &t10);
		LFPTOD(&t23, p_del);
	}
	LFPTOD(&ci, p_offset);
	if (fabs(p_del) >= MAXDISPERSE || p_disp >= MAXDISPERSE) /* test 4 */
		peer->flash |= TEST4;	/* delay/dispersion too big */

	/*
	 * If the packet data are invalid (tests 1 through 4), exit.
	 */
	if (peer->flash) {
#ifdef DEBUG
		if (debug)
			printf("invalid packet data 0x%02x mode %d\n",
			    peer->flash, pmode);
#endif
		return (1);
	}


	/*
	 * This one is valid. Mark it so, give it to clock_filter().
	 */
	clock_filter(peer, p_offset, p_del, fabs(p_disp));
	clock_select();
	record_peer_stats(&peer->srcadr, ctlpeerstatus(peer),
	    peer->offset, peer->delay, peer->disp, SQRT(peer->variance));
	return (1);
}


/*
 * clock_update - Called at system process update intervals.
 */
static void
clock_update(void)
{
	u_char oleap;
	u_char ostratum;
	int i;
	struct peer *peer;

	/*
	 * Reset/adjust the system clock. Do this only if there is a
	 * system peer and we haven't seen that peer lately. Watch for
	 * timewarps here.
	 */
	if (sys_peer == 0)
		return;
	if (sys_peer->pollsw == FALSE || sys_peer->burst > 0)
		return;
	sys_peer->pollsw = FALSE;
#ifdef DEBUG
	if (debug)
		printf("clock_update: at %ld assoc %d\n", current_time,
		    peer_associations);
#endif
	oleap = sys_leap;
	ostratum = sys_stratum;
	switch (local_clock(sys_peer, sys_offset, sys_epsil)) {

	case -1:
		/*
		 * Clock is too screwed up. Just exit for now.
		 */
		report_event(EVNT_SYSFAULT, (struct peer *)0);
		exit(1);
		/*NOTREACHED*/

	case 1:
		/*
		 * Clock was stepped. Clear filter registers
		 * of all peers.
		 */
		for (i = 0; i < HASH_SIZE; i++) {
			for (peer = peer_hash[i]; peer != 0;
			    peer = peer->next)
				peer_clear(peer);
		}
		NLOG(NLOG_SYNCSTATUS)
		    msyslog(LOG_INFO, "synchronisation lost");
		sys_peer = 0;
		sys_stratum = STRATUM_UNSPEC;
		report_event(EVNT_CLOCKRESET, (struct peer *)0);
		break;

	default:
		/*
		 * Update the system stratum, leap bits, root delay,
		 * root dispersion, reference ID and reference time. We
		 * also update select dispersion and max frequency
		 * error.
		 */
		sys_stratum = sys_peer->stratum + 1;
		if (sys_stratum == 1)
			sys_refid = sys_peer->refid;
		else
			sys_refid = sys_peer->srcadr.sin_addr.s_addr;
		sys_reftime = sys_peer->rec;
		sys_rootdelay = sys_peer->rootdelay + fabs(sys_peer->delay);
		sys_leap = leap_consensus;
	}
	if (oleap != sys_leap)
		report_event(EVNT_SYNCCHG, (struct peer *)0);
	if (ostratum != sys_stratum)
		report_event(EVNT_PEERSTCHG, (struct peer *)0);
}


/*
 * poll_update - update peer poll interval. See Section 3.4.9 of the
 *	spec.
 */
void
poll_update(
	struct peer *peer,
	int hpoll
	)
{
	long update;

	/*
	 * The wiggle-the-poll-interval dance. Broadcasters dance only
	 * the minpoll beat. Reference clock partners sit this one out.
	 * Dancers surviving the clustering algorithm beat to the system
	 * clock. Broadcast clients are usually led by their broadcast
	 * partner, but faster in the initial mating dance.
	 */
	if (peer->hmode == MODE_BROADCAST) {
		peer->hpoll = peer->minpoll;
	} else if (peer->flags & FLAG_SYSPEER) {
		peer->hpoll = sys_poll;
	} else {
		if (hpoll > peer->maxpoll)
			peer->hpoll = peer->maxpoll;
		else if (hpoll < peer->minpoll)
			peer->hpoll = peer->minpoll;
		else
			peer->hpoll = hpoll;
	}
	if (peer->burst > 0) {
		if (peer->nextdate != current_time)
			return;
		if (peer->flags & FLAG_REFCLOCK)
			peer->nextdate++;
		else if (peer->reach & 0x1)
			peer->nextdate += RANDPOLL(BURST_INTERVAL2);
		else
			peer->nextdate += RANDPOLL(BURST_INTERVAL1);
	} else {
		update = max(min(peer->ppoll, peer->hpoll), peer->minpoll);
		peer->nextdate = peer->outdate + RANDPOLL(update);
	}
#ifdef DEBUG
	if (debug > 1)
		printf("poll_update: at %lu %s poll %d burst %d last %lu next %lu\n",
		    current_time, ntoa(&peer->srcadr), hpoll, peer->burst,
		    peer->outdate, peer->nextdate);
#endif
}


/*
 * clear - clear peer filter registers. See Section 3.4.8 of the spec.
 */
void
peer_clear(
	register struct peer *peer
	)
{
	register int i;

	memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO);
	peer->estbdelay = sys_bdelay;
	peer->hpoll = peer->minpoll;
	peer->pollsw = FALSE;
	peer->variance = MAXDISPERSE;
	peer->epoch = current_time;
	for (i = 0; i < NTP_SHIFT; i++) {
		peer->filter_order[i] = i;
		peer->filter_disp[i] = MAXDISPERSE;
		peer->filter_epoch[i] = current_time;
	}
	poll_update(peer, peer->minpoll);

	/*
	 * Since we have a chance to correct possible funniness in
	 * our selection of interfaces on a multihomed host, do so
	 * by setting us to no particular interface.
	 * WARNING: do so only in non-broadcast mode!
	 */
	if (peer->hmode != MODE_BROADCAST)
		peer->dstadr = any_interface;
}


/*
 * clock_filter - add incoming clock sample to filter register and run
 *	the filter procedure to find the best sample.
 */
void
clock_filter(
	register struct peer *peer,
	double sample_offset,
	double sample_delay,
	double sample_disp
	)
{
	register int i, j, k, n = 0;
	register u_char *ord;
	double distance[NTP_SHIFT];
	double x, y, z, off;

	/*
	 * Update error bounds and calculate distances. Also initialize
	 * sort index vector.
	 */
	x = CLOCK_PHI * (current_time - peer->update);
	peer->update = current_time;
	ord = peer->filter_order;
	j = peer->filter_nextpt;
	for (i = 0; i < NTP_SHIFT; i++) {
		peer->filter_disp[j] += x;
		if (peer->filter_disp[j] > MAXDISPERSE)
			peer->filter_disp[j] = MAXDISPERSE;
		distance[i] = fabs(peer->filter_delay[j]) / 2 +
		    peer->filter_disp[j];
		ord[i] = j;
		if (--j < 0)
			j += NTP_SHIFT;
	}

	/*
	 * Insert the new sample at the beginning of the register.
	 */
	peer->filter_offset[peer->filter_nextpt] = sample_offset;
	peer->filter_delay[peer->filter_nextpt] = sample_delay;
	x = LOGTOD(peer->precision) + LOGTOD(sys_precision) + sample_disp;
	peer->filter_disp[peer->filter_nextpt] = min(x, MAXDISPERSE);
	peer->filter_epoch[peer->filter_nextpt] = current_time;
	distance[0] = min(x + fabs(sample_delay) / 2, MAXDISTANCE);
	peer->filter_nextpt++;
	if (peer->filter_nextpt >= NTP_SHIFT)
		peer->filter_nextpt = 0;

	/*
	 * Sort the samples in the register by distance. The winning
	 * sample will be in ord[0]. Sort the samples only if they
	 * are younger than the Allan intercept.
	 */
	y = min(allan_xpt, NTP_SHIFT * ULOGTOD(sys_poll));
	for (n = 0; n < NTP_SHIFT && current_time -
	    peer->filter_epoch[ord[n]] <= y; n++) {
		for (j = 0; j < n; j++) {
			if (distance[j] > distance[n]) {
				x = distance[j];
				k = ord[j];
				distance[j] = distance[n];
				ord[j] = ord[n];
				distance[n] = x;
				ord[n] = k;
			}
		}
	}

	/*
	 * Compute the error bound and standard error.
	 */
	x = y = z = off = 0.;
	for (i = NTP_SHIFT - 1; i >= 0; i--) {
		x = NTP_FWEIGHT * (x + peer->filter_disp[ord[i]]);
		if (i < n) {
			z += 1. / distance[i];
			off += peer->filter_offset[ord[i]] / distance[i];
			y += DIFF(peer->filter_offset[ord[i]],
			    peer->filter_offset[ord[0]]);
		}
	}
	peer->delay = peer->filter_delay[ord[0]];
	peer->variance = min(y / n, MAXDISPERSE);
	peer->disp = min(x, MAXDISPERSE);
	peer->epoch = current_time;
	x = peer->offset;
	if (peer->flags & FLAG_BURST)
		peer->offset = off / z;
	else
		peer->offset = peer->filter_offset[ord[0]];

	/*
	 * A new sample is useful only if it is younger than the last
	 * one used.
	 */
	if (peer->filter_epoch[ord[0]] > peer->epoch) {
#ifdef DEBUG
		if (debug)
			printf("clock_filter: discard %lu\n",
			    peer->filter_epoch[ord[0]] - peer->epoch);
#endif
		return;
	}

	/*
	 * If the offset exceeds the dispersion by CLOCK_SGATE and the
	 * interval since the last update is less than twice the system
	 * poll interval, consider the update a popcorn spike and ignore
	 * it.
	 */
	if (fabs(x - peer->offset) > CLOCK_SGATE &&
	    peer->filter_epoch[ord[0]] - peer->epoch < (1 <<
	    (sys_poll + 1))) {
#ifdef DEBUG
		if (debug)
			printf("clock_filter: popcorn spike %.6f\n", x);
#endif
		return;
	}
	peer->epoch = peer->filter_epoch[ord[0]];
	peer->pollsw = TRUE;
#ifdef DEBUG
	if (debug)
		printf(
		    "clock_filter: offset %.6f delay %.6f disp %.6f std %.6f, age %lu\n",
		    peer->offset, peer->delay, peer->disp,
		    SQRT(peer->variance), current_time - peer->epoch);
#endif
}


/*
 * clock_select - find the pick-of-the-litter clock
 */
void
clock_select(void)
{
	register struct peer *peer;
	int i;
	int nlist, nl3;
	double d, e, f;
	int j;
	int n;
	int allow, found, k;
	double high, low;
	double synch[NTP_MAXCLOCK], error[NTP_MAXCLOCK];
	struct peer *osys_peer;
	struct peer *typeacts = 0;
	struct peer *typelocal = 0;
	struct peer *typepps = 0;
	struct peer *typeprefer = 0;
	struct peer *typesystem = 0;

	static int list_alloc = 0;
	static struct endpoint *endpoint = NULL;
	static int *index = NULL;
	static struct peer **peer_list = NULL;
	static u_int endpoint_size = 0;
	static u_int index_size = 0;
	static u_int peer_list_size = 0;

	/*
	 * Initialize. If a prefer peer does not survive this thing,
	 * the pps_update switch will remain zero.
	 */
	pps_update = 0;
	nlist = 0;
	low = 1e9;
	high = -1e9;
	for (n = 0; n < HASH_SIZE; n++)
		nlist += peer_hash_count[n];
	if (nlist > list_alloc) {
		if (list_alloc > 0) {
			free(endpoint);
			free(index);
			free(peer_list);
		}
		while (list_alloc < nlist) {
			list_alloc += 5;
			endpoint_size += 5 * 3 * sizeof *endpoint;
			index_size += 5 * 3 * sizeof *index;
			peer_list_size += 5 * sizeof *peer_list;
		}
		endpoint = (struct endpoint *)emalloc(endpoint_size);
		index = (int *)emalloc(index_size);
		peer_list = (struct peer **)emalloc(peer_list_size);
	}

	/*
	 * This first chunk of code is supposed to go through all
	 * peers we know about to find the peers which are most likely
	 * to succeed. We run through the list doing the sanity checks
	 * and trying to insert anyone who looks okay.
	 */
	nlist = nl3 = 0;	/* none yet */
	for (n = 0; n < HASH_SIZE; n++) {
		for (peer = peer_hash[n]; peer != 0; peer = peer->next) {
			peer->flags &= ~FLAG_SYSPEER;
			peer->status = CTL_PST_SEL_REJECT;
			if (peer->flags & FLAG_NOSELECT)
				continue;	/* noselect (survey only) */
			if (peer->reach == 0)
				continue;	/* unreachable */
			if (peer->stratum > 1 && peer->refid ==
			    peer->dstadr->sin.sin_addr.s_addr)
				continue;	/* sync loop */
			if (root_distance(peer) >= MAXDISTANCE + 2 *
			    CLOCK_PHI * ULOGTOD(sys_poll)) {
				peer->seldisptoolarge++;
				continue;	/* too noisy or broken */
			}

			/*
			 * Don't allow the local-clock or acts drivers
			 * in the kitchen at this point, unless the
			 * prefer peer. Do that later, but only if
			 * nobody else is around.
			 */
			if (peer->refclktype == REFCLK_LOCALCLOCK
#if defined(VMS) && defined(VMS_LOCALUNIT)
			    /* wjm: local unit VMS_LOCALUNIT taken seriously */
			    && REFCLOCKUNIT(&peer->srcadr) != VMS_LOCALUNIT
#endif /* VMS && VMS_LOCALUNIT */
			    ) {
				typelocal = peer;
				if (!(peer->flags & FLAG_PREFER))
					continue;	/* no local clock */
			}
			if (peer->sstclktype == CTL_SST_TS_TELEPHONE) {
				typeacts = peer;
				if (!(peer->flags & FLAG_PREFER))
					continue;	/* no acts */
			}

			/*
			 * If we get this far, we assume the peer is
			 * acceptable.
			 */
			peer->status = CTL_PST_SEL_SANE;
			peer_list[nlist++] = peer;

			/*
			 * Insert each interval endpoint on the sorted
			 * list.
			 */
			e = peer->offset;	/* Upper end */
			f = root_distance(peer);
			e = e + f;
			for (i = nl3 - 1; i >= 0; i--) {
				if (e >= endpoint[index[i]].val)
					break;
				index[i + 3] = index[i];
			}
			index[i + 3] = nl3;
			endpoint[nl3].type = 1;
			endpoint[nl3++].val = e;

			e = e - f;		/* Center point */
			for ( ; i >= 0; i--) {
				if (e >= endpoint[index[i]].val)
					break;
				index[i + 2] = index[i];
			}
			index[i + 2] = nl3;
			endpoint[nl3].type = 0;
			endpoint[nl3++].val = e;

			e = e - f;		/* Lower end */
			for ( ; i >= 0; i--) {
				if (e >= endpoint[index[i]].val)
					break;
				index[i + 1] = index[i];
			}
			index[i + 1] = nl3;
			endpoint[nl3].type = -1;
			endpoint[nl3++].val = e;
		}
	}
#ifdef DEBUG
	if (debug > 1)
		for (i = 0; i < nl3; i++)
			printf("select: endpoint %2d %.6f\n",
			    endpoint[index[i]].type, endpoint[index[i]].val);
#endif
	i = 0;
	j = nl3 - 1;
	allow = nlist;		/* falsetickers assumed */
	found = 0;
	while (allow > 0) {
		allow--;
		for (n = 0; i <= j; i++) {
			n += endpoint[index[i]].type;
			if (n < 0)
				break;
			if (endpoint[index[i]].type == 0)
				found++;
		}
		for (n = 0; i <= j; j--) {
			n += endpoint[index[j]].type;
			if (n > 0)
				break;
			if (endpoint[index[j]].type == 0)
				found++;
		}
		if (found > allow)
			break;
		low = endpoint[index[i++]].val;
		high = endpoint[index[j--]].val;
	}

	/*
	 * If no survivors remain at this point, check if the acts or
	 * local clock drivers have been found. If so, nominate one of
	 * them as the only survivor. Otherwise, give up and declare us
	 * unsynchronized.
	 */
	if ((allow << 1) >= nlist) {
		if (typeacts != 0) {
			typeacts->status = CTL_PST_SEL_SANE;
			peer_list[0] = typeacts;
			nlist = 1;
		} else if (typelocal != 0) {
			typelocal->status = CTL_PST_SEL_SANE;
			peer_list[0] = typelocal;
			nlist = 1;
		} else {
			if (sys_peer != 0) {
				report_event(EVNT_PEERSTCHG,
				    (struct peer *)0);
				NLOG(NLOG_SYNCSTATUS)
				    msyslog(LOG_INFO, "synchronisation lost");
			}
			sys_peer = 0;
			return;
		}
	}
#ifdef DEBUG
	if (debug > 1)
		printf("select: low %.6f high %.6f\n", low, high);
#endif

	/*
	 * Clustering algorithm. Process the intersection list to discard
	 * outliers. Construct the candidate list in cluster order
	 * determined by the sum of peer synchronization distance plus
	 * scaled stratum. We must find at least one peer.
	 */
	j = 0;
	for (i = 0; i < nlist; i++) {
		peer = peer_list[i];
		if (nlist > 1 && (low >= peer->offset ||
		    peer->offset >= high))
			continue;
		peer->status = CTL_PST_SEL_CORRECT;
		d = root_distance(peer) + peer->stratum * MAXDISPERSE;
		if (j >= NTP_MAXCLOCK) {
			if (d >= synch[j - 1])
				continue;
			else
				j--;
		}
		for (k = j; k > 0; k--) {
			if (d >= synch[k - 1])
				break;
			synch[k] = synch[k - 1];
			peer_list[k] = peer_list[k - 1];
		}
		peer_list[k] = peer;
		synch[k] = d;
		j++;
	}
	nlist = j;

#ifdef DEBUG
	if (debug > 1)
		for (i = 0; i < nlist; i++)
			printf("select: %s distance %.6f\n",
			    ntoa(&peer_list[i]->srcadr), synch[i]);
#endif

	/*
	 * Now, prune outliers by root dispersion. Continue as long as
	 * there are more than NTP_MINCLOCK survivors and the minimum
	 * select dispersion is greater than the maximum peer
	 * dispersion. Stop if we are about to discard a prefer peer.
	 */
	for (i = 0; i < nlist; i++) {
		peer = peer_list[i];
		error[i] = peer->variance;
		if (i < NTP_CANCLOCK)
			peer->status = CTL_PST_SEL_SELCAND;
		else
			peer->status = CTL_PST_SEL_DISTSYSPEER;
	}
	while (1) {
		sys_maxd = 0;
		d = error[0];
		for (k = i = nlist - 1; i >= 0; i--) {
			double sdisp = 0;

			for (j = nlist - 1; j > 0; j--) {
				sdisp = NTP_SWEIGHT * (sdisp +
				    DIFF(peer_list[i]->offset,
				    peer_list[j]->offset));
			}
			if (sdisp > sys_maxd) {
				sys_maxd = sdisp;
				k = i;
			}
			if (error[i] < d)
				d = error[i];
		}

#ifdef DEBUG
		if (debug > 1)
			printf(
			    "select: survivors %d select %.6f peer %.6f\n",
			    nlist, SQRT(sys_maxd), SQRT(d));
#endif
		if (nlist <= NTP_MINCLOCK || sys_maxd <= d ||
		    peer_list[k]->flags & FLAG_PREFER)
			break;
		for (j = k + 1; j < nlist; j++) {
			peer_list[j - 1] = peer_list[j];
			error[j - 1] = error[j];
		}
		nlist--;
	}
#ifdef DEBUG
	if (debug > 1) {
		for (i = 0; i < nlist; i++)
			printf(
			    "select: %s offset %.6f, distance %.6f poll %d\n",
			    ntoa(&peer_list[i]->srcadr), peer_list[i]->offset,
			    synch[i], peer_list[i]->pollsw);
	}
#endif

	/*
	 * What remains is a list of not greater than NTP_MINCLOCK
	 * peers. We want only a peer at the lowest stratum to become
	 * the system peer, although all survivors are eligible for the
	 * combining algorithm. First record their order, diddle the
	 * flags and clamp the poll intervals. Then, consider the peers
	 * at the lowest stratum. Of these, OR the leap bits on the
	 * assumption that, if some of them honk nonzero bits, they must
	 * know what they are doing. Also, check for prefer and pps
	 * peers. If a prefer peer is found within clock_max, update the
	 * pps switch. Of the other peers not at the lowest stratum,
	 * check if the system peer is among them and, if found, zap
	 * him. We note that the head of the list is at the lowest
	 * stratum and that unsynchronized peers cannot survive this
	 * far.
	 */
	leap_consensus = 0;
	for (i = nlist - 1; i >= 0; i--) {
		peer_list[i]->status = CTL_PST_SEL_SYNCCAND;
		peer_list[i]->flags |= FLAG_SYSPEER;
		poll_update(peer_list[i], peer_list[i]->hpoll);
		if (peer_list[i]->stratum == peer_list[0]->stratum) {
			leap_consensus |= peer_list[i]->leap;
			if (peer_list[i]->refclktype == REFCLK_ATOM_PPS)
				typepps = peer_list[i];
			if (peer_list[i] == sys_peer)
				typesystem = peer_list[i];
			if (peer_list[i]->flags & FLAG_PREFER) {
				typeprefer = peer_list[i];
				if (fabs(typeprefer->offset) < clock_max)
					pps_update = 1;
			}
		} else {
			if (peer_list[i] == sys_peer)
				sys_peer = 0;
		}
	}

	/*
	 * Mitigation rules of the game. There are several types of
	 * peers that make a difference here: (1) prefer local peers
	 * (type REFCLK_LOCALCLOCK with FLAG_PREFER) or prefer modem
	 * peers (type REFCLK_NIST_ATOM etc with FLAG_PREFER), (2) pps peers
	 * (type REFCLK_ATOM_PPS), (3) remaining prefer peers (flag
	 * FLAG_PREFER), (4) the existing system peer, if any, (5) the
	 * head of the survivor list. Note that only one peer can be
	 * declared prefer. The order of preference is in the order
	 * stated. Note that all of these must be at the lowest stratum,
	 * i.e., the stratum of the head of the survivor list.
	 */
	osys_peer = sys_peer;
	if (typeprefer && (typeprefer->refclktype == REFCLK_LOCALCLOCK ||
	    typeprefer->sstclktype == CTL_SST_TS_TELEPHONE || !typepps)) {
		sys_peer = typeprefer;
		sys_peer->status = CTL_PST_SEL_SYSPEER;
		sys_offset = sys_peer->offset;
		sys_epsil = sys_peer->variance;
#ifdef DEBUG
		if (debug > 1)
			printf("select: prefer offset %.6f\n", sys_offset);
#endif
	} else if (typepps && pps_update) {
		sys_peer = typepps;
		sys_peer->status = CTL_PST_SEL_PPS;
		sys_offset = sys_peer->offset;
		sys_epsil = sys_peer->variance;
		if (!pps_control)
			NLOG(NLOG_SYSEVENT)	/* conditional syslog */
			    msyslog(LOG_INFO, "pps sync enabled");
		pps_control = current_time;
#ifdef DEBUG
		if (debug > 1)
			printf("select: pps offset %.6f\n", sys_offset);
#endif
	} else {
		if (!typesystem)
			sys_peer = peer_list[0];
		sys_peer->status = CTL_PST_SEL_SYSPEER;
		sys_offset = clock_combine(peer_list, nlist);
		sys_epsil = sys_peer->variance + sys_maxd;
#ifdef DEBUG
		if (debug > 1)
			printf("select: combine offset %.6f\n",
			    sys_offset);
#endif
	}
	if (osys_peer != sys_peer)
		report_event(EVNT_PEERSTCHG, (struct peer *)0);
	clock_update();
}

/*
 * clock_combine - combine offsets from selected peers
 */
static double
clock_combine(
	struct peer **peers,
	int npeers
	)
{
	int i;
	double x, y, z;

	y = z = 0;
	for (i = 0; i < npeers; i++) {
		x = root_distance(peers[i]);
		y += 1. / x;
		z += peers[i]->offset / x;
	}
	return (z / y);
}

/*
 * root_distance - compute synchronization distance from peer to root
 */
static double
root_distance(
	struct peer *peer
	)
{
	return ((fabs(peer->delay) + peer->rootdelay) / 2 +
	    peer->rootdispersion + peer->disp +
	    SQRT(peer->variance) + CLOCK_PHI * (current_time -
	    peer->update));
}

/*
 * peer_xmit - send packet for persistent association.
 */
static void
peer_xmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct pkt xpkt;
	int find_rtt = (peer->cast_flags & MDF_MCAST) &&
	    peer->hmode != MODE_BROADCAST;
	int sendlen;

	/*
	 * Initialize protocol fields.
	 */
	xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap,
	    peer->version, peer->hmode);
	xpkt.stratum = STRATUM_TO_PKT(sys_stratum);
	xpkt.ppoll = peer->hpoll;
	xpkt.precision = sys_precision;
	xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay));
	xpkt.rootdispersion = HTONS_FP(DTOUFP(sys_rootdispersion +
	    LOGTOD(sys_precision)));
	xpkt.refid = sys_refid;
	HTONL_FP(&sys_reftime, &xpkt.reftime);
	HTONL_FP(&peer->org, &xpkt.org);
	HTONL_FP(&peer->rec, &xpkt.rec);

	/*
	 * Authenticate the packet if enabled and either configured or
	 * the previous packet was authenticated. If for some reason the
	 * key associated with the key identifier is not in the key
	 * cache, then honk key zero.
	 */
	sendlen = LEN_PKT_NOMAC;
	if (peer->flags & FLAG_AUTHENABLE) {
		u_long xkeyid;
		l_fp xmt_tx;

		/*
		 * Transmit encrypted packet compensated for the
		 * encryption delay.
		 */
#ifdef MD5
		if (peer->flags & FLAG_SKEY) {

			/*
			 * In SKEY mode, allocate and initialize a key list if
			 * not already done. Then, use the list in inverse
			 * order, discarding keys once used. Keep the latest
			 * key around until the next one, so clients can use
			 * client/server packets to compute propagation delay.
			 * Note we have to wait until the receive side of the
			 * socket is bound and the server address confirmed.
			 */
			if (ntohl(peer->dstadr->sin.sin_addr.s_addr) == 0 &&
			    ntohl(peer->dstadr->bcast.sin_addr.s_addr) == 0)
				peer->keyid = 0;
			else {
				if (peer->keylist == 0) {
					make_keylist(peer);
				} else {
					authtrust(peer->keylist[peer->keynumber], 0);
					if (peer->keynumber == 0)
						make_keylist(peer);
					else {
						peer->keynumber--;
						xkeyid = peer->keylist[peer->keynumber];
						if (!authistrusted(xkeyid))
							make_keylist(peer);
					}
				}
				peer->keyid = peer->keylist[peer->keynumber];
				xpkt.keyid1 = htonl(2 * sizeof(u_int32));
				xpkt.keyid2 = htonl(sys_private);
				sendlen += 2 * sizeof(u_int32);
			}
		}
#endif /* MD5 */
		xkeyid = peer->keyid;
		get_systime(&peer->xmt);
		L_ADD(&peer->xmt, &sys_authdelay);
		HTONL_FP(&peer->xmt, &xpkt.xmt);
		sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen);
		get_systime(&xmt_tx);
		sendpkt(&peer->srcadr, find_rtt ? any_interface :
		    peer->dstadr,
		    ((peer->cast_flags & MDF_MCAST) && !find_rtt) ?
		    ((peer->cast_flags & MDF_ACAST) ? -7 : peer->ttl) : -7,
		    &xpkt, sendlen);

		/*
		 * Calculate the encryption delay. Keep the minimum over
		 * the latest two samples.
		 */
		L_SUB(&xmt_tx, &peer->xmt);
		L_ADD(&xmt_tx, &sys_authdelay);
		sys_authdly[1] = sys_authdly[0];
		sys_authdly[0] = xmt_tx.l_uf;
		if (sys_authdly[0] < sys_authdly[1])
			sys_authdelay.l_uf = sys_authdly[0];
		else
			sys_authdelay.l_uf = sys_authdly[1];
		peer->sent++;
#ifdef DEBUG
		if (debug)
			printf(
			    "transmit: at %ld to %s mode %d keyid %08lx index %d\n",
			    current_time, ntoa(&peer->srcadr),
			    peer->hmode, xkeyid, peer->keynumber);
#endif
	} else {
		/*
		 * Transmit non-authenticated packet.
		 */
		get_systime(&(peer->xmt));
		HTONL_FP(&peer->xmt, &xpkt.xmt);
		sendpkt(&(peer->srcadr), find_rtt ? any_interface :
		    peer->dstadr,
		    ((peer->cast_flags & MDF_MCAST) && !find_rtt) ?
		    ((peer->cast_flags & MDF_ACAST) ? -7 : peer->ttl) : -8,
		    &xpkt, sendlen);
		peer->sent++;
#ifdef DEBUG
		if (debug)
			printf("transmit: at %ld to %s mode %d\n",
			    current_time, ntoa(&peer->srcadr),
			    peer->hmode);
#endif
	}
}

/*
 * fast_xmit - Send packet for nonpersistent association.
 */
static void
fast_xmit(
	struct recvbuf *rbufp,	/* receive packet pointer */
	int xmode,		/* transmit mode */
	u_long xkeyid		/* transmit key ID */
	)
{
	struct pkt xpkt;
	struct pkt *rpkt;
	int sendlen;
	l_fp xmt_ts;

	/*
	 * Initialize transmit packet header fields in the receive
	 * buffer provided. We leave some fields intact as received.
	 */
	rpkt = &rbufp->recv_pkt;
	xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap,
	    PKT_VERSION(rpkt->li_vn_mode), xmode);
	xpkt.stratum = STRATUM_TO_PKT(sys_stratum);
	xpkt.ppoll = rpkt->ppoll;
	xpkt.precision = sys_precision;
	xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay));
	xpkt.rootdispersion = HTONS_FP(DTOUFP(sys_rootdispersion +
	    LOGTOD(sys_precision)));
	xpkt.refid = sys_refid;
	HTONL_FP(&sys_reftime, &xpkt.reftime);
	xpkt.org = rpkt->xmt;
	HTONL_FP(&rbufp->recv_time, &xpkt.rec);
	sendlen = LEN_PKT_NOMAC;
	if (rbufp->recv_length > sendlen) {
		l_fp xmt_tx;

		/*
		 * Transmit encrypted packet compensated for the
		 * encryption delay.
		 */
		if (xkeyid > NTP_MAXKEY) {
			xpkt.keyid1 = htonl(2 * sizeof(u_int32));
			xpkt.keyid2 = htonl(sys_private);
			sendlen += 2 * sizeof(u_int32);
		}
		get_systime(&xmt_ts);
		L_ADD(&xmt_ts, &sys_authdelay);
		HTONL_FP(&xmt_ts, &xpkt.xmt);
		sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt, sendlen);
		get_systime(&xmt_tx);
		sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, -9, &xpkt,
		    sendlen);

		/*
		 * Calculate the encryption delay. Keep the minimum over
		 * the latest two samples.
		 */
		L_SUB(&xmt_tx, &xmt_ts);
		L_ADD(&xmt_tx, &sys_authdelay);
		sys_authdly[1] = sys_authdly[0];
		sys_authdly[0] = xmt_tx.l_uf;
		if (sys_authdly[0] < sys_authdly[1])
			sys_authdelay.l_uf = sys_authdly[0];
		else
			sys_authdelay.l_uf = sys_authdly[1];
#ifdef DEBUG
		if (debug)
			printf(
			    "transmit: at %ld to %s mode %d keyid %08lx\n",
			    current_time, ntoa(&rbufp->recv_srcadr),
			    xmode, xkeyid);
#endif
	} else {

		/*
		 * Transmit non-authenticated packet.
		 */
		get_systime(&xmt_ts);
		HTONL_FP(&xmt_ts, &xpkt.xmt);
		sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, -10, &xpkt,
		    sendlen);
#ifdef DEBUG
		if (debug)
			printf("transmit: at %ld to %s mode %d\n",
			    current_time, ntoa(&rbufp->recv_srcadr),
			    xmode);
#endif
	}
}

#ifdef MD5
/*
 * Compute key list
 */
static void
make_keylist(
	struct peer *peer
	)
{
	int i;
	u_long keyid;
	u_long ltemp;

	/*
	 * Allocate the key list if necessary.
	 */
	if (peer->keylist == 0)
		peer->keylist = (u_long *)emalloc(sizeof(u_long) *
		    NTP_MAXSESSION);

	/*
	 * Generate an initial key ID which is unique and greater than
	 * NTP_MAXKEY.
	 */
	while (1) {
		keyid = (u_long)RANDOM & 0xffffffff;
		if (keyid <= NTP_MAXKEY)
			continue;
		if (authhavekey(keyid))
			continue;
		break;
	}

	/*
	 * Generate up to NTP_MAXSESSION session keys. Stop if the
	 * next one would not be unique or not a session key ID or if
	 * it would expire before the next poll.
	 */
	ltemp = sys_automax;
	for (i = 0; i < NTP_MAXSESSION; i++) {
		peer->keylist[i] = keyid;
		peer->keynumber = i;
		keyid = session_key(
		    ntohl(peer->dstadr->sin.sin_addr.s_addr),
		    (peer->hmode == MODE_BROADCAST || (peer->flags &
		    FLAG_MCAST2)) ?
		    ntohl(peer->dstadr->bcast.sin_addr.s_addr) :
		    ntohl(peer->srcadr.sin_addr.s_addr),
		    keyid, ltemp);
		ltemp -= 1 << peer->hpoll;
		if (auth_havekey(keyid) || keyid <= NTP_MAXKEY ||
		    ltemp <= (1 << (peer->hpoll + 1)))
			break;
	}
}
#endif /* MD5 */

/*
 * Find the precision of this particular machine
 */
#define DUSECS	1000000		/* us in a s */
#define HUSECS	(1 << 20)	/* approx DUSECS for shifting etc */
#define MINSTEP	5		/* minimum clock increment (us) */
#define MAXSTEP	20000		/* maximum clock increment (us) */
#define MINLOOPS 5		/* minimum number of step samples */

/*
 * This routine calculates the differences between successive calls to
 * gettimeofday(). If a difference is less than zero, the us field
 * has rolled over to the next second, so we add a second in us. If
 * the difference is greater than zero and less than MINSTEP, the
 * clock has been advanced by a small amount to avoid standing still.
 * If the clock has advanced by a greater amount, then a timer interrupt
 * has occurred and this amount represents the precision of the clock.
 * In order to guard against spurious values, which could occur if we
 * happen to hit a fat interrupt, we do this for MINLOOPS times and
 * keep the minimum value obtained.
 */
int
default_get_precision(void)
{
	struct timeval tp;
#if !defined(SYS_WINNT) && !defined(VMS) && !defined(_SEQUENT_)
	struct timezone tzp;
#elif defined(VMS) || defined(_SEQUENT_)
	struct timezone {
		int tz_minuteswest;
		int tz_dsttime;
	} tzp;
#endif /* defined(VMS) || defined(_SEQUENT_) */
	long last;
	int i;
	long diff;
	long val;
	long usec;
#ifdef HAVE_GETCLOCK
	struct timespec ts;
#endif
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
	u_long freq;
	size_t j;

	/* Try to see if we can find the frequency of the counter
	 * which drives our timekeeping.
	 */
	j = sizeof freq;
	i = sysctlbyname("kern.timecounter.frequency",
	    &freq, &j, 0, 0);
	if (i)
		i = sysctlbyname("machdep.tsc_freq",
		    &freq, &j, 0, 0);
	if (i)
		i = sysctlbyname("machdep.i586_freq",
		    &freq, &j, 0, 0);
	if (i)
		i = sysctlbyname("machdep.i8254_freq",
		    &freq, &j, 0, 0);
	if (!i) {
		for (i = 1; freq; i--)
			freq >>= 1;
		return (i);
	}
#endif
	usec = 0;
	val = MAXSTEP;
#ifdef HAVE_GETCLOCK
	(void) getclock(TIMEOFDAY, &ts);
	tp.tv_sec = ts.tv_sec;
	tp.tv_usec = ts.tv_nsec / 1000;
#else /* not HAVE_GETCLOCK */
	GETTIMEOFDAY(&tp, &tzp);
#endif /* not HAVE_GETCLOCK */
	last = tp.tv_usec;
	for (i = 0; i < MINLOOPS && usec < HUSECS;) {
#ifdef HAVE_GETCLOCK
		(void) getclock(TIMEOFDAY, &ts);
		tp.tv_sec = ts.tv_sec;
		tp.tv_usec = ts.tv_nsec / 1000;
#else /* not HAVE_GETCLOCK */
		GETTIMEOFDAY(&tp, &tzp);
#endif /* not HAVE_GETCLOCK */
		diff = tp.tv_usec - last;
		last = tp.tv_usec;
		if (diff < 0)
			diff += DUSECS;
		usec += diff;
		if (diff > MINSTEP) {
			i++;
			if (diff < val)
				val = diff;
		}
	}
	NLOG(NLOG_SYSINFO)	/* conditional if clause for conditional syslog */
	    msyslog(LOG_INFO, "precision = %ld usec", val);
	if (usec >= HUSECS)
		val = MINSTEP;	/* val <= MINSTEP; fast machine */
	diff = HUSECS;
	for (i = 0; diff > val; i--)
		diff >>= 1;
	return (i);
}

/*
 * init_proto - initialize the protocol module's data
 */
void
init_proto(void)
{
	l_fp dummy;

	/*
	 * Fill in the sys_* stuff. Default is don't listen to
	 * broadcasting, authenticate.
	 */
	sys_leap = LEAP_NOTINSYNC;
	sys_stratum = STRATUM_UNSPEC;
	sys_precision = (s_char)default_get_precision();
	sys_rootdelay = 0;
	sys_rootdispersion = 0;
	sys_refid = 0;
	L_CLR(&sys_reftime);
	sys_peer = 0;
	get_systime(&dummy);
	sys_bclient = 0;
	sys_bdelay = DEFBROADDELAY;
#if defined(DES) || defined(MD5)
	sys_authenticate = 1;
#else
	sys_authenticate = 0;
#endif
	L_CLR(&sys_authdelay);
	sys_authdly[0] = sys_authdly[1] = 0;
	sys_stattime = 0;
	sys_badstratum = 0;
	sys_oldversionpkt = 0;
	sys_newversionpkt = 0;
	sys_badlength = 0;
	sys_unknownversion = 0;
	sys_processed = 0;
	sys_badauth = 0;
	sys_manycastserver = 0;
	sys_automax = 1 << NTP_AUTOMAX;

	/*
	 * Default these to enable
	 */
	ntp_enable = 1;
#ifndef KERNEL_FLL_BUG
	kern_enable = 1;
#endif
	msyslog(LOG_DEBUG, "kern_enable is %d", kern_enable);
	stats_control = 1;

	/*
	 * Some system clocks should only be adjusted in 10ms increments.
	 */
#if defined RELIANTUNIX_CLOCK
	systime_10ms_ticks = 1;		/* Reliant UNIX */
#elif defined SCO5_CLOCK
	if (sys_precision >= (s_char)-10)	/* pre-SCO OpenServer 5.0.6 */
		systime_10ms_ticks = 1;
#endif
	if (systime_10ms_ticks)
		msyslog(LOG_INFO, "using 10ms tick adjustments");
}


/*
 * proto_config - configure the protocol module
 */
void
proto_config(
	int item,
	u_long value,
	double dvalue
	)
{
	/*
	 * Figure out what he wants to change, then do it
	 */
	switch (item) {
	case PROTO_KERNEL:
		/*
		 * Turn on/off kernel discipline
		 */
		kern_enable = (int)value;
		break;

	case PROTO_NTP:
		/*
		 * Turn on/off clock discipline
		 */
		ntp_enable = (int)value;
		break;

	case PROTO_MONITOR:
		/*
		 * Turn on/off monitoring
		 */
		if (value)
			mon_start(MON_ON);
		else
			mon_stop(MON_ON);
		break;

	case PROTO_FILEGEN:
		/*
		 * Turn on/off statistics
		 */
		stats_control = (int)value;
		break;

	case PROTO_BROADCLIENT:
		/*
		 * Turn on/off facility to listen to broadcasts
		 */
		sys_bclient = (int)value;
		if (value)
			io_setbclient();
		else
			io_unsetbclient();
		break;

	case PROTO_MULTICAST_ADD:
		/*
		 * Add multicast group address
		 */
		io_multicast_add(value);
		break;

	case PROTO_MULTICAST_DEL:
		/*
		 * Delete multicast group address
		 */
		io_multicast_del(value);
		break;

	case PROTO_BROADDELAY:
		/*
		 * Set default broadcast delay
		 */
		sys_bdelay = dvalue;
		break;

	case PROTO_AUTHENTICATE:
		/*
		 * Specify the use of authenticated data
		 */
		sys_authenticate = (int)value;
		break;

	default:
		/*
		 * Log this error
		 */
		msyslog(LOG_ERR, "proto_config: illegal item %d, value %ld",
		    item, value);
		break;
	}
}


/*
 * proto_clr_stats - clear protocol stat counters
 */
void
proto_clr_stats(void)
{
	sys_badstratum = 0;
	sys_oldversionpkt = 0;
	sys_newversionpkt = 0;
	sys_unknownversion = 0;
	sys_badlength = 0;
	sys_processed = 0;
	sys_badauth = 0;
	sys_stattime = current_time;
	sys_limitrejected = 0;
}
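
/*
 * Illustrative sketch only, guarded out of the build: the on-wire
 * offset/delay equations used by process_packet() above, restated with
 * plain doubles instead of the daemon's l_fp fixed-point type. The
 * timestamp names follow the packet fields: org is our originate
 * (transmit) time echoed by the server, rec is the server receive
 * time, xmt is the server transmit time and dst is our local receive
 * time. The function name and double-based interface are hypothetical
 * and exist only for this example; the real code does the same
 * arithmetic with L_SUB/L_ADD/L_RSHIFT on l_fp values.
 */
#if 0
static void
example_offset_delay(
	double org,	/* originate timestamp (our transmit) */
	double rec,	/* receive timestamp at the server */
	double xmt,	/* transmit timestamp at the server */
	double dst,	/* destination timestamp (our receive) */
	double *offset,	/* clock offset c */
	double *delay	/* roundtrip delay d */
	)
{
	double t23 = rec - org;		/* "t2 - t3" in process_packet() */
	double t10 = xmt - dst;		/* "t1 - t0" in process_packet() */

	/* c = ((t2 - t3) + (t1 - t0)) / 2, d = (t2 - t3) - (t1 - t0) */
	*offset = (t23 + t10) / 2;
	*delay = t23 - t10;
}
#endif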