xref: /freebsd/contrib/ntp/ntpd/ntp_proto.c (revision 1b6c76a2fe091c74f08427e6c870851025a9cf67)
1 /*
2  * ntp_proto.c - NTP version 4 protocol machinery
3  *
4  * $FreeBSD$
5  */
6 #ifdef HAVE_CONFIG_H
7 #include <config.h>
8 #endif
9 
10 #include <stdio.h>
11 #include <sys/types.h>
12 #include <sys/time.h>
13 
14 #include "ntpd.h"
15 #include "ntp_stdlib.h"
16 #include "ntp_unixtime.h"
17 #include "ntp_control.h"
18 #include "ntp_string.h"
19 
20 #if defined(VMS) && defined(VMS_LOCALUNIT)	/*wjm*/
21 #include "ntp_refclock.h"
22 #endif
23 
24 #if defined(__FreeBSD__) && __FreeBSD__ >= 3
25 #include <sys/sysctl.h>
26 #endif
27 
28 /*
29  * System variables are declared here.	See Section 3.2 of the
30  * specification.
31  */
32 u_char	sys_leap;		/* system leap indicator */
33 u_char	sys_stratum;		/* stratum of system */
34 s_char	sys_precision;		/* local clock precision */
35 double	sys_rootdelay;		/* distance to current sync source */
36 double	sys_rootdispersion;	/* dispersion of system clock */
37 u_int32 sys_refid;		/* reference source for local clock */
38 static	double sys_offset;	/* current local clock offset */
39 l_fp	sys_reftime;		/* time we were last updated */
40 struct	peer *sys_peer; 	/* our current peer */
41 u_long	sys_automax;		/* maximum session key lifetime */
42 
43 /*
44  * Nonspecified system state variables.
45  */
46 int	sys_bclient;		/* we set our time to broadcasts */
47 double	sys_bdelay; 		/* broadcast client default delay */
48 int	sys_authenticate;	/* require authentication for config */
49 l_fp	sys_authdelay;		/* authentication delay */
50 static	u_long sys_authdly[2]; 	/* authentication delay shift reg */
51 static	u_char leap_consensus;	/* consensus of survivor leap bits */
52 static	double sys_maxd; 	/* select error (squares) */
53 static	double sys_epsil;	/* system error (squares) */
54 u_long	sys_private;		/* private value for session seed */
55 int	sys_manycastserver;	/* 1 => respond to manycast client pkts */
56 
57 /*
58  * Statistics counters
59  */
60 u_long	sys_stattime;		/* time when we started recording */
61 u_long	sys_badstratum; 	/* packets with invalid stratum */
62 u_long	sys_oldversionpkt;	/* old version packets received */
63 u_long	sys_newversionpkt;	/* new version packets received */
64 u_long	sys_unknownversion;	/* don't know version packets */
65 u_long	sys_badlength;		/* packets with bad length */
66 u_long	sys_processed;		/* packets processed */
67 u_long	sys_badauth;		/* packets dropped because of auth */
68 u_long	sys_limitrejected;	/* pkts rejected due to client count per net */
69 
70 static	double	root_distance	P((struct peer *));
71 static	double	clock_combine	P((struct peer **, int));
72 static	void	peer_xmit	P((struct peer *));
73 static	void	fast_xmit	P((struct recvbuf *, int, u_long));
74 static	void	clock_update	P((void));
75 int	default_get_precision	P((void));
76 #ifdef MD5
77 static	void	make_keylist	P((struct peer *));
78 #endif /* MD5 */
79 
80 /*
81  * transmit - Transmit Procedure. See Section 3.4.2 of the
82  *	specification.
83  */
84 void
85 transmit(
86 	struct peer *peer	/* peer structure pointer */
87 	)
88 {
89 	int hpoll;
90 
91 	hpoll = peer->hpoll;
92 	if (peer->burst == 0) {
93 		u_char oreach;
94 
95 		/*
96 		 * Determine reachability and diddle things if we
97 		 * haven't heard from the host for a while. If the peer
98 		 * is not configured and not likely to stay around,
99 		 * we exhaust it.
100 		 */
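		/*
		 * The reach variable is an eight-bit shift register: the
		 * receive path sets the low-order bit when a valid packet
		 * arrives and this routine shifts it left once per poll.
		 * Thus oreach & 0x01 means the most recent poll was
		 * answered, while oreach & 0x80 means the oldest
		 * remembered poll was answered and is about to be shifted
		 * out.
		 */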
101 		oreach = peer->reach;
102 		if (oreach & 0x01)
103 			peer->valid++;
104 		if (oreach & 0x80)
105 			peer->valid--;
106 		if (!(peer->flags & FLAG_CONFIG) && peer->valid >
107 		    NTP_SHIFT / 2 && (peer->reach & 0x80) &&
108 		    peer->status < CTL_PST_SEL_SYNCCAND)
109 			peer->reach = 0;
110 		peer->reach <<= 1;
111 		if (peer->reach == 0) {
112 
113 			/*
114 			 * If this is an unconfigured association and
115 			 * has become unreachable, demobilize it.
116 			 */
117 			if (oreach != 0) {
118 				report_event(EVNT_UNREACH, peer);
119 				peer->timereachable = current_time;
120 				peer_clear(peer);
121 				if (!(peer->flags & FLAG_CONFIG)) {
122 					unpeer(peer);
123 					return;
124 				}
125 			}
126 
127 			/*
128 			 * We would like to respond quickly when the
129 			 * peer comes back to life. If the probes since
130 			 * becoming unreachable are less than
131 			 * NTP_UNREACH, clamp the poll interval to the
132 			 * minimum. In order to minimize the network
133 			 * traffic, the interval gradually ramps up to
134 			 * the maximum after that.
135 			 */
136 			peer->ppoll = peer->maxpoll;
137 			if (peer->unreach < NTP_UNREACH) {
138 				if (peer->hmode == MODE_CLIENT)
139 					peer->unreach++;
140 				hpoll = peer->minpoll;
141 			} else {
142 				hpoll++;
143 			}
144 			if (peer->flags & FLAG_BURST)
145 				peer->burst = 2;
146 
147 		} else {
148 
149 			/*
150 			 * Here the peer is reachable. If there is no
151 			 * system peer or if the stratum of the system
152 			 * peer is greater than this peer, clamp the
153 			 * poll interval to the minimum. If less than
154 			 * two samples are in the reachability register,
155 			 * reduce the interval; if more than six samples
156 			 * are in the register, increase the interval.
157 			 */
158 			peer->unreach = 0;
159 			if (sys_peer == 0)
160 				hpoll = peer->minpoll;
161 			else if (sys_peer->stratum > peer->stratum)
162 				hpoll = peer->minpoll;
163 			if ((peer->reach & 0x03) == 0) {
164 				clock_filter(peer, 0., 0., MAXDISPERSE);
165 				clock_select();
166 			}
167 			if (peer->valid <= 2)
168 				hpoll--;
169 			else if (peer->valid >= NTP_SHIFT - 2)
170 				hpoll++;
171 			if (peer->flags & FLAG_BURST)
172 				peer->burst = NTP_SHIFT;
173 		}
174 	} else {
175 		peer->burst--;
176 		if (peer->burst == 0) {
177 			if (peer->flags & FLAG_MCAST2) {
178 				peer->flags &= ~FLAG_BURST;
179 				peer->hmode = MODE_BCLIENT;
180 			}
181 			clock_select();
182 			poll_update(peer, hpoll);
183 			return;
184 		}
185 	}
186 
187 	/*
188 	 * We need to be very careful about honking uncivilized time. If
189 	 * not operating in broadcast mode, honk in all except broadcast
190 	 * client mode. If operating in broadcast mode and synchronized
191 	 * to a real source, honk except when the peer is the local-
192 	 * clock driver and the prefer flag is not set. In other words,
193 	 * in broadcast mode we never honk unless known to be
194 	 * synchronized to real time.
195 	 */
196 	if (peer->hmode != MODE_BROADCAST) {
197 		if (peer->hmode != MODE_BCLIENT)
198 			peer_xmit(peer);
199 	} else if (sys_peer != 0 && sys_leap != LEAP_NOTINSYNC) {
200 		if (!(sys_peer->refclktype == REFCLK_LOCALCLOCK &&
201 		    !(sys_peer->flags & FLAG_PREFER)))
202 			peer_xmit(peer);
203 	}
204 	peer->outdate = current_time;
205 	poll_update(peer, hpoll);
206 }
207 
208 /*
209  * receive - Receive Procedure.  See section 3.4.3 in the specification.
210  */
211 void
212 receive(
213 	struct recvbuf *rbufp
214 	)
215 {
216 	register struct peer *peer;
217 	register struct pkt *pkt;
218 	int hismode;
219 	int oflags;
220 	int restrict_mask;
221 	int has_mac;			/* has MAC field */
222 	int authlen;			/* length of MAC field */
223 	int is_authentic;		/* cryptosum ok */
224 	int is_mystic;			/* session key exists */
225 	int is_error;			/* parse error */
226 /*	u_long pkeyid; */
227 	u_long skeyid, tkeyid;
228 	struct peer *peer2;
229 	int retcode = AM_NOMATCH;
230 
231 	/*
232 	 * Monitor the packet and get restrictions
233 	 */
234 	ntp_monitor(rbufp);
235 	restrict_mask = restrictions(&rbufp->recv_srcadr);
236 #ifdef DEBUG
237 	if (debug > 1)
238 		printf("receive: from %s restrict %02x\n",
239 		    ntoa(&rbufp->recv_srcadr), restrict_mask);
240 #endif
241 	if (restrict_mask & RES_IGNORE)
242 		return;
243 
244 	/*
245 	 * Discard packets with invalid version number.
246 	 */
247 	pkt = &rbufp->recv_pkt;
248 	if (PKT_VERSION(pkt->li_vn_mode) >= NTP_VERSION)
249 		sys_newversionpkt++;
250 	else if (PKT_VERSION(pkt->li_vn_mode) >= NTP_OLDVERSION)
251 		sys_oldversionpkt++;
252 	else {
253 		sys_unknownversion++;
254 		return;
255 	}
256 
257 	/*
258 	 * Restrict control/private mode packets. Note that packet
259 	 * length has to be checked in the control/private mode protocol
260 	 * module.
261 	 */
262 	if (PKT_MODE(pkt->li_vn_mode) == MODE_PRIVATE) {
263 		if (restrict_mask & RES_NOQUERY)
264 		    return;
265 		process_private(rbufp, ((restrict_mask &
266 		    RES_NOMODIFY) == 0));
267 		return;
268 	}
269 	if (PKT_MODE(pkt->li_vn_mode) == MODE_CONTROL) {
270 		if (restrict_mask & RES_NOQUERY)
271 		    return;
272 		process_control(rbufp, restrict_mask);
273 		return;
274 	}
275 
276 	/*
277 	 * Restrict revenue packets.
278 	 */
279 	if (restrict_mask & RES_DONTSERVE)
280 		return;
281 
282 	/*
283 	 * See if we accept only a limited number of clients from the net
284 	 * this guy is from. Note: the flag is determined dynamically
285 	 * within restrictions()
286 	 */
287 	if (restrict_mask & RES_LIMITED) {
288 		sys_limitrejected++;
289 		return;
290 	}
291 
292 	/*
293 	 * If we are not a broadcast client, ignore broadcast packets.
294 	 */
295 	if ((PKT_MODE(pkt->li_vn_mode) == MODE_BROADCAST &&
296 	    !sys_bclient))
297 		return;
298 
299 	/*
300 	 * This is really awfully ugly. We figure out whether an extension
301 	 * field is present and then measure the MAC size. If the number
302 	 * of words following the packet header is less than or equal to
303 	 * 5, no extension field is present and these words constitute
304 	 * the MAC. If the number of words is greater than 5, an
305 	 * extension field is present and the first word contains the
306 	 * length of the extension field and the MAC follows that.
307 	 */
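	/*
	 * For example, assuming the standard 48-octet header
	 * (LEN_PKT_NOMAC) and a plain 20-octet MD5 MAC (4-octet key ID
	 * plus 16-octet digest), anything up to five words past the
	 * header is taken to be a bare MAC, while a longer trailer
	 * implies an extension field ahead of the MAC.
	 */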
308 	has_mac = 0;
309 /*	pkeyid = 0; */
310 	skeyid = tkeyid = 0;
311 	authlen = LEN_PKT_NOMAC;
312 	has_mac = rbufp->recv_length - authlen;
313 	if (has_mac <= 5 * sizeof(u_int32)) {
314 		skeyid = (u_long)ntohl(pkt->keyid1) & 0xffffffff;
315 	} else {
316 		authlen += (u_long)ntohl(pkt->keyid1) & 0xffffffff;
317 		has_mac = rbufp->recv_length - authlen;
318 		if (authlen <= 0) {
319 			sys_badlength++;
320 			return;
321 		}
322 
323 		/*
324 		 * Note that keyid3 is actually the key ident of the
325 		 * MAC itself.
326 		 */
327 /* 		pkeyid = (u_long)ntohl(pkt->keyid2) & 0xffffffff; */
328 		skeyid = tkeyid = (u_long)ntohl(pkt->keyid3) &
329 		    0xffffffff;
330 	}
331 
332 	/*
333 	 * Figure out his mode and validate it.
334 	 */
335 	hismode = (int)PKT_MODE(pkt->li_vn_mode);
336 	if (PKT_VERSION(pkt->li_vn_mode) == NTP_OLDVERSION && hismode ==
337 		0) {
338 		/*
339 		 * Easy.  If it is from the NTP port it is
340 		 * a sym act, else client.
341 		 */
342 		if (SRCPORT(&rbufp->recv_srcadr) == NTP_PORT)
343 			hismode = MODE_ACTIVE;
344 		else
345 			hismode = MODE_CLIENT;
346 	} else {
347 		if (hismode != MODE_ACTIVE && hismode != MODE_PASSIVE &&
348 			hismode != MODE_SERVER && hismode != MODE_CLIENT &&
349 			hismode != MODE_BROADCAST)
350 			return;
351 	}
352 
353 	/*
354 	 * If he included a mac field, decrypt it to see if it is
355 	 * authentic.
356 	 */
357 	is_authentic = is_mystic = 0;
358 	if (has_mac == 0) {
359 #ifdef DEBUG
360 		if (debug)
361 			printf("receive: at %ld from %s mode %d\n",
362 				current_time, ntoa(&rbufp->recv_srcadr),
363 				hismode);
364 #endif
365 	} else {
366 		is_mystic = authistrusted(skeyid);
367 #ifdef MD5
368 		if (skeyid > NTP_MAXKEY && !is_mystic) {
369 
370 			/*
371 			 * For multicast mode, generate the session key
372 			 * and install in the key cache. For client
373 			 * mode, generate the session key for the
374 			 * unicast address. For server mode, the session
375 			 * key should already be in the key cache, since
376 			 * it was generated when the last request was
377 			 * sent.
378 			 */
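			/*
			 * Presumably the last argument gives the cached
			 * key a lifetime of about four of the sender's
			 * poll intervals (4 * (1 << pkt->ppoll)
			 * seconds), enough to cover the packets expected
			 * before the key is rolled over.
			 */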
379 			if (hismode == MODE_BROADCAST) {
380 				tkeyid = session_key(
381 					ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
382 					ntohl(rbufp->dstadr->bcast.sin_addr.s_addr),
383 					skeyid, (u_long)(4 * (1 << pkt->ppoll)));
384 			} else if (hismode != MODE_SERVER) {
385 				tkeyid = session_key(
386 					ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
387 					ntohl(rbufp->dstadr->sin.sin_addr.s_addr),
388 					skeyid, (u_long)(4 * (1 << pkt->ppoll)));
389 			}
390 
391 		}
392 #endif /* MD5 */
393 
394 		/*
395 		 * Compute the cryptosum. Note a clogging attack may
396 		 * succeed in bloating the key cache.
397 		 */
398 		if (authdecrypt(skeyid, (u_int32 *)pkt, authlen,
399 		    has_mac))
400 			is_authentic = 1;
401 		else
402 			sys_badauth++;
403 #ifdef DEBUG
404 		if (debug)
405 			printf(
406 				"receive: at %ld %s mode %d keyid %08lx mac %d auth %d\n",
407 				current_time, ntoa(&rbufp->recv_srcadr),
408 				hismode, skeyid, has_mac, is_authentic);
409 #endif
410 	}
411 
412 	/*
413 	 * Find the peer.  This will return a null if this guy isn't in
414 	 * the database.
415 	 */
416 	peer = findpeer(&rbufp->recv_srcadr, rbufp->dstadr, rbufp->fd,
417 		hismode, &retcode);
418 	/*
419 	 * The new association matching rules are driven by a table
420 	 * specified in ntp.h. We have replaced the *default* behaviour
421 	 * of replying to bogus packets in server mode in this version.
422 	 * A packet must now match an association in order to be
423 	 * processed. In the event that no association exists, then an
424 	 * association is mobilized if need be. Two different
425 	 * associations can be mobilized a) passive associations b)
426 	 * client associations due to broadcasts or manycasts.
427 	 */
428 	is_error = 0;
429 	switch (retcode) {
430 	case AM_FXMIT:
431 
432 		/*
433 		 * If the client is configured purely as a broadcast
434 		 * client and not as a manycast server, it has no
435 		 * business being a server. Simply go home. Otherwise,
436 		 * send a MODE_SERVER response and go home. Note that we
437 		 * don't do an authentication check here, since we can't
438 		 * set the system clock; but, we do set the key ID to
439 		 * zero to tell the caller about this.
440 		 */
441 		if (!sys_bclient || sys_manycastserver) {
442 			if (is_authentic)
443 				fast_xmit(rbufp, MODE_SERVER, skeyid);
444 			else
445 				fast_xmit(rbufp, MODE_SERVER, 0);
446 		}
447 
448 		/*
449 		 * We can't get here if an association is mobilized, so
450 		 * just toss the key, if appropriate.
451 		 */
452 		if (!is_mystic && skeyid > NTP_MAXKEY)
453 			authtrust(skeyid, 0);
454 		return;
455 
456 	case AM_MANYCAST:
457 
458 		/*
459 		 * This could be in response to a multicast packet sent
460 		 * by the "manycast" mode association. Find peer based
461 		 * on the originate timestamp in the packet. Note that
462 		 * we don't mobilize a new association, unless the
463 		 * packet is properly authenticated. The response must
464 		 * be properly authenticated and it's darn funny if the
465 		 * manycaster isn't around now.
466 		 */
467 		if ((sys_authenticate && !is_authentic)) {
468 			is_error = 1;
469 			break;
470 		}
471 		peer2 = (struct peer *)findmanycastpeer(&pkt->org);
472 		if (peer2 == 0) {
473 			is_error = 1;
474 			break;
475 		}
476 
477 		/*
478 		 * Create a new association and copy the peer variables
479 		 * to it. If something goes wrong, carefully pry the new
480 		 * association away and return its marbles to the candy
481 		 * store.
482 		 */
483 		peer = newpeer(&rbufp->recv_srcadr, rbufp->dstadr,
484 		    MODE_CLIENT, PKT_VERSION(pkt->li_vn_mode),
485 		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
486 		if (peer == 0) {
487 			is_error = 1;
488 			break;
489 		}
490 		peer_config_manycast(peer2, peer);
491 		break;
492 
493 	case AM_ERR:
494 
495 		/*
496 		 * Something bad happened. Dirty floor will be mopped by
497 		 * the code at the end of this adventure.
498 		 */
499 		is_error = 1;
500 		break;
501 
502 	case AM_NEWPASS:
503 
504 		/*
505 		 * Okay, we're going to keep him around.  Allocate him
506 		 * some memory. But, don't do that unless the packet is
507 		 * properly authenticated.
508 		 */
509 		if ((sys_authenticate && !is_authentic)) {
510 			is_error = 1;
511 			break;
512 		}
513 		peer = newpeer(&rbufp->recv_srcadr, rbufp->dstadr,
514 		    MODE_PASSIVE, PKT_VERSION(pkt->li_vn_mode),
515 		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
516 		break;
517 
518 	case AM_NEWBCL:
519 
520 		/*
521 		 * Broadcast client being set up now. Do this only if
522 		 * the packet is properly authenticated.
523 		 */
524 		if ((restrict_mask & RES_NOPEER) || !sys_bclient ||
525 		    (sys_authenticate && !is_authentic)) {
526 			is_error = 1;
527 			break;
528 		}
529 		peer = newpeer(&rbufp->recv_srcadr, rbufp->dstadr,
530 		    MODE_MCLIENT, PKT_VERSION(pkt->li_vn_mode),
531 		    NTP_MINDPOLL, NTP_MAXDPOLL, 0, skeyid);
532 		if (peer == 0)
533 			break;
534 		peer->flags |= FLAG_MCAST1 | FLAG_MCAST2 | FLAG_BURST;
535 		peer->hmode = MODE_CLIENT;
536 		break;
537 
538 	case AM_POSSBCL:
539 	case AM_PROCPKT:
540 
541 		/*
542 		 * It seems like it is okay to process the packet now
543 		 */
544 		break;
545 
546 	default:
547 
548 		/*
549 		 * shouldn't be getting here, but simply return anyway!
550 		 */
551 		is_error = 1;
552 	}
553 	if (is_error) {
554 
555 		/*
556 		 * Error stub. If we get here, something broke. We
557 		 * scuttle the autokey if necessary and sink the ship.
558 		 * This can occur only upon mobilization, so we can
559 		 * throw the structure away without fear of breaking
560 		 * anything.
561 		 */
562 		if (!is_mystic && skeyid > NTP_MAXKEY)
563 			authtrust(skeyid, 0);
564 		if (peer != 0)
565 			if (!(peer->flags & FLAG_CONFIG))
566 				unpeer(peer);
567 #ifdef DEBUG
568 		if (debug)
569 			printf("match error code %d assoc %d\n",
570 			    retcode, peer_associations);
571 #endif
572 		return;
573 	}
574 
575 	/*
576 	 * If the peer isn't configured, set his keyid and authenable
577 	 * status based on the packet.
578 	 */
579 	oflags = peer->flags;
580 	peer->timereceived = current_time;
581 	if (!(peer->flags & FLAG_CONFIG) && has_mac) {
582 		peer->flags |= FLAG_AUTHENABLE;
583 		if (skeyid > NTP_MAXKEY) {
584 			if (peer->flags & FLAG_MCAST2)
585 				peer->keyid = skeyid;
586 			else
587 				peer->flags |= FLAG_SKEY;
588 		}
589 	}
590 
591 	/*
592 	 * Determine if this guy is basically trustable. If not, flush
593 	 * the bugger. If this is the first packet that is
594 	 * authenticated, flush the clock filter. This is to foil
595 	 * clogging attacks that might starve the poor dear.
596 	 */
597 	peer->flash = 0;
598 	if (is_authentic)
599 		peer->flags |= FLAG_AUTHENTIC;
600 	else
601 		peer->flags &= ~FLAG_AUTHENTIC;
602 	if (peer->hmode == MODE_BROADCAST && (restrict_mask &
603 	    RES_DONTTRUST))
604 		peer->flash |= TEST10;		/* access denied */
605 	if (peer->flags & FLAG_AUTHENABLE) {
606 		if (!(peer->flags & FLAG_AUTHENTIC))
607 			peer->flash |= TEST5;	/* auth failed */
608 		else if (skeyid == 0)
609 			peer->flash |= TEST9;	/* peer not auth */
610 		else if (!(oflags & FLAG_AUTHENABLE)) {
611 			peer_clear(peer);
612 			report_event(EVNT_PEERAUTH, peer);
613 		}
614 	}
615 	if ((peer->flash & ~(u_int)TEST9) != 0) {
616 
617 		/*
618 		 * The packet is bogus, so we throw it away before
619 		 * becoming a denial-of-service hazard. We don't throw
620 		 * the current association away if it is configured or
621 		 * if it has prior reachable friends.
622 		 */
623 		if (!is_mystic && skeyid > NTP_MAXKEY)
624 			authtrust(skeyid, 0);
625 		if (!(peer->flags & FLAG_CONFIG) && peer->reach == 0)
626 			unpeer(peer);
627 #ifdef DEBUG
628 		if (debug)
629 			printf(
630 			    "invalid packet 0x%02x code %d assoc %d\n",
631 			    peer->flash, retcode, peer_associations);
632 #endif
633 		return;
634 	}
635 
636 #ifdef MD5
637 	/*
638 	 * The autokey dance. The cha-cha requires that the hash of the
639 	 * current session key matches the previous key identifier.
640 	 * Heaps of trouble if the steps falter.
641 	 */
642 	if (skeyid > NTP_MAXKEY) {
643 		int i;
644 
645 		/*
646 		 * In the case of a new autokey, verify the hash matches
647 		 * one of the previous four hashes. If not, raise the
648 		 * authentication flasher and hope the next one works.
649 		 */
650 		if (hismode == MODE_SERVER) {
651 			peer->pkeyid = peer->keyid;
652 		} else if (peer->flags & FLAG_MCAST2) {
653 			if (peer->pkeyid > NTP_MAXKEY)
654 				authtrust(peer->pkeyid, 0);
655 			for (i = 0; i < 4 && tkeyid != peer->pkeyid;
656 			    i++) {
657 				tkeyid = session_key(
658 					ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
659 					ntohl(rbufp->dstadr->bcast.sin_addr.s_addr),
660 					tkeyid, 0);
661 			}
662 		} else {
663 			if (peer->pkeyid > NTP_MAXKEY)
664 				authtrust(peer->pkeyid, 0);
665 			for (i = 0; i < 4 && tkeyid != peer->pkeyid;
666 			    i++) {
667 				tkeyid = session_key(
668 				    ntohl((&rbufp->recv_srcadr)->sin_addr.s_addr),
669 				    ntohl(rbufp->dstadr->sin.sin_addr.s_addr),
670 				    tkeyid, 0);
671 			}
672 		}
673 #ifdef XXX /* temp until certificate code is implemented */
674 		if (tkeyid != peer->pkeyid)
675 			peer->flash |= TEST9;	/* peer not authentic */
676 #endif
677 		peer->pkeyid = skeyid;
678 	}
679 #endif /* MD5 */
680 
681 	/*
682 	 * Gawdz, it's come to this. Process the dang packet. If
683 	 * something breaks and the association doesn't deserve to live,
684 	 * toss it. Be careful in active mode and return a packet
685 	 * anyway.
686 	 */
687 	process_packet(peer, pkt, &(rbufp->recv_time));
688 	if (!(peer->flags & FLAG_CONFIG) && peer->reach == 0) {
689 		if (peer->hmode == MODE_PASSIVE) {
690 			if (is_authentic)
691 				fast_xmit(rbufp, MODE_PASSIVE, skeyid);
692 			else
693 				fast_xmit(rbufp, MODE_PASSIVE, 0);
694 		}
695 		unpeer(peer);
696 	}
697 }
698 
699 
700 /*
701  * process_packet - Packet Procedure, a la Section 3.4.4 of the
702  *	specification. Or almost, at least. If we're in here we have a
703  *	reasonable expectation that we will be having a long term
704  *	relationship with this host.
705  */
706 int
707 process_packet(
708 	register struct peer *peer,
709 	register struct pkt *pkt,
710 	l_fp *recv_ts
711 	)
712 {
713 	l_fp t10, t23;
714 	double p_offset, p_del, p_disp;
715 	double dtemp;
716 	l_fp p_rec, p_xmt, p_org, p_reftime;
717 	l_fp ci;
718 	int pmode;
719 
720 	/*
721 	 * Swap header fields and keep the books.
722 	 */
723 	sys_processed++;
724 	peer->processed++;
725 	p_del = FPTOD(NTOHS_FP(pkt->rootdelay));
726 	p_disp = FPTOD(NTOHS_FP(pkt->rootdispersion));
727 	NTOHL_FP(&pkt->reftime, &p_reftime);
728 	NTOHL_FP(&pkt->rec, &p_rec);
729 	NTOHL_FP(&pkt->xmt, &p_xmt);
730 	if (PKT_MODE(pkt->li_vn_mode) != MODE_BROADCAST)
731 		NTOHL_FP(&pkt->org, &p_org);
732 	else
733 		p_org = peer->rec;
734 	peer->rec = *recv_ts;
735 	peer->ppoll = pkt->ppoll;
736 	pmode = PKT_MODE(pkt->li_vn_mode);
737 
738 	/*
739 	 * Test for old or duplicate packets (tests 1 through 3).
740 	 */
741 	if (L_ISHIS(&peer->org, &p_xmt))	/* count old packets */
742 		peer->oldpkt++;
743 	if (L_ISEQU(&peer->org, &p_xmt))	/* test 1 */
744 		peer->flash |= TEST1;		/* duplicate packet */
745 	if (PKT_MODE(pkt->li_vn_mode) != MODE_BROADCAST) {
746 		if (!L_ISEQU(&peer->xmt, &p_org)) { /* test 2 */
747 			peer->bogusorg++;
748 			peer->flash |= TEST2;	/* bogus packet */
749 		}
750 		if (L_ISZERO(&p_rec) || L_ISZERO(&p_org))
751 			peer->flash |= TEST3;	/* unsynchronized */
752 	} else {
753 		if (L_ISZERO(&p_org))
754 			peer->flash |= TEST3;	/* unsynchronized */
755 	}
756 	peer->org = p_xmt;
757 
758 	/*
759 	 * Test for valid header (tests 5 through 10)
760 	 */
761 	ci = p_xmt;
762 	L_SUB(&ci, &p_reftime);
763 	LFPTOD(&ci, dtemp);
764 	if (PKT_LEAP(pkt->li_vn_mode) == LEAP_NOTINSYNC || /* test 6 */
765 	    PKT_TO_STRATUM(pkt->stratum) >= NTP_MAXSTRATUM ||
766 	    dtemp < 0)
767 		peer->flash |= TEST6;	/* peer clock unsynchronized */
768 	if (!(peer->flags & FLAG_CONFIG) && sys_peer != 0) { /* test 7 */
769 		if (PKT_TO_STRATUM(pkt->stratum) > sys_stratum) {
770 			peer->flash |= TEST7; /* peer stratum too high */
771 			sys_badstratum++;
772 		}
773 	}
774 	if (fabs(p_del) >= MAXDISPERSE	/* test 8 */
775 	    || p_disp >= MAXDISPERSE)
776 		peer->flash |= TEST8;	/* delay/dispersion too high */
777 
778 	/*
779 	 * If the packet header is invalid (tests 5 through 10), exit.
780 	 * XXX we let TEST9 sneak by until the certificate code is
781 	 * implemented, but only to mobilize the association.
782 	 */
783 	if (peer->flash & (TEST5 | TEST6 | TEST7 | TEST8 | TEST10)) {
784 #ifdef DEBUG
785 		if (debug)
786 			printf(
787 			    "invalid packet header 0x%02x mode %d\n",
788 			    peer->flash, pmode);
789 #endif
790 		return (0);
791 	}
792 
793 	/*
794 	 * Valid header; update our state.
795 	 */
796 	record_raw_stats(&peer->srcadr, &peer->dstadr->sin,
797 	    &p_org, &p_rec, &p_xmt, &peer->rec);
798 
799 	peer->leap = PKT_LEAP(pkt->li_vn_mode);
800 	peer->pmode = pmode;		/* unspec */
801 	peer->stratum = PKT_TO_STRATUM(pkt->stratum);
802 	peer->precision = pkt->precision;
803 	peer->rootdelay = p_del;
804 	peer->rootdispersion = p_disp;
805 	peer->refid = pkt->refid;
806 	peer->reftime = p_reftime;
807 	if (peer->reach == 0) {
808 		report_event(EVNT_REACH, peer);
809 		peer->timereachable = current_time;
810 	}
811 	peer->reach |= 1;
812 	poll_update(peer, peer->hpoll);
813 
814 	/*
815 	 * If running in a client/server association, calculate the
816 	 * clock offset c, roundtrip delay d and dispersion e. We use
817 	 * the equations (reordered from those in the spec). Note that,
818 	 * in a broadcast association, org has been set to the time of
819 	 * last reception. Note the computation of dispersion includes
820 	 * the system precision plus that due to the frequency error
821 	 * since the originate time.
822 	 *
823 	 * c = ((t2 - t3) + (t1 - t0)) / 2
824 	 * d = (t2 - t3) - (t1 - t0)
825 	 * e = (org - rec) (seconds only)
826 	 */
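	/*
	 * In the code below, t1 - t0 is p_xmt - peer->rec (server
	 * transmit minus our receive) and t2 - t3 is p_rec - p_org
	 * (server receive minus our originate). For instance, with
	 * t3 = 10.000, t2 = 10.060, t1 = 10.062 and t0 = 10.100, the
	 * equations give c = (.060 + (-.038)) / 2 = .011 s and
	 * d = .060 - (-.038) = .098 s.
	 */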
827 	t10 = p_xmt;			/* compute t1 - t0 */
828 	L_SUB(&t10, &peer->rec);
829 	t23 = p_rec;			/* compute t2 - t3 */
830 	L_SUB(&t23, &p_org);
831 	ci = t10;
832 	p_disp = CLOCK_PHI * (peer->rec.l_ui - p_org.l_ui);
833 
834 	/*
835 	 * If running in a broadcast association, the clock offset is
836 	 * (t1 - t0) corrected by the one-way delay, but we can't
837 	 * measure that directly; therefore, we start up in
838 	 * client/server mode, calculate the clock offset, using the
839 	 * engineered refinement algorithms, while also receiving
840 	 * broadcasts. When a broadcast is received in client/server
841 	 * mode, we calculate a correction factor to use after switching
842 	 * back to broadcast mode. We know NTP_SKEWFACTOR == 16, which
843 	 * accounts for the simplified ei calculation.
844 	 *
845 	 * If FLAG_MCAST2 is set, we are a broadcast/multicast client.
846 	 * If FLAG_MCAST1 is set, we haven't calculated the propagation
847 	 * delay. If hmode is MODE_CLIENT, we haven't set the local
848 	 * clock in client/server mode. Initially, we come up
849 	 * MODE_CLIENT. When the clock is first updated and FLAG_MCAST2
850 	 * is set, we switch from MODE_CLIENT to MODE_BCLIENT.
851 	 */
852 	if (pmode == MODE_BROADCAST) {
853 		if (peer->flags & FLAG_MCAST1) {
854 			if (peer->hmode == MODE_BCLIENT)
855 				peer->flags &= ~FLAG_MCAST1;
856 			LFPTOD(&ci, p_offset);
857 			peer->estbdelay = peer->offset - p_offset;
858 			return (1);
859 
860 		}
861 		DTOLFP(peer->estbdelay, &t10);
862 		L_ADD(&ci, &t10);
863 		p_del = peer->delay;
864 	} else {
865 		L_ADD(&ci, &t23);
866 		L_RSHIFT(&ci);
867 		L_SUB(&t23, &t10);
868 		LFPTOD(&t23, p_del);
869 	}
870 	LFPTOD(&ci, p_offset);
871 	if (fabs(p_del) >= MAXDISPERSE || p_disp >= MAXDISPERSE) /* test 4 */
872 		peer->flash |= TEST4;	/* delay/dispersion too big */
873 
874 	/*
875 	 * If the packet data are invalid (tests 1 through 4), exit.
876 	 */
877 	if (peer->flash) {
878 #ifdef DEBUG
879 		if (debug)
880 			printf("invalid packet data 0x%02x mode %d\n",
881 			    peer->flash, pmode);
882 #endif
883 		return(1);
884 	}
885 
886 
887 	/*
888 	 * This one is valid. Mark it so, give it to clock_filter().
889 	 */
890 	clock_filter(peer, p_offset, p_del, fabs(p_disp));
891 	clock_select();
892 	record_peer_stats(&peer->srcadr, ctlpeerstatus(peer),
893 	    peer->offset, peer->delay, peer->disp,
894 	    SQRT(peer->variance));
895 	return(1);
896 }
897 
898 
899 /*
900  * clock_update - Called at system process update intervals.
901  */
902 static void
903 clock_update(void)
904 {
905 	u_char oleap;
906 	u_char ostratum;
907 	int i;
908 	struct peer *peer;
909 
910 	/*
911 	 * Reset/adjust the system clock. Do this only if there is a
912 	 * system peer and we have seen that peer lately. Watch for
913 	 * timewarps here.
914 	 */
915 	if (sys_peer == 0)
916 		return;
917 	if (sys_peer->pollsw == FALSE || sys_peer->burst > 0)
918 		return;
919 	sys_peer->pollsw = FALSE;
920 #ifdef DEBUG
921 	if (debug)
922 		printf("clock_update: at %ld assoc %d \n", current_time,
923 		    peer_associations);
924 #endif
925 	oleap = sys_leap;
926 	ostratum = sys_stratum;
927 	switch (local_clock(sys_peer, sys_offset, sys_epsil)) {
928 
929 		case -1:
930 		/*
931 		 * Clock is too screwed up. Just exit for now.
932 		 */
933 		report_event(EVNT_SYSFAULT, (struct peer *)0);
934 		exit(1);
935 		/*NOTREACHED*/
936 
937 		case 1:
938 		/*
939 		 * Clock was stepped. Clear filter registers
940 		 * of all peers.
941 		 */
942 		for (i = 0; i < HASH_SIZE; i++) {
943 			for (peer = peer_hash[i]; peer != 0;
944 				peer = peer->next)
945 				peer_clear(peer);
946 		}
947 		NLOG(NLOG_SYNCSTATUS)
948 			msyslog(LOG_INFO, "synchronisation lost");
949 		sys_peer = 0;
950 		sys_stratum = STRATUM_UNSPEC;
951 		report_event(EVNT_CLOCKRESET, (struct peer *)0);
952 		break;
953 
954 		default:
955 		/*
956 		 * Update the system stratum, leap bits, root delay,
957 		 * root dispersion, reference ID and reference time. We
958 		 * also update select dispersion and max frequency
959 		 * error.
960 		 */
961 		sys_stratum = sys_peer->stratum + 1;
962 		if (sys_stratum == 1)
963 			sys_refid = sys_peer->refid;
964 		else
965 			sys_refid = sys_peer->srcadr.sin_addr.s_addr;
966 		sys_reftime = sys_peer->rec;
967 		sys_rootdelay = sys_peer->rootdelay +
968 		    fabs(sys_peer->delay);
969 		sys_leap = leap_consensus;
970 	}
971 	if (oleap != sys_leap)
972 		report_event(EVNT_SYNCCHG, (struct peer *)0);
973 	if (ostratum != sys_stratum)
974 		report_event(EVNT_PEERSTCHG, (struct peer *)0);
975 }
976 
977 
978 /*
979  * poll_update - update peer poll interval. See Section 3.4.9 of the
980  *	   spec.
981  */
982 void
983 poll_update(
984 	struct peer *peer,
985 	int hpoll
986 	)
987 {
988 	long update;
989 
990 	/*
991 	 * The wiggle-the-poll-interval dance. Broadcasters dance only
992 	 * the minpoll beat. Reference clock partners sit this one out.
993 	 * Dancers surviving the clustering algorithm beat to the system
994 	 * clock. Broadcast clients are usually lead by their broadcast
995 	 * clock. Broadcast clients are usually led by their broadcast
996 	 */
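	/*
	 * During a burst the next packet goes out only BURST_INTERVAL1
	 * or BURST_INTERVAL2 seconds (randomized by RANDPOLL) after
	 * this one; otherwise it is scheduled a full randomized poll
	 * interval after the last transmission, with hpoll clamped
	 * between minpoll and maxpoll.
	 */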
997 	if (peer->hmode == MODE_BROADCAST) {
998 		peer->hpoll = peer->minpoll;
999 	} else if (peer->flags & FLAG_SYSPEER) {
1000 		peer->hpoll = sys_poll;
1001 	} else {
1002 		if (hpoll > peer->maxpoll)
1003 			peer->hpoll = peer->maxpoll;
1004 		else if (hpoll < peer->minpoll)
1005 			peer->hpoll = peer->minpoll;
1006 		else
1007 			peer->hpoll = hpoll;
1008 	}
1009 	if (peer->burst > 0) {
1010 		if (peer->nextdate != current_time)
1011 			return;
1012 		if (peer->flags & FLAG_REFCLOCK)
1013 			peer->nextdate++;
1014 		else if (peer->reach & 0x1)
1015 			peer->nextdate += RANDPOLL(BURST_INTERVAL2);
1016 		else
1017 			peer->nextdate += RANDPOLL(BURST_INTERVAL1);
1018 	} else {
1019 		update = max(min(peer->ppoll, peer->hpoll),
1020 		    peer->minpoll);
1021 		peer->nextdate = peer->outdate + RANDPOLL(update);
1022 	}
1023 #ifdef DEBUG
1024 	if (debug > 1)
1025 		printf("poll_update: at %lu %s poll %d burst %d last %lu next %lu\n",
1026 		    current_time, ntoa(&peer->srcadr), hpoll,
1027 		    peer->burst, peer->outdate, peer->nextdate);
1028 #endif
1029 }
1030 
1031 
1032 /*
1033  * clear - clear peer filter registers.  See Section 3.4.8 of the spec.
1034  */
1035 void
1036 peer_clear(
1037 	register struct peer *peer
1038 	)
1039 {
1040 	register int i;
1041 
1042 	memset(CLEAR_TO_ZERO(peer), 0, LEN_CLEAR_TO_ZERO);
1043 	peer->estbdelay = sys_bdelay;
1044 	peer->hpoll = peer->minpoll;
1045 	peer->pollsw = FALSE;
1046 	peer->variance = MAXDISPERSE;
1047 	peer->epoch = current_time;
1048 	for (i = 0; i < NTP_SHIFT; i++) {
1049 		peer->filter_order[i] = i;
1050 		peer->filter_disp[i] = MAXDISPERSE;
1051 		peer->filter_epoch[i] = current_time;
1052 	}
1053 	poll_update(peer, peer->minpoll);
1054 
1055 	/*
1056 	 * Since we have a chance to correct possible funniness in
1057 	 * our selection of interfaces on a multihomed host, do so
1058 	 * by setting us to no particular interface.
1059 	 * WARNING: do so only in non-broadcast mode!
1060 	 */
1061 	if (peer->hmode != MODE_BROADCAST)
1062 		peer->dstadr = any_interface;
1063 }
1064 
1065 
1066 /*
1067  * clock_filter - add incoming clock sample to filter register and run
1068  *		  the filter procedure to find the best sample.
1069  */
1070 void
1071 clock_filter(
1072 	register struct peer *peer,
1073 	double sample_offset,
1074 	double sample_delay,
1075 	double sample_disp
1076 	)
1077 {
1078 	register int i, j, k, n = 0;
1079 	register u_char *ord;
1080 	double distance[NTP_SHIFT];
1081 	double x, y, z, off;
1082 
1083 	/*
1084 	 * Update error bounds and calculate distances. Also initialize
1085 	 * sort index vector.
1086 	 */
1087 	x = CLOCK_PHI * (current_time - peer->update);
1088 	peer->update = current_time;
1089 	ord = peer->filter_order;
1090 	j = peer->filter_nextpt;
1091 	for (i = 0; i < NTP_SHIFT; i++) {
1092 		peer->filter_disp[j] += x;
1093 		if (peer->filter_disp[j] > MAXDISPERSE)
1094 			peer->filter_disp[j] = MAXDISPERSE;
1095 		distance[i] = fabs(peer->filter_delay[j]) / 2 +
1096 		    peer->filter_disp[j];
1097 		ord[i] = j;
1098 		if (--j < 0)
1099 			j += NTP_SHIFT;
1100 	}
1101 
1102 	/*
1103 	 * Insert the new sample at the beginning of the register.
1104 	 */
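	/*
	 * The fresh sample starts with a dispersion equal to the sum of
	 * the peer and system clock precisions (LOGTOD() converts the
	 * signed log2 exponent to seconds) plus whatever dispersion the
	 * caller supplied; on later calls it ages at CLOCK_PHI per
	 * second in the loop above.
	 */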
1105 	peer->filter_offset[peer->filter_nextpt] = sample_offset;
1106 	peer->filter_delay[peer->filter_nextpt] = sample_delay;
1107 	x = LOGTOD(peer->precision) + LOGTOD(sys_precision) +
1108 	    sample_disp;
1109 	peer->filter_disp[peer->filter_nextpt] = min(x, MAXDISPERSE);
1110 	peer->filter_epoch[peer->filter_nextpt] = current_time;
1111 	distance[0] = min(x + fabs(sample_delay) / 2, MAXDISTANCE);
1112 	peer->filter_nextpt++;
1113 	if (peer->filter_nextpt >= NTP_SHIFT)
1114 		peer->filter_nextpt = 0;
1115 
1116 	/*
1117 	 * Sort the samples in the register by distance. The winning
1118 	 * sample will be in ord[0]. Sort the samples only if they
1119 	 * are younger than the Allan intercept.
1120 	 */
1121 	y = min(allan_xpt, NTP_SHIFT * ULOGTOD(sys_poll));
1122 	for (n = 0; n < NTP_SHIFT && current_time -
1123 	    peer->filter_epoch[ord[n]] <= y; n++) {
1124 		for (j = 0; j < n; j++) {
1125 			if (distance[j] > distance[n]) {
1126 				x = distance[j];
1127 				k = ord[j];
1128 				distance[j] = distance[n];
1129 				ord[j] = ord[n];
1130 				distance[n] = x;
1131 				ord[n] = k;
1132 			}
1133 		}
1134 	}
1135 
1136 	/*
1137 	 * Compute the error bound and standard error.
1138 	 */
1139 	x = y = z = off = 0.;
1140 	for (i = NTP_SHIFT - 1; i >= 0; i--) {
1141 		x = NTP_FWEIGHT * (x + peer->filter_disp[ord[i]]);
1142 		if (i < n) {
1143 			z += 1. / distance[i];
1144 			off += peer->filter_offset[ord[i]] /
1145 			    distance[i];
1146 			y += DIFF(peer->filter_offset[ord[i]],
1147 			    peer->filter_offset[ord[0]]);
1148 		}
1149 	}
1150 	peer->delay = peer->filter_delay[ord[0]];
1151 	peer->variance = min(y / n, MAXDISPERSE);
1152 	peer->disp = min(x, MAXDISPERSE);
1153 	peer->epoch = current_time;
1154 	x = peer->offset;
1155 	if (peer->flags & FLAG_BURST)
1156 		peer->offset = off / z;
1157 	else
1158 		peer->offset = peer->filter_offset[ord[0]];
1159 
1160 	/*
1161 	 * A new sample is useful only if it is younger than the last
1162 	 * one used.
1163 	 */
1164 	if (peer->filter_epoch[ord[0]] > peer->epoch) {
1165 #ifdef DEBUG
1166 		if (debug)
1167 			printf("clock_filter: discard %lu\n",
1168 			    peer->filter_epoch[ord[0]] - peer->epoch);
1169 #endif
1170 		return;
1171 	}
1172 
1173 	/*
1174 	 * If the offset exceeds the dispersion by CLOCK_SGATE and the
1175 	 * interval since the last update is less than twice the system
1176 	 * poll interval, consider the update a popcorn spike and ignore
1177 	 * it.
1178 	 */
1179 	if (fabs(x - peer->offset) > CLOCK_SGATE &&
1180 	    peer->filter_epoch[ord[0]] - peer->epoch < (1 <<
1181 	    (sys_poll + 1))) {
1182 #ifdef DEBUG
1183 		if (debug)
1184 			printf("clock_filter: popcorn spike %.6f\n", x);
1185 #endif
1186 		return;
1187 	}
1188 	peer->epoch = peer->filter_epoch[ord[0]];
1189 	peer->pollsw = TRUE;
1190 #ifdef DEBUG
1191 	if (debug)
1192 		printf(
1193 		    "clock_filter: offset %.6f delay %.6f disp %.6f std %.6f, age %lu\n",
1194 		    peer->offset, peer->delay, peer->disp,
1195 		    SQRT(peer->variance), current_time - peer->epoch);
1196 #endif
1197 }
1198 
1199 
1200 /*
1201  * clock_select - find the pick-of-the-litter clock
1202  */
1203 void
1204 clock_select(void)
1205 {
1206 	register struct peer *peer;
1207 	int i;
1208 	int nlist, nl3;
1209 	double d, e, f;
1210 	int j;
1211 	int n;
1212 	int allow, found, k;
1213 	double high, low;
1214 	double synch[NTP_MAXCLOCK], error[NTP_MAXCLOCK];
1215 	struct peer *osys_peer;
1216 	struct peer *typeacts = 0;
1217 	struct peer *typelocal = 0;
1218 	struct peer *typepps = 0;
1219 	struct peer *typeprefer = 0;
1220 	struct peer *typesystem = 0;
1221 
1222 	static int list_alloc = 0;
1223 	static struct endpoint *endpoint = NULL;
1224 	static int *index = NULL;
1225 	static struct peer **peer_list = NULL;
1226 	static u_int endpoint_size = 0;
1227 	static u_int index_size = 0;
1228 	static u_int peer_list_size = 0;
1229 
1230 	/*
1231 	 * Initialize. If a prefer peer does not survive this thing,
1232 	 * the pps_update switch will remain zero.
1233 	 */
1234 	pps_update = 0;
1235 	nlist = 0;
1236 	low = 1e9;
1237 	high = -1e9;
1238 	for (n = 0; n < HASH_SIZE; n++)
1239 		nlist += peer_hash_count[n];
1240 	if (nlist > list_alloc) {
1241 		if (list_alloc > 0) {
1242 			free(endpoint);
1243 			free(index);
1244 			free(peer_list);
1245 		}
1246 		while (list_alloc < nlist) {
1247 			list_alloc += 5;
1248 			endpoint_size += 5 * 3 * sizeof *endpoint;
1249 			index_size += 5 * 3 * sizeof *index;
1250 			peer_list_size += 5 * sizeof *peer_list;
1251 		}
1252 		endpoint = (struct endpoint *)emalloc(endpoint_size);
1253 		index = (int *)emalloc(index_size);
1254 		peer_list = (struct peer **)emalloc(peer_list_size);
1255 	}
1256 
1257 	/*
1258 	 * This first chunk of code is supposed to go through all
1259 	 * peers we know about to find the peers which are most likely
1260 	 * to succeed. We run through the list doing the sanity checks
1261 	 * and trying to insert anyone who looks okay.
1262 	 */
1263 	nlist = nl3 = 0;	/* none yet */
1264 	for (n = 0; n < HASH_SIZE; n++) {
1265 		for (peer = peer_hash[n]; peer != 0; peer = peer->next) {
1266 			peer->flags &= ~FLAG_SYSPEER;
1267 			peer->status = CTL_PST_SEL_REJECT;
1268 			if (peer->flags & FLAG_NOSELECT)
1269 				continue;	/* noselect (survey) */
1270 			if (peer->reach == 0)
1271 				continue;	/* unreachable */
1272 			if (peer->stratum > 1 && peer->refid ==
1273 			    peer->dstadr->sin.sin_addr.s_addr)
1274 				continue;	/* sync loop */
1275 			if (root_distance(peer) >= MAXDISTANCE + 2 *
1276 			    CLOCK_PHI * ULOGTOD(sys_poll)) {
1277 				peer->seldisptoolarge++;
1278 				continue;	/* noisy or broken */
1279 			}
1280 
1281 			/*
1282 			 * Don't allow the local-clock or acts drivers
1283 			 * in the kitchen at this point, unless the
1284 			 * in the kitchen at this point, unless it is the
1285 			 * nobody else is around.
1286 			 */
1287 			if (peer->refclktype == REFCLK_LOCALCLOCK
1288 #if defined(VMS) && defined(VMS_LOCALUNIT)
1289 				/* wjm: local unit VMS_LOCALUNIT taken seriously */
1290 				&& REFCLOCKUNIT(&peer->srcadr) != VMS_LOCALUNIT
1291 #endif	/* VMS && VMS_LOCALUNIT */
1292 				) {
1293 				typelocal = peer;
1294 				if (!(peer->flags & FLAG_PREFER))
1295 					continue; /* no local clock */
1296 			}
1297 			if (peer->sstclktype == CTL_SST_TS_TELEPHONE) {
1298 				typeacts = peer;
1299 				if (!(peer->flags & FLAG_PREFER))
1300 					continue; /* no acts */
1301 			}
1302 
1303 			/*
1304 			 * If we get this far, we assume the peer is
1305 			 * acceptable.
1306 			 */
1307 			peer->status = CTL_PST_SEL_SANE;
1308 			peer_list[nlist++] = peer;
1309 
1310 			/*
1311 			 * Insert each interval endpoint on the sorted
1312 			 * list.
1313 			 */
1314 			e = peer->offset;	 /* Upper end */
1315 			f = root_distance(peer);
1316 			e = e + f;
1317 			for (i = nl3 - 1; i >= 0; i--) {
1318 				if (e >= endpoint[index[i]].val)
1319 					break;
1320 				index[i + 3] = index[i];
1321 			}
1322 			index[i + 3] = nl3;
1323 			endpoint[nl3].type = 1;
1324 			endpoint[nl3++].val = e;
1325 
1326 			e = e - f;		/* Center point */
1327 			for ( ; i >= 0; i--) {
1328 				if (e >= endpoint[index[i]].val)
1329 					break;
1330 				index[i + 2] = index[i];
1331 			}
1332 			index[i + 2] = nl3;
1333 			endpoint[nl3].type = 0;
1334 			endpoint[nl3++].val = e;
1335 
1336 			e = e - f;		/* Lower end */
1337 			for ( ; i >= 0; i--) {
1338 				if (e >= endpoint[index[i]].val)
1339 					break;
1340 				index[i + 1] = index[i];
1341 			}
1342 			index[i + 1] = nl3;
1343 			endpoint[nl3].type = -1;
1344 			endpoint[nl3++].val = e;
1345 		}
1346 	}
1347 #ifdef DEBUG
1348 	if (debug > 1)
1349 		for (i = 0; i < nl3; i++)
1350 			printf("select: endpoint %2d %.6f\n",
1351 			    endpoint[index[i]].type, endpoint[index[i]].val);
1352 #endif
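	/*
	 * This is essentially the intersection algorithm: each
	 * survivor contributed three sorted endpoints above, its offset
	 * minus root distance (type -1), the offset itself (type 0) and
	 * the offset plus root distance (type 1). The loop below
	 * narrows [low, high] to the region shared by the largest
	 * number of correctness intervals; peers whose offsets fall
	 * outside it are passed over as presumed falsetickers in the
	 * clustering pass that follows.
	 */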
1353 	i = 0;
1354 	j = nl3 - 1;
1355 	allow = nlist;		/* falsetickers assumed */
1356 	found = 0;
1357 	while (allow > 0) {
1358 		allow--;
1359 		for (n = 0; i <= j; i++) {
1360 			n += endpoint[index[i]].type;
1361 			if (n < 0)
1362 				break;
1363 			if (endpoint[index[i]].type == 0)
1364 				found++;
1365 		}
1366 		for (n = 0; i <= j; j--) {
1367 			n += endpoint[index[j]].type;
1368 			if (n > 0)
1369 				break;
1370 			if (endpoint[index[j]].type == 0)
1371 				found++;
1372 		}
1373 		if (found > allow)
1374 			break;
1375 		low = endpoint[index[i++]].val;
1376 		high = endpoint[index[j--]].val;
1377 	}
1378 
1379 	/*
1380 	 * If no survivors remain at this point, check if the acts or
1381 	 * local clock drivers have been found. If so, nominate one of
1382 	 * them as the only survivor. Otherwise, give up and declare us
1383 	 * unsynchronized.
1384 	 */
1385 	if ((allow << 1) >= nlist) {
1386 		if (typeacts != 0) {
1387 			typeacts->status = CTL_PST_SEL_SANE;
1388 			peer_list[0] = typeacts;
1389 			nlist = 1;
1390 		} else if (typelocal != 0) {
1391 			typelocal->status = CTL_PST_SEL_SANE;
1392 			peer_list[0] = typelocal;
1393 			nlist = 1;
1394 		} else {
1395 			if (sys_peer != 0) {
1396 				report_event(EVNT_PEERSTCHG,
1397 				    (struct peer *)0);
1398 				NLOG(NLOG_SYNCSTATUS)
1399 				msyslog(LOG_INFO,
1400 				    "synchronisation lost");
1401 			}
1402 			sys_peer = 0;
1403 			return;
1404 		}
1405 	}
1406 #ifdef DEBUG
1407 	if (debug > 1)
1408 		printf("select: low %.6f high %.6f\n", low, high);
1409 #endif
1410 
1411 	/*
1412 	 * Clustering algorithm. Process intersection list to discard
1413 	 * outliers. Construct candidate list in cluster order
1414 	 * determined by the sum of peer synchronization distance plus
1415 	 * scaled stratum. We must find at least one peer.
1416 	 */
1417 	j = 0;
1418 	for (i = 0; i < nlist; i++) {
1419 		peer = peer_list[i];
1420 		if (nlist > 1 && (low >= peer->offset ||
1421 			peer->offset >= high))
1422 			continue;
1423 		peer->status = CTL_PST_SEL_CORRECT;
1424 		d = root_distance(peer) + peer->stratum * MAXDISPERSE;
1425 		if (j >= NTP_MAXCLOCK) {
1426 			if (d >= synch[j - 1])
1427 				continue;
1428 			else
1429 				j--;
1430 		}
1431 		for (k = j; k > 0; k--) {
1432 			if (d >= synch[k - 1])
1433 				break;
1434 			synch[k] = synch[k - 1];
1435 			peer_list[k] = peer_list[k - 1];
1436 		}
1437 		peer_list[k] = peer;
1438 		synch[k] = d;
1439 		j++;
1440 	}
1441 	nlist = j;
1442 
1443 #ifdef DEBUG
1444 	if (debug > 1)
1445 		for (i = 0; i < nlist; i++)
1446 			printf("select: %s distance %.6f\n",
1447 			    ntoa(&peer_list[i]->srcadr), synch[i]);
1448 #endif
1449 
1450 	/*
1451 	 * Now, prune outliers by root dispersion. Continue as long as
1452 	 * there are more than NTP_MINCLOCK survivors and the minimum
1453 	 * select dispersion is greater than the maximum peer
1454 	 * dispersion. Stop if we are about to discard a prefer peer.
1455 	 */
1456 	for (i = 0; i < nlist; i++) {
1457 		peer = peer_list[i];
1458 		error[i] = peer->variance;
1459 		if (i < NTP_CANCLOCK)
1460 			peer->status = CTL_PST_SEL_SELCAND;
1461 		else
1462 			peer->status = CTL_PST_SEL_DISTSYSPEER;
1463 	}
1464 	while (1) {
1465 		sys_maxd = 0;
1466 		d = error[0];
1467 		for (k = i = nlist - 1; i >= 0; i--) {
1468 			double sdisp = 0;
1469 
1470 			for (j = nlist - 1; j > 0; j--) {
1471 				sdisp = NTP_SWEIGHT * (sdisp +
1472 					DIFF(peer_list[i]->offset,
1473 					peer_list[j]->offset));
1474 			}
1475 			if (sdisp > sys_maxd) {
1476 				sys_maxd = sdisp;
1477 				k = i;
1478 			}
1479 			if (error[i] < d)
1480 				d = error[i];
1481 		}
1482 
1483 #ifdef DEBUG
1484 		if (debug > 1)
1485 			printf(
1486 			    "select: survivors %d select %.6f peer %.6f\n",
1487 			    nlist, SQRT(sys_maxd), SQRT(d));
1488 #endif
1489 		if (nlist <= NTP_MINCLOCK || sys_maxd <= d ||
1490 			peer_list[k]->flags & FLAG_PREFER)
1491 			break;
1492 		for (j = k + 1; j < nlist; j++) {
1493 			peer_list[j - 1] = peer_list[j];
1494 			error[j - 1] = error[j];
1495 		}
1496 		nlist--;
1497 	}
1498 #ifdef DEBUG
1499 	if (debug > 1) {
1500 		for (i = 0; i < nlist; i++)
1501 			printf(
1502 			    "select: %s offset %.6f, distance %.6f poll %d\n",
1503 			    ntoa(&peer_list[i]->srcadr), peer_list[i]->offset,
1504 			    synch[i], peer_list[i]->pollsw);
1505 	}
1506 #endif
1507 
1508 	/*
1509 	 * What remains is a list of not greater than NTP_MINCLOCK
1510 	 * peers. We want only a peer at the lowest stratum to become
1511 	 * the system peer, although all survivors are eligible for the
1512 	 * combining algorithm. First record their order, diddle the
1513 	 * flags and clamp the poll intervals. Then, consider the peers
1514 	 * at the lowest stratum. Of these, OR the leap bits on the
1515 	 * assumption that, if some of them honk nonzero bits, they must
1516 	 * know what they are doing. Also, check for prefer and pps
1517 	 * peers. If a prefer peer is found within clock_max, update the
1518 	 * pps switch. Of the other peers not at the lowest stratum,
1519 	 * check if the system peer is among them and, if found, zap
1520 	 * him. We note that the head of the list is at the lowest
1521 	 * stratum and that unsynchronized peers cannot survive this
1522 	 * far.
1523 	 */
1524 	leap_consensus = 0;
1525 	for (i = nlist - 1; i >= 0; i--) {
1526 		peer_list[i]->status = CTL_PST_SEL_SYNCCAND;
1527 		peer_list[i]->flags |= FLAG_SYSPEER;
1528 		poll_update(peer_list[i], peer_list[i]->hpoll);
1529 		if (peer_list[i]->stratum == peer_list[0]->stratum) {
1530 			leap_consensus |= peer_list[i]->leap;
1531 			if (peer_list[i]->refclktype == REFCLK_ATOM_PPS)
1532 				typepps = peer_list[i];
1533 			if (peer_list[i] == sys_peer)
1534 				typesystem = peer_list[i];
1535 			if (peer_list[i]->flags & FLAG_PREFER) {
1536 				typeprefer = peer_list[i];
1537 				if (fabs(typeprefer->offset) <
1538 				    clock_max)
1539 					pps_update = 1;
1540 			}
1541 		} else {
1542 			if (peer_list[i] == sys_peer)
1543 				sys_peer = 0;
1544 		}
1545 	}
1546 
1547 	/*
1548 	 * Mitigation rules of the game. There are several types of
1549 	 * peers that make a difference here: (1) prefer local peers
1550 	 * (type REFCLK_LOCALCLOCK with FLAG_PREFER) or prefer modem
1551 	 * peers (type REFCLK_NIST_ATOM etc with FLAG_PREFER), (2) pps
1552 	 * peers (type REFCLK_ATOM_PPS), (3) remaining prefer peers
1553 	 * (flag FLAG_PREFER), (4) the existing system peer, if any, (5)
1554 	 * the head of the survivor list. Note that only one peer can be
1555 	 * declared prefer. The order of preference is in the order
1556 	 * stated. Note that all of these must be at the lowest stratum,
1557 	 * i.e., the stratum of the head of the survivor list.
1558 	 */
1559 	osys_peer = sys_peer;
1560 	if (typeprefer && (typeprefer->refclktype == REFCLK_LOCALCLOCK
1561 	    || typeprefer->sstclktype == CTL_SST_TS_TELEPHONE ||
1562 		!typepps)) {
1563 		sys_peer = typeprefer;
1564 		sys_peer->status = CTL_PST_SEL_SYSPEER;
1565 		sys_offset = sys_peer->offset;
1566 		sys_epsil = sys_peer->variance;
1567 #ifdef DEBUG
1568 		if (debug > 1)
1569 			printf("select: prefer offset %.6f\n",
1570 			    sys_offset);
1571 #endif
1572 	} else if (typepps && pps_update) {
1573 		sys_peer = typepps;
1574 		sys_peer->status = CTL_PST_SEL_PPS;
1575 		sys_offset = sys_peer->offset;
1576 		sys_epsil = sys_peer->variance;
1577 		if (!pps_control)
1578 			NLOG(NLOG_SYSEVENT) /* conditional syslog */
1579 				msyslog(LOG_INFO, "pps sync enabled");
1580 		pps_control = current_time;
1581 #ifdef DEBUG
1582 		if (debug > 1)
1583 			printf("select: pps offset %.6f\n", sys_offset);
1584 #endif
1585 	} else {
1586 		if (!typesystem)
1587 			sys_peer = peer_list[0];
1588 		sys_peer->status = CTL_PST_SEL_SYSPEER;
1589 		sys_offset = clock_combine(peer_list, nlist);
1590 		sys_epsil = sys_peer->variance + sys_maxd;
1591 #ifdef DEBUG
1592 		if (debug > 1)
1593 			printf("select: combine offset %.6f\n",
1594 			   sys_offset);
1595 #endif
1596 	}
1597 	if (osys_peer != sys_peer)
1598 		report_event(EVNT_PEERSTCHG, (struct peer *)0);
1599 	clock_update();
1600 }
1601 
1602 /*
1603  * clock_combine - combine offsets from selected peers
1604  */
1605 static double
1606 clock_combine(
1607 	struct peer **peers,
1608 	int npeers
1609 	)
1610 {
1611 	int i;
1612 	double x, y, z;
1613 	y = z = 0;
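	/*
	 * The combined offset is a weighted average of the survivors'
	 * offsets, each weighted by the reciprocal of its root
	 * distance, so nearby, low-dispersion sources dominate the
	 * result.
	 */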
1614 	for (i = 0; i < npeers; i++) {
1615 		x = root_distance(peers[i]);
1616 		y += 1. / x;
1617 		z += peers[i]->offset / x;
1618 	}
1619 	return (z / y);
1620 }
1621 
1622 /*
1623  * root_distance - compute synchronization distance from peer to root
1624  */
1625 static double
1626 root_distance(
1627 	struct peer *peer
1628 	)
1629 {
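	/*
	 * Half the total roundtrip delay back to the root, plus the
	 * accumulated root dispersion, plus this peer's filter
	 * dispersion and offset standard error, plus dispersion accrued
	 * at CLOCK_PHI per second since the last update.
	 */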
1630 	return ((fabs(peer->delay) + peer->rootdelay) / 2 +
1631 		peer->rootdispersion + peer->disp +
1632 		    SQRT(peer->variance) + CLOCK_PHI * (current_time -
1633 		    peer->update));
1634 }
1635 
1636 /*
1637  * peer_xmit - send packet for persistent association.
1638  */
1639 static void
1640 peer_xmit(
1641 	struct peer *peer	/* peer structure pointer */
1642 	)
1643 {
1644 	struct pkt xpkt;
1645 	int find_rtt = (peer->cast_flags & MDF_MCAST) &&
1646 		peer->hmode != MODE_BROADCAST;
1647 	int sendlen;
1648 
1649 	/*
1650 	 * Initialize protocol fields.
1651 	 */
1652 	xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap,
1653 		peer->version, peer->hmode);
1654 	xpkt.stratum = STRATUM_TO_PKT(sys_stratum);
1655 	xpkt.ppoll = peer->hpoll;
1656 	xpkt.precision = sys_precision;
1657 	xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay));
1658 	xpkt.rootdispersion = HTONS_FP(DTOUFP(sys_rootdispersion +
1659 		LOGTOD(sys_precision)));
1660 	xpkt.refid = sys_refid;
1661 	HTONL_FP(&sys_reftime, &xpkt.reftime);
1662 	HTONL_FP(&peer->org, &xpkt.org);
1663 	HTONL_FP(&peer->rec, &xpkt.rec);
1664 
1665 	/*
1666 	 * Authenticate the packet if enabled and either configured or
1667 	 * the previous packet was authenticated. If for some reason the
1668 	 * key associated with the key identifier is not in the key
1669 	 * cache, then honk key zero.
1670 	 */
1671 	sendlen = LEN_PKT_NOMAC;
1672 	if (peer->flags & FLAG_AUTHENABLE) {
1673 		u_long xkeyid;
1674 		l_fp xmt_tx;
1675 
1676 		/*
1677 		 * Transmit encrypted packet compensated for the
1678 		 * encryption delay.
1679 		 */
1680 #ifdef MD5
1681 		if (peer->flags & FLAG_SKEY) {
1682 
1683 			/*
1684 			 * In autokey mode, allocate and initialize a
1685 			 * key list if not already done. Then, use the
1686 			 * list in inverse order, discarding keys once
1687 			 * used. Keep the latest key around until the
1688 			 * next one, so clients can use client/server
1689 			 * packets to compute propagation delay. Note we
1690 			 * have to wait until the receive side of the
1691 			 * socket is bound and the server address
1692 			 * confirmed.
1693 			 */
1694 			if (ntohl(peer->dstadr->sin.sin_addr.s_addr) ==
1695 			    0 &&
1696 				ntohl(peer->dstadr->bcast.sin_addr.s_addr) == 0)
1697 				peer->keyid = 0;
1698 			else {
1699 				if (peer->keylist == 0) {
1700 					make_keylist(peer);
1701 				} else {
1702 					authtrust(peer->keylist[peer->keynumber], 0);
1703 					if (peer->keynumber == 0)
1704 						make_keylist(peer);
1705 					else {
1706 						peer->keynumber--;
1707 						xkeyid = peer->keylist[peer->keynumber];
1708 						if (!authistrusted(xkeyid))
1709 							make_keylist(peer);
1710 					}
1711 				}
1712 				peer->keyid = peer->keylist[peer->keynumber];
1713 				xpkt.keyid1 = htonl(2 * sizeof(u_int32));
1714 				xpkt.keyid2 = htonl(sys_private);
1715 				sendlen += 2 * sizeof(u_int32);
1716 			}
1717 		}
1718 #endif /* MD5 */
1719 		xkeyid = peer->keyid;
1720 		get_systime(&peer->xmt);
1721 		L_ADD(&peer->xmt, &sys_authdelay);
1722 		HTONL_FP(&peer->xmt, &xpkt.xmt);
1723 		sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt,
1724 		    sendlen);
1725 		get_systime(&xmt_tx);
1726 		sendpkt(&peer->srcadr, find_rtt ? any_interface :
1727 		    peer->dstadr, ((peer->cast_flags & MDF_MCAST) &&
1728 		    !find_rtt) ? ((peer->cast_flags & MDF_ACAST) ? -7 :
1729 		    peer->ttl) : -7, &xpkt, sendlen);
1730 
1731 		/*
1732 		 * Calculate the encryption delay. Keep the minimum over
1733 		 * the latest two samples.
1734 		 */
1735 		L_SUB(&xmt_tx, &peer->xmt);
1736 		L_ADD(&xmt_tx, &sys_authdelay);
1737 		sys_authdly[1] = sys_authdly[0];
1738 		sys_authdly[0] = xmt_tx.l_uf;
1739 		if (sys_authdly[0] < sys_authdly[1])
1740 			sys_authdelay.l_uf = sys_authdly[0];
1741 		else
1742 			sys_authdelay.l_uf = sys_authdly[1];
1743 		peer->sent++;
1744 #ifdef DEBUG
1745 		if (debug)
1746 			printf(
1747 			    "transmit: at %ld to %s mode %d keyid %08lx index %d\n",
1748 			    current_time, ntoa(&peer->srcadr),
1749 			    peer->hmode, xkeyid, peer->keynumber);
1750 #endif
1751 	} else {
1752 		/*
1753 		 * Transmit non-authenticated packet.
1754 		 */
1755 		get_systime(&(peer->xmt));
1756 		HTONL_FP(&peer->xmt, &xpkt.xmt);
1757 		sendpkt(&(peer->srcadr), find_rtt ? any_interface :
1758 		    peer->dstadr, ((peer->cast_flags & MDF_MCAST) &&
1759 		    !find_rtt) ? ((peer->cast_flags & MDF_ACAST) ? -7 :
1760 		    peer->ttl) : -8, &xpkt, sendlen);
1761 		peer->sent++;
1762 #ifdef DEBUG
1763 		if (debug)
1764 			printf("transmit: at %ld to %s mode %d\n",
1765 				current_time, ntoa(&peer->srcadr),
1766 				peer->hmode);
1767 #endif
1768 	}
1769 }
1770 
1771 /*
1772  * fast_xmit - Send packet for nonpersistent association.
1773  */
1774 static void
1775 fast_xmit(
1776 	struct recvbuf *rbufp,	/* receive packet pointer */
1777 	int xmode,		/* transmit mode */
1778 	u_long xkeyid		/* transmit key ID */
1779 	)
1780 {
1781 	struct pkt xpkt;
1782 	struct pkt *rpkt;
1783 	int sendlen;
1784 	l_fp xmt_ts;
1785 
1786 	/*
1787 	 * Initialize transmit packet header fields in the receive
1788 	 * buffer provided. We leave some fields intact as received.
1789 	 */
1790 	rpkt = &rbufp->recv_pkt;
1791 	xpkt.li_vn_mode = PKT_LI_VN_MODE(sys_leap,
1792 		PKT_VERSION(rpkt->li_vn_mode), xmode);
1793 	xpkt.stratum = STRATUM_TO_PKT(sys_stratum);
1794 	xpkt.ppoll = rpkt->ppoll;
1795 	xpkt.precision = sys_precision;
1796 	xpkt.rootdelay = HTONS_FP(DTOFP(sys_rootdelay));
1797 	xpkt.rootdispersion = HTONS_FP(DTOUFP(sys_rootdispersion +
1798 		LOGTOD(sys_precision)));
1799 	xpkt.refid = sys_refid;
1800 	HTONL_FP(&sys_reftime, &xpkt.reftime);
1801 	xpkt.org = rpkt->xmt;
1802 	HTONL_FP(&rbufp->recv_time, &xpkt.rec);
1803 	sendlen = LEN_PKT_NOMAC;
1804 	if (rbufp->recv_length > sendlen) {
1805 		l_fp xmt_tx;
1806 
1807 		/*
1808 		 * Transmit encrypted packet compensated for the
1809 		 * encryption delay.
1810 		 */
1811 		if (xkeyid > NTP_MAXKEY) {
1812 			xpkt.keyid1 = htonl(2 * sizeof(u_int32));
1813 			xpkt.keyid2 = htonl(sys_private);
1814 			sendlen += 2 * sizeof(u_int32);
1815 		}
1816 		get_systime(&xmt_ts);
1817 		L_ADD(&xmt_ts, &sys_authdelay);
1818 		HTONL_FP(&xmt_ts, &xpkt.xmt);
1819 		sendlen += authencrypt(xkeyid, (u_int32 *)&xpkt,
1820 		    sendlen);
1821 		get_systime(&xmt_tx);
1822 		sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, -9, &xpkt,
1823 		    sendlen);
1824 
1825 		/*
1826 		 * Calculate the encryption delay. Keep the minimum over
1827 		 * the latest two samples.
1828 		 */
1829 		L_SUB(&xmt_tx, &xmt_ts);
1830 		L_ADD(&xmt_tx, &sys_authdelay);
1831 		sys_authdly[1] = sys_authdly[0];
1832 		sys_authdly[0] = xmt_tx.l_uf;
1833 		if (sys_authdly[0] < sys_authdly[1])
1834 			sys_authdelay.l_uf = sys_authdly[0];
1835 		else
1836 			sys_authdelay.l_uf = sys_authdly[1];
1837 #ifdef DEBUG
1838 		if (debug)
1839 			printf(
1840 			    "transmit: at %ld to %s mode %d keyid %08lx\n",
1841 			    current_time, ntoa(&rbufp->recv_srcadr),
1842 			    xmode, xkeyid);
1843 #endif
1844 	} else {
1845 
1846 		/*
1847 		 * Transmit non-authenticated packet.
1848 		 */
1849 		get_systime(&xmt_ts);
1850 		HTONL_FP(&xmt_ts, &xpkt.xmt);
1851 		sendpkt(&rbufp->recv_srcadr, rbufp->dstadr, -10, &xpkt,
1852 			sendlen);
1853 #ifdef DEBUG
1854 		if (debug)
1855 			printf("transmit: at %ld to %s mode %d\n",
1856 				current_time, ntoa(&rbufp->recv_srcadr),
1857 				xmode);
1858 #endif
1859 	}
1860 }
1861 
1862 #ifdef MD5
1863 /*
1864  * Compute key list
1865  */
1866 static void
1867 make_keylist(
1868 	struct peer *peer
1869 	)
1870 {
1871 	int i;
1872 	u_long keyid;
1873 	u_long ltemp;
1874 
1875 	/*
1876 	 * Allocate the key list if necessary.
1877 	 */
1878 	if (peer->keylist == 0)
1879 		peer->keylist = (u_long *)emalloc(sizeof(u_long) *
1880 		    NTP_MAXSESSION);
1881 
1882 	/*
1883 	 * Generate an initial key ID which is unique and greater than
1884 	 * NTP_MAXKEY.
1885 	 */
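	/*
	 * Key IDs at or below NTP_MAXKEY are reserved for configured
	 * (trusted) keys, so the starting ID must lie above that range
	 * and must not collide with a key already known to the
	 * authentication code.
	 */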
1886 	while (1) {
1887 		keyid = (u_long)RANDOM & 0xffffffff;
1888 		if (keyid <= NTP_MAXKEY)
1889 			continue;
1890 		if (authhavekey(keyid))
1891 			continue;
1892 		break;
1893 	}
1894 
1895 	/*
1896 	 * Generate up to NTP_MAXSESSION session keys. Stop if the
1897 	 * next one would not be unique, would not be a session-key ID,
1898 	 * or would expire before the next poll.
1899 	 */
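	/*
	 * Each pass derives the next key ID from the local and remote
	 * addresses and the previous ID via session_key(), which installs
	 * the key with lifetime ltemp.  The list can only be verified in
	 * the reverse of the order it is generated, which is why
	 * transmit() walks keynumber downward.  ltemp tracks the
	 * remaining lifetime, charged one poll interval (1 << hpoll
	 * seconds) per key.
	 */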
1900 	ltemp = sys_automax;
1901 	for (i = 0; i < NTP_MAXSESSION; i++) {
1902 		peer->keylist[i] = keyid;
1903 		peer->keynumber = i;
1904 		keyid = session_key(
1905 		    ntohl(peer->dstadr->sin.sin_addr.s_addr),
1906 		    (peer->hmode == MODE_BROADCAST || (peer->flags &
1907 		    FLAG_MCAST2)) ?
1908 		    ntohl(peer->dstadr->bcast.sin_addr.s_addr):
1909 		    ntohl(peer->srcadr.sin_addr.s_addr), keyid, ltemp);
1910 		ltemp -= 1 << peer->hpoll;
1911 		if (auth_havekey(keyid) || keyid <= NTP_MAXKEY ||
1912 		    ltemp <= (1 << (peer->hpoll + 1)))
1913 			break;
1914 	}
1915 }
1916 #endif /* MD5 */
1917 
1918 /*
1919  * Find the precision of this particular machine
1920  */
1921 #define DUSECS	1000000 /* microseconds in a second */
1922 #define HUSECS	(1 << 20) /* 2^20; approx DUSECS, convenient for shifting */
1923 #define MINSTEP 5	/* minimum clock increment (us) */
1924 #define MAXSTEP 20000	/* maximum clock increment (us) */
1925 #define MINLOOPS 5	/* minimum number of step samples */
1926 
1927 /*
1928  * This routine calculates the differences between successive calls to
1929  * gettimeofday(). If a difference is less than zero, the us field
1930  * has rolled over to the next second, so we add a second in us. If
1931  * the difference is greater than zero and less than MINSTEP, the
1932  * clock has been advanced by a small amount to avoid standing still.
1933  * If the clock has advanced by a greater amount, then a timer interrupt
1934  * has occurred and this amount represents the precision of the clock.
1935  * In order to guard against spurious values, which could occur if we
1936  * happen to hit a fat interrupt, we do this for MINLOOPS times and
1937  * keep the minimum value obtained.
1938  */
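/*
 * For example (illustrative figures), on a system whose clock advances
 * in 10-ms ticks each observed step is about 10000 us, so val ends up
 * near 10000; HUSECS (2^20) is then shifted right seven times before it
 * drops below val, and the routine returns -7, i.e. a precision of
 * about 2^-7 s (7.8 ms).
 */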
1939 int
1940 default_get_precision(void)
1941 {
1942 	struct timeval tp;
1943 #if !defined(SYS_WINNT) && !defined(VMS) && !defined(_SEQUENT_)
1944 	struct timezone tzp;
1945 #elif defined(VMS) || defined(_SEQUENT_)
1946 	struct timezone {
1947 		int tz_minuteswest;
1948 		int tz_dsttime;
1949 	} tzp;
1950 #endif /* defined(VMS) || defined(_SEQUENT_) */
1951 	long last;
1952 	int i;
1953 	long diff;
1954 	long val;
1955 	long usec;
1956 #ifdef HAVE_GETCLOCK
1957 	struct timespec ts;
1958 #endif
1959 #if defined(__FreeBSD__) && __FreeBSD__ >= 3
1960 	u_long freq;
1961 	size_t j;
1962 
1963 	/* Try to see if we can find the frequency of the counter
1964 	 * which drives our timekeeping.
1965 	 */
1966 	j = sizeof freq;
1967 	i = sysctlbyname("kern.timecounter.frequency", &freq, &j , 0,
1968 	    0);
1969 	if (i)
1970 		i = sysctlbyname("machdep.tsc_freq", &freq, &j , 0, 0);
1971 	if (i)
1972 		i = sysctlbyname("machdep.i586_freq", &freq, &j , 0, 0);
1973 	if (i)
1974 		i = sysctlbyname("machdep.i8254_freq", &freq, &j , 0,
1975 		    0);
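	/*
	 * If any of the sysctls succeeded, derive the precision directly
	 * from the counter frequency: the loop below returns roughly
	 * -log2(freq).  For example (illustrative), with the classic
	 * i8254 frequency of 1193182 Hz the loop shifts 21 times and
	 * returns -20, i.e. a resolution of about 2^-20 s (1 us).
	 */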
1976 	if (!i) {
1977 		for (i = 1; freq ; i--)
1978 			freq >>= 1;
1979 		return (i);
1980 	}
1981 #endif
1982 	usec = 0;
1983 	val = MAXSTEP;
1984 #ifdef HAVE_GETCLOCK
1985 	(void) getclock(TIMEOFDAY, &ts);
1986 	tp.tv_sec = ts.tv_sec;
1987 	tp.tv_usec = ts.tv_nsec / 1000;
1988 #else /*  not HAVE_GETCLOCK */
1989 	GETTIMEOFDAY(&tp, &tzp);
1990 #endif /* not HAVE_GETCLOCK */
1991 	last = tp.tv_usec;
1992 	for (i = 0; i < MINLOOPS && usec < HUSECS;) {
1993 #ifdef HAVE_GETCLOCK
1994 		(void) getclock(TIMEOFDAY, &ts);
1995 		tp.tv_sec = ts.tv_sec;
1996 		tp.tv_usec = ts.tv_nsec / 1000;
1997 #else /*  not HAVE_GETCLOCK */
1998 		GETTIMEOFDAY(&tp, &tzp);
1999 #endif /* not HAVE_GETCLOCK */
2000 		diff = tp.tv_usec - last;
2001 		last = tp.tv_usec;
2002 		if (diff < 0)
2003 			diff += DUSECS;
2004 		usec += diff;
2005 		if (diff > MINSTEP) {
2006 			i++;
2007 			if (diff < val)
2008 				val = diff;
2009 		}
2010 	}
2011 	NLOG(NLOG_SYSINFO)
2012 		msyslog(LOG_INFO, "precision = %ld usec", val);
2013 	if (usec >= HUSECS)
2014 		val = MINSTEP;	/* steps rarely exceeded MINSTEP; fast machine */
2015 	diff = HUSECS;
2016 	for (i = 0; diff > val; i--)
2017 		diff >>= 1;
2018 	return (i);
2019 }
2020 
2021 /*
2022  * init_proto - initialize the protocol module's data
2023  */
2024 void
2025 init_proto(void)
2026 {
2027 	l_fp dummy;
2028 
2029 	/*
2030 	 * Fill in the sys_* variables.  Defaults: don't listen to
2031 	 * broadcasts; require authentication.
2032 	 */
2033 	sys_leap = LEAP_NOTINSYNC;
2034 	sys_stratum = STRATUM_UNSPEC;
2035 	sys_precision = (s_char)default_get_precision();
2036 	sys_rootdelay = 0;
2037 	sys_rootdispersion = 0;
2038 	sys_refid = 0;
2039 	L_CLR(&sys_reftime);
2040 	sys_peer = 0;
2041 	get_systime(&dummy);
2042 	sys_bclient = 0;
2043 	sys_bdelay = DEFBROADDELAY;
2044 #if defined(DES) || defined(MD5)
2045 	sys_authenticate = 1;
2046 #else
2047 	sys_authenticate = 0;
2048 #endif
2049 	L_CLR(&sys_authdelay);
2050 	sys_authdly[0] = sys_authdly[1] = 0;
2051 	sys_stattime = 0;
2052 	sys_badstratum = 0;
2053 	sys_oldversionpkt = 0;
2054 	sys_newversionpkt = 0;
2055 	sys_badlength = 0;
2056 	sys_unknownversion = 0;
2057 	sys_processed = 0;
2058 	sys_badauth = 0;
2059 	sys_manycastserver = 0;
2060 	sys_automax = 1 << NTP_AUTOMAX;
2061 
2062 	/*
2063 	 * Enable these by default
2064 	 */
2065 	ntp_enable = 1;
2066 #ifndef KERNEL_FLL_BUG
2067 	kern_enable = 1;
2068 #endif
2069 	msyslog(LOG_DEBUG, "kern_enable is %d", kern_enable);
2070 	stats_control = 1;
2071 
2072 	/*
2073 	 * Some system clocks should only be adjusted in 10ms increments.
2074 	 */
2075 #if defined RELIANTUNIX_CLOCK
2076 	systime_10ms_ticks = 1;		  /* Reliant UNIX */
2077 #elif defined SCO5_CLOCK
2078 	if (sys_precision >= (s_char)-10) /* pre-SCO OpenServer 5.0.6 */
2079 		systime_10ms_ticks = 1;
2080 #endif
2081 	if (systime_10ms_ticks)
2082 		msyslog(LOG_INFO, "using 10ms tick adjustments");
2083 }
2084 
2085 
2086 /*
2087  * proto_config - configure the protocol module
2088  */
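/*
 * A hypothetical call from the configuration code might look like
 * proto_config(PROTO_BROADDELAY, 0, 0.004) to set a 4-ms default
 * broadcast delay (illustrative value): integer items arrive in
 * "value", floating-point items such as the broadcast delay in
 * "dvalue".
 */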
2089 void
2090 proto_config(
2091 	int item,
2092 	u_long value,
2093 	double dvalue
2094 	)
2095 {
2096 	/*
2097 	 * Figure out what is to be changed, then do it
2098 	 */
2099 	switch (item) {
2100 	case PROTO_KERNEL:
2101 
2102 		/*
2103 		 * Turn on/off kernel discipline
2104 		 */
2105 		kern_enable = (int)value;
2106 		break;
2107 
2108 	case PROTO_NTP:
2109 
2110 		/*
2111 		 * Turn on/off clock discipline
2112 		 */
2113 		ntp_enable = (int)value;
2114 		break;
2115 
2116 	case PROTO_MONITOR:
2117 
2118 		/*
2119 		 * Turn on/off monitoring
2120 		 */
2121 		if (value)
2122 			mon_start(MON_ON);
2123 		else
2124 			mon_stop(MON_ON);
2125 		break;
2126 
2127 	case PROTO_FILEGEN:
2128 
2129 		/*
2130 		 * Turn on/off statistics
2131 		 */
2132 		stats_control = (int)value;
2133 		break;
2134 
2135 	case PROTO_BROADCLIENT:
2136 
2137 		/*
2138 		 * Turn on/off facility to listen to broadcasts
2139 		 */
2140 		sys_bclient = (int)value;
2141 		if (value)
2142 			io_setbclient();
2143 		else
2144 			io_unsetbclient();
2145 		break;
2146 
2147 	case PROTO_MULTICAST_ADD:
2148 
2149 		/*
2150 		 * Add multicast group address
2151 		 */
2152 		io_multicast_add(value);
2153 		break;
2154 
2155 	case PROTO_MULTICAST_DEL:
2156 
2157 		/*
2158 		 * Delete multicast group address
2159 		 */
2160 		io_multicast_del(value);
2161 		break;
2162 
2163 	case PROTO_BROADDELAY:
2164 
2165 		/*
2166 		 * Set default broadcast delay
2167 		 */
2168 		sys_bdelay = dvalue;
2169 		break;
2170 
2171 	case PROTO_AUTHENTICATE:
2172 
2173 		/*
2174 		 * Specify the use of authenticated data
2175 		 */
2176 		sys_authenticate = (int)value;
2177 		break;
2178 
2179 	default:
2180 
2181 		/*
2182 		 * Log this error
2183 		 */
2184 		msyslog(LOG_ERR,
2185 		    "proto_config: illegal item %d, value %ld",
2186 		    item, value);
2187 		break;
2188 	}
2189 }
2190 
2191 
2192 /*
2193  * proto_clr_stats - clear protocol stat counters
2194  */
2195 void
2196 proto_clr_stats(void)
2197 {
2198 	sys_badstratum = 0;
2199 	sys_oldversionpkt = 0;
2200 	sys_newversionpkt = 0;
2201 	sys_unknownversion = 0;
2202 	sys_badlength = 0;
2203 	sys_processed = 0;
2204 	sys_badauth = 0;
2205 	sys_stattime = current_time;
2206 	sys_limitrejected = 0;
2207 }
2208