xref: /freebsd/contrib/ntp/ntpd/ntp_request.c (revision 3416500aef140042c64bc149cb1ec6620483bc44)
1 /*
2  * ntp_request.c - respond to information requests
3  */
4 
5 #ifdef HAVE_CONFIG_H
6 # include <config.h>
7 #endif
8 
9 #include "ntpd.h"
10 #include "ntp_io.h"
11 #include "ntp_request.h"
12 #include "ntp_control.h"
13 #include "ntp_refclock.h"
14 #include "ntp_if.h"
15 #include "ntp_stdlib.h"
16 #include "ntp_assert.h"
17 
18 #include <stdio.h>
19 #include <stddef.h>
20 #include <signal.h>
21 #ifdef HAVE_NETINET_IN_H
22 #include <netinet/in.h>
23 #endif
24 #include <arpa/inet.h>
25 
26 #include "recvbuff.h"
27 
28 #ifdef KERNEL_PLL
29 #include "ntp_syscall.h"
30 #endif /* KERNEL_PLL */
31 
32 /*
33  * Structure to hold request procedure information
34  */
35 #define	NOAUTH	0
36 #define	AUTH	1
37 
38 #define	NO_REQUEST	(-1)
39 /*
40  * Because we now have v6 addresses in the messages, we need to compensate
41  * for the larger size.  Therefore, we introduce the alternate size to
42  * keep us friendly with older implementations.  A little ugly.
43  */
44 static int client_v6_capable = 0;   /* the client can handle longer messages */
45 
46 #define v6sizeof(type)	(client_v6_capable ? sizeof(type) : v4sizeof(type))
47 
48 struct req_proc {
49 	short request_code;	/* defined request code */
50 	short needs_auth;	/* true when authentication needed */
51 	short sizeofitem;	/* size of request data item (older size)*/
52 	short v6_sizeofitem;	/* size of request data item (new size)*/
53 	void (*handler) (sockaddr_u *, endpt *,
54 			   struct req_pkt *);	/* routine to handle request */
55 };
56 
57 /*
58  * Universal request codes
59  */
60 static const struct req_proc univ_codes[] = {
61 	{ NO_REQUEST,		NOAUTH,	 0,	0, NULL }
62 };
63 
64 static	void	req_ack	(sockaddr_u *, endpt *, struct req_pkt *, int);
65 static	void *	prepare_pkt	(sockaddr_u *, endpt *,
66 				 struct req_pkt *, size_t);
67 static	void *	more_pkt	(void);
68 static	void	flush_pkt	(void);
69 static	void	list_peers	(sockaddr_u *, endpt *, struct req_pkt *);
70 static	void	list_peers_sum	(sockaddr_u *, endpt *, struct req_pkt *);
71 static	void	peer_info	(sockaddr_u *, endpt *, struct req_pkt *);
72 static	void	peer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
73 static	void	sys_info	(sockaddr_u *, endpt *, struct req_pkt *);
74 static	void	sys_stats	(sockaddr_u *, endpt *, struct req_pkt *);
75 static	void	mem_stats	(sockaddr_u *, endpt *, struct req_pkt *);
76 static	void	io_stats	(sockaddr_u *, endpt *, struct req_pkt *);
77 static	void	timer_stats	(sockaddr_u *, endpt *, struct req_pkt *);
78 static	void	loop_info	(sockaddr_u *, endpt *, struct req_pkt *);
79 static	void	do_conf		(sockaddr_u *, endpt *, struct req_pkt *);
80 static	void	do_unconf	(sockaddr_u *, endpt *, struct req_pkt *);
81 static	void	set_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
82 static	void	clr_sys_flag	(sockaddr_u *, endpt *, struct req_pkt *);
83 static	void	setclr_flags	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
84 static	void	list_restrict4	(const restrict_u *, struct info_restrict **);
85 static	void	list_restrict6	(const restrict_u *, struct info_restrict **);
86 static	void	list_restrict	(sockaddr_u *, endpt *, struct req_pkt *);
87 static	void	do_resaddflags	(sockaddr_u *, endpt *, struct req_pkt *);
88 static	void	do_ressubflags	(sockaddr_u *, endpt *, struct req_pkt *);
89 static	void	do_unrestrict	(sockaddr_u *, endpt *, struct req_pkt *);
90 static	void	do_restrict	(sockaddr_u *, endpt *, struct req_pkt *, int);
91 static	void	mon_getlist	(sockaddr_u *, endpt *, struct req_pkt *);
92 static	void	reset_stats	(sockaddr_u *, endpt *, struct req_pkt *);
93 static	void	reset_peer	(sockaddr_u *, endpt *, struct req_pkt *);
94 static	void	do_key_reread	(sockaddr_u *, endpt *, struct req_pkt *);
95 static	void	trust_key	(sockaddr_u *, endpt *, struct req_pkt *);
96 static	void	untrust_key	(sockaddr_u *, endpt *, struct req_pkt *);
97 static	void	do_trustkey	(sockaddr_u *, endpt *, struct req_pkt *, u_long);
98 static	void	get_auth_info	(sockaddr_u *, endpt *, struct req_pkt *);
99 static	void	req_get_traps	(sockaddr_u *, endpt *, struct req_pkt *);
100 static	void	req_set_trap	(sockaddr_u *, endpt *, struct req_pkt *);
101 static	void	req_clr_trap	(sockaddr_u *, endpt *, struct req_pkt *);
102 static	void	do_setclr_trap	(sockaddr_u *, endpt *, struct req_pkt *, int);
103 static	void	set_request_keyid (sockaddr_u *, endpt *, struct req_pkt *);
104 static	void	set_control_keyid (sockaddr_u *, endpt *, struct req_pkt *);
105 static	void	get_ctl_stats   (sockaddr_u *, endpt *, struct req_pkt *);
106 static	void	get_if_stats    (sockaddr_u *, endpt *, struct req_pkt *);
107 static	void	do_if_reload    (sockaddr_u *, endpt *, struct req_pkt *);
108 #ifdef KERNEL_PLL
109 static	void	get_kernel_info (sockaddr_u *, endpt *, struct req_pkt *);
110 #endif /* KERNEL_PLL */
111 #ifdef REFCLOCK
112 static	void	get_clock_info (sockaddr_u *, endpt *, struct req_pkt *);
113 static	void	set_clock_fudge (sockaddr_u *, endpt *, struct req_pkt *);
114 #endif	/* REFCLOCK */
115 #ifdef REFCLOCK
116 static	void	get_clkbug_info (sockaddr_u *, endpt *, struct req_pkt *);
117 #endif	/* REFCLOCK */
118 
119 /*
120  * ntpd request codes
121  */
122 static const struct req_proc ntp_codes[] = {
123 	{ REQ_PEER_LIST,	NOAUTH,	0, 0,	list_peers },
124 	{ REQ_PEER_LIST_SUM,	NOAUTH,	0, 0,	list_peers_sum },
125 	{ REQ_PEER_INFO,    NOAUTH, v4sizeof(struct info_peer_list),
126 				sizeof(struct info_peer_list), peer_info},
127 	{ REQ_PEER_STATS,   NOAUTH, v4sizeof(struct info_peer_list),
128 				sizeof(struct info_peer_list), peer_stats},
129 	{ REQ_SYS_INFO,		NOAUTH,	0, 0,	sys_info },
130 	{ REQ_SYS_STATS,	NOAUTH,	0, 0,	sys_stats },
131 	{ REQ_IO_STATS,		NOAUTH,	0, 0,	io_stats },
132 	{ REQ_MEM_STATS,	NOAUTH,	0, 0,	mem_stats },
133 	{ REQ_LOOP_INFO,	NOAUTH,	0, 0,	loop_info },
134 	{ REQ_TIMER_STATS,	NOAUTH,	0, 0,	timer_stats },
135 	{ REQ_CONFIG,	    AUTH, v4sizeof(struct conf_peer),
136 				sizeof(struct conf_peer), do_conf },
137 	{ REQ_UNCONFIG,	    AUTH, v4sizeof(struct conf_unpeer),
138 				sizeof(struct conf_unpeer), do_unconf },
139 	{ REQ_SET_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
140 				sizeof(struct conf_sys_flags), set_sys_flag },
141 	{ REQ_CLR_SYS_FLAG, AUTH, sizeof(struct conf_sys_flags),
142 				sizeof(struct conf_sys_flags),  clr_sys_flag },
143 	{ REQ_GET_RESTRICT,	NOAUTH,	0, 0,	list_restrict },
144 	{ REQ_RESADDFLAGS, AUTH, v4sizeof(struct conf_restrict),
145 				sizeof(struct conf_restrict), do_resaddflags },
146 	{ REQ_RESSUBFLAGS, AUTH, v4sizeof(struct conf_restrict),
147 				sizeof(struct conf_restrict), do_ressubflags },
148 	{ REQ_UNRESTRICT, AUTH, v4sizeof(struct conf_restrict),
149 				sizeof(struct conf_restrict), do_unrestrict },
150 	{ REQ_MON_GETLIST,	NOAUTH,	0, 0,	mon_getlist },
151 	{ REQ_MON_GETLIST_1,	NOAUTH,	0, 0,	mon_getlist },
152 	{ REQ_RESET_STATS, AUTH, sizeof(struct reset_flags), 0, reset_stats },
153 	{ REQ_RESET_PEER,  AUTH, v4sizeof(struct conf_unpeer),
154 				sizeof(struct conf_unpeer), reset_peer },
155 	{ REQ_REREAD_KEYS,	AUTH,	0, 0,	do_key_reread },
156 	{ REQ_TRUSTKEY,   AUTH, sizeof(u_long), sizeof(u_long), trust_key },
157 	{ REQ_UNTRUSTKEY, AUTH, sizeof(u_long), sizeof(u_long), untrust_key },
158 	{ REQ_AUTHINFO,		NOAUTH,	0, 0,	get_auth_info },
159 	{ REQ_TRAPS,		NOAUTH, 0, 0,	req_get_traps },
160 	{ REQ_ADD_TRAP,	AUTH, v4sizeof(struct conf_trap),
161 				sizeof(struct conf_trap), req_set_trap },
162 	{ REQ_CLR_TRAP,	AUTH, v4sizeof(struct conf_trap),
163 				sizeof(struct conf_trap), req_clr_trap },
164 	{ REQ_REQUEST_KEY, AUTH, sizeof(u_long), sizeof(u_long),
165 				set_request_keyid },
166 	{ REQ_CONTROL_KEY, AUTH, sizeof(u_long), sizeof(u_long),
167 				set_control_keyid },
168 	{ REQ_GET_CTLSTATS,	NOAUTH,	0, 0,	get_ctl_stats },
169 #ifdef KERNEL_PLL
170 	{ REQ_GET_KERNEL,	NOAUTH,	0, 0,	get_kernel_info },
171 #endif
172 #ifdef REFCLOCK
173 	{ REQ_GET_CLOCKINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
174 				get_clock_info },
175 	{ REQ_SET_CLKFUDGE, AUTH, sizeof(struct conf_fudge),
176 				sizeof(struct conf_fudge), set_clock_fudge },
177 	{ REQ_GET_CLKBUGINFO, NOAUTH, sizeof(u_int32), sizeof(u_int32),
178 				get_clkbug_info },
179 #endif
180 	{ REQ_IF_STATS,		AUTH, 0, 0,	get_if_stats },
181 	{ REQ_IF_RELOAD,	AUTH, 0, 0,	do_if_reload },
182 
183 	{ NO_REQUEST,		NOAUTH,	0, 0,	0 }
184 };
185 
186 
187 /*
188  * Authentication keyid used to authenticate requests.  Zero means we
189  * don't allow writing anything.
190  */
191 keyid_t info_auth_keyid;
192 
193 /*
194  * Statistic counters to keep track of requests and responses.
195  */
196 u_long numrequests;		/* number of requests we've received */
197 u_long numresppkts;		/* number of resp packets sent with data */
198 
199 /*
200  * lazy way to count errors, indexed by the error code
201  */
202 u_long errorcounter[MAX_INFO_ERR + 1];
203 
204 /*
205  * A hack.  To keep the authentication module clear of ntp-ism's, we
206  * include a time reset variable for its stats here.
207  */
208 u_long auth_timereset;
209 
210 /*
211  * Response packet used by these routines.  Also some state information
212  * so that we can handle packet formatting within a common set of
213  * subroutines.  Note we try to enter data in place whenever possible,
214  * but the need to set the more bit correctly means we occasionally
215  * use the extra buffer and copy.
216  */
217 static struct resp_pkt rpkt;
218 static int reqver;
219 static int seqno;
220 static int nitems;
221 static int itemsize;
222 static int databytes;
223 static char exbuf[RESP_DATA_SIZE];
224 static int usingexbuf;
225 static sockaddr_u *toaddr;
226 static endpt *frominter;
227 
228 /*
229  * init_request - initialize request data
230  */
231 void
232 init_request (void)
233 {
234 	size_t i;
235 
236 	numrequests = 0;
237 	numresppkts = 0;
238 	auth_timereset = 0;
239 	info_auth_keyid = 0;	/* by default, can't do this */
240 
241 	for (i = 0; i < sizeof(errorcounter)/sizeof(errorcounter[0]); i++)
242 	    errorcounter[i] = 0;
243 }
244 
245 
246 /*
247  * req_ack - acknowledge request with no data
248  */
249 static void
250 req_ack(
251 	sockaddr_u *srcadr,
252 	endpt *inter,
253 	struct req_pkt *inpkt,
254 	int errcode
255 	)
256 {
257 	/*
258 	 * fill in the fields
259 	 */
260 	rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
261 	rpkt.auth_seq = AUTH_SEQ(0, 0);
262 	rpkt.implementation = inpkt->implementation;
263 	rpkt.request = inpkt->request;
264 	rpkt.err_nitems = ERR_NITEMS(errcode, 0);
265 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(0);
266 
267 	/*
268 	 * send packet and bump counters
269 	 */
270 	sendpkt(srcadr, inter, -1, (struct pkt *)&rpkt, RESP_HEADER_SIZE);
271 	errorcounter[errcode]++;
272 }
273 
274 
275 /*
276  * prepare_pkt - prepare response packet for transmission, return pointer
277  *		 to storage for data item.
278  */
279 static void *
280 prepare_pkt(
281 	sockaddr_u *srcadr,
282 	endpt *inter,
283 	struct req_pkt *pkt,
284 	size_t structsize
285 	)
286 {
287 	DPRINTF(4, ("request: preparing pkt\n"));
288 
289 	/*
290 	 * Fill in the implementation, request and itemsize fields
291 	 * since these won't change.
292 	 */
293 	rpkt.implementation = pkt->implementation;
294 	rpkt.request = pkt->request;
295 	rpkt.mbz_itemsize = MBZ_ITEMSIZE(structsize);
296 
297 	/*
298 	 * Compute the static data needed to carry on.
299 	 */
300 	toaddr = srcadr;
301 	frominter = inter;
302 	seqno = 0;
303 	nitems = 0;
304 	itemsize = structsize;
305 	databytes = 0;
306 	usingexbuf = 0;
307 
308 	/*
309 	 * return the beginning of the packet buffer.
310 	 */
311 	return &rpkt.u;
312 }
313 
314 
315 /*
316  * more_pkt - return a data pointer for a new item.
317  */
static void *
more_pkt(void)
{
	/*
	 * If the previous item landed in the overflow buffer, the
	 * current packet is full: send it with the MORE bit set,
	 * then start a fresh packet seeded with the overflowed item.
	 */
	if (usingexbuf) {
		DPRINTF(3, ("request: sending pkt\n"));
		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, MORE_BIT, reqver);
		rpkt.auth_seq = AUTH_SEQ(0, seqno);
		rpkt.err_nitems = htons((u_short)nitems);
		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
			RESP_HEADER_SIZE + databytes);
		numresppkts++;

		/*
		 * Copy data out of exbuf into the packet.
		 */
		memcpy(&rpkt.u.data[0], exbuf, (unsigned)itemsize);
		seqno++;
		databytes = 0;
		nitems = 0;
		usingexbuf = 0;
	}

	/* Account for the item the caller just finished filling in. */
	databytes += itemsize;
	nitems++;
	if (databytes + itemsize <= RESP_DATA_SIZE) {
		DPRINTF(4, ("request: giving him more data\n"));
		/*
		 * More room in packet.  Give him the
		 * next address.
		 */
		return &rpkt.u.data[databytes];
	} else {
		/*
		 * No room in packet.  Give him the extra
		 * buffer unless this was the last in the sequence
		 * (MAXSEQ reached: tell the caller to stop).
		 */
		DPRINTF(4, ("request: into extra buffer\n"));
		if (seqno == MAXSEQ)
			return NULL;
		else {
			usingexbuf = 1;
			return exbuf;
		}
	}
}
366 
367 
368 /*
369  * flush_pkt - we're done, return remaining information.
370  */
371 static void
372 flush_pkt(void)
373 {
374 	DPRINTF(3, ("request: flushing packet, %d items\n", nitems));
375 	/*
376 	 * Must send the last packet.  If nothing in here and nothing
377 	 * has been sent, send an error saying no data to be found.
378 	 */
379 	if (seqno == 0 && nitems == 0)
380 		req_ack(toaddr, frominter, (struct req_pkt *)&rpkt,
381 			INFO_ERR_NODATA);
382 	else {
383 		rpkt.rm_vn_mode = RM_VN_MODE(RESP_BIT, 0, reqver);
384 		rpkt.auth_seq = AUTH_SEQ(0, seqno);
385 		rpkt.err_nitems = htons((u_short)nitems);
386 		sendpkt(toaddr, frominter, -1, (struct pkt *)&rpkt,
387 			RESP_HEADER_SIZE+databytes);
388 		numresppkts++;
389 	}
390 }
391 
392 
393 
394 /*
395  * Given a buffer, return the packet mode
396  */
397 int
398 get_packet_mode(struct recvbuf *rbufp)
399 {
400 	struct req_pkt *inpkt = (struct req_pkt *)&rbufp->recv_pkt;
401 	return (INFO_MODE(inpkt->rm_vn_mode));
402 }
403 
404 
405 /*
406  * process_private - process private mode (7) packets
407  */
void
process_private(
	struct recvbuf *rbufp,
	int mod_okay
	)
{
	static u_long quiet_until;	/* rate limit for drop logging */
	struct req_pkt *inpkt;
	struct req_pkt_tail *tailinpkt;
	sockaddr_u *srcadr;
	endpt *inter;
	const struct req_proc *proc;
	int ec;
	short temp_size;
	l_fp ftmp;
	double dtemp;
	size_t recv_len;
	size_t noslop_len;
	size_t mac_len;

	/*
	 * Initialize pointers, for convenience
	 */
	recv_len = rbufp->recv_length;
	inpkt = (struct req_pkt *)&rbufp->recv_pkt;
	srcadr = &rbufp->recv_srcadr;
	inter = rbufp->dstadr;

	DPRINTF(3, ("process_private: impl %d req %d\n",
		    inpkt->implementation, inpkt->request));

	/*
	 * Do some sanity checks on the packet.  Return a format
	 * error if it fails.  The comma-operator ++ec trick numbers
	 * each test so the failing one can be named in the log.
	 */
	ec = 0;
	if (   (++ec, ISRESPONSE(inpkt->rm_vn_mode))
	    || (++ec, ISMORE(inpkt->rm_vn_mode))
	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) > NTP_VERSION)
	    || (++ec, INFO_VERSION(inpkt->rm_vn_mode) < NTP_OLDVERSION)
	    || (++ec, INFO_SEQ(inpkt->auth_seq) != 0)
	    || (++ec, INFO_ERR(inpkt->err_nitems) != 0)
	    || (++ec, INFO_MBZ(inpkt->mbz_itemsize) != 0)
	    || (++ec, rbufp->recv_length < (int)REQ_LEN_HDR)
		) {
		/* Log at most once per 60 s to avoid log flooding. */
		NLOG(NLOG_SYSEVENT)
			if (current_time >= quiet_until) {
				msyslog(LOG_ERR,
					"process_private: drop test %d"
					" failed, pkt from %s",
					ec, stoa(srcadr));
				quiet_until = current_time + 60;
			}
		return;
	}

	reqver = INFO_VERSION(inpkt->rm_vn_mode);

	/*
	 * Get the appropriate procedure list to search.
	 */
	if (inpkt->implementation == IMPL_UNIV)
		proc = univ_codes;
	else if ((inpkt->implementation == IMPL_XNTPD) ||
		 (inpkt->implementation == IMPL_XNTPD_OLD))
		proc = ntp_codes;
	else {
		req_ack(srcadr, inter, inpkt, INFO_ERR_IMPL);
		return;
	}

	/*
	 * Search the list for the request codes.  If it isn't one
	 * we know, return an error.
	 */
	while (proc->request_code != NO_REQUEST) {
		if (proc->request_code == (short) inpkt->request)
			break;
		proc++;
	}
	if (proc->request_code == NO_REQUEST) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_REQ);
		return;
	}

	DPRINTF(4, ("found request in tables\n"));

	/*
	 * If we need data, check to see if we have some.  If we
	 * don't, check to see that there is none (picky, picky).
	 */

	/* This part is a bit tricky, we want to be sure that the size
	 * returned is either the old or the new size.  We also can find
	 * out if the client can accept both types of messages this way.
	 *
	 * Handle the exception of REQ_CONFIG. It can have two data sizes.
	 */
	temp_size = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	if ((temp_size != proc->sizeofitem &&
	     temp_size != proc->v6_sizeofitem) &&
	    !(inpkt->implementation == IMPL_XNTPD &&
	      inpkt->request == REQ_CONFIG &&
	      temp_size == sizeof(struct old_conf_peer))) {
		DPRINTF(3, ("process_private: wrong item size, received %d, should be %d or %d\n",
			    temp_size, proc->sizeofitem, proc->v6_sizeofitem));
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	/* Claimed item count must fit in the bytes actually received. */
	if ((proc->sizeofitem != 0) &&
	    ((size_t)(temp_size * INFO_NITEMS(inpkt->err_nitems)) >
	     (recv_len - REQ_LEN_HDR))) {
		DPRINTF(3, ("process_private: not enough data\n"));
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* Remember whether this client understands the IPv6 layouts. */
	switch (inpkt->implementation) {
	case IMPL_XNTPD:
		client_v6_capable = 1;
		break;
	case IMPL_XNTPD_OLD:
		client_v6_capable = 0;
		break;
	default:
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/*
	 * If we need to authenticate, do so.  Note that an
	 * authenticatable packet must include a mac field, must
	 * have used key info_auth_keyid and must have included
	 * a time stamp in the appropriate field.  The time stamp
	 * must be within INFO_TS_MAXSKEW of the receive
	 * time stamp.
	 */
	if (proc->needs_auth && sys_authenticate) {

		if (recv_len < (REQ_LEN_HDR +
		    (INFO_ITEMSIZE(inpkt->mbz_itemsize) *
		    INFO_NITEMS(inpkt->err_nitems)) +
		    REQ_TAIL_MIN)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		/*
		 * For 16-octet digests, regardless of itemsize and
		 * nitems, authenticated requests are a fixed size
		 * with the timestamp, key ID, and digest located
		 * at the end of the packet.  Because the key ID
		 * determining the digest size precedes the digest,
		 * for larger digests the fixed size request scheme
		 * is abandoned and the timestamp, key ID, and digest
		 * are located relative to the start of the packet,
		 * with the digest size determined by the packet size.
		 */
		noslop_len = REQ_LEN_HDR
			     + INFO_ITEMSIZE(inpkt->mbz_itemsize) *
			       INFO_NITEMS(inpkt->err_nitems)
			     + sizeof(inpkt->tstamp);
		/* 32-bit alignment */
		noslop_len = (noslop_len + 3) & ~3;
		if (recv_len > (noslop_len + MAX_MAC_LEN))
			mac_len = 20;
		else
			mac_len = recv_len - noslop_len;

		/* Locate the trailing timestamp/keyid/digest. */
		tailinpkt = (void *)((char *)inpkt + recv_len -
			    (mac_len + sizeof(inpkt->tstamp)));

		/*
		 * If this guy is restricted from doing this, don't let
		 * him.  If the wrong key was used, or packet doesn't
		 * have mac, return.
		 */
		if (!INFO_IS_AUTH(inpkt->auth_seq) || !info_auth_keyid
		    || ntohl(tailinpkt->keyid) != info_auth_keyid) {
			DPRINTF(5, ("failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
				    INFO_IS_AUTH(inpkt->auth_seq),
				    info_auth_keyid,
				    ntohl(tailinpkt->keyid), (u_long)mac_len));
#ifdef DEBUG
			msyslog(LOG_DEBUG,
				"process_private: failed auth %d info_auth_keyid %u pkt keyid %u maclen %lu\n",
				INFO_IS_AUTH(inpkt->auth_seq),
				info_auth_keyid,
				ntohl(tailinpkt->keyid), (u_long)mac_len);
#endif
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}
		if (recv_len > REQ_LEN_NOMAC + MAX_MAC_LEN) {
			DPRINTF(5, ("bad pkt length %zu\n", recv_len));
			msyslog(LOG_ERR,
				"process_private: bad pkt length %zu",
				recv_len);
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}
		if (!mod_okay || !authhavekey(info_auth_keyid)) {
			DPRINTF(5, ("failed auth mod_okay %d\n",
				    mod_okay));
#ifdef DEBUG
			msyslog(LOG_DEBUG,
				"process_private: failed auth mod_okay %d\n",
				mod_okay);
#endif
			if (!mod_okay) {
				sys_restricted++;
			}
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}

		/*
		 * calculate absolute time difference between xmit time stamp
		 * and receive time stamp.  If too large, too bad.
		 */
		NTOHL_FP(&tailinpkt->tstamp, &ftmp);
		L_SUB(&ftmp, &rbufp->recv_time);
		LFPTOD(&ftmp, dtemp);
		if (fabs(dtemp) > INFO_TS_MAXSKEW) {
			/*
			 * He's a loser.  Tell him.
			 */
			DPRINTF(5, ("xmit/rcv timestamp delta %g > INFO_TS_MAXSKEW %g\n",
				    dtemp, INFO_TS_MAXSKEW));
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}

		/*
		 * So far so good.  See if decryption works out okay.
		 */
		if (!authdecrypt(info_auth_keyid, (u_int32 *)inpkt,
				 recv_len - mac_len, mac_len)) {
			DPRINTF(5, ("authdecrypt failed\n"));
			req_ack(srcadr, inter, inpkt, INFO_ERR_AUTH);
			return;
		}
	}

	DPRINTF(3, ("process_private: all okay, into handler\n"));
	/*
	 * Packet is okay.  Call the handler to send him data.
	 */
	(proc->handler)(srcadr, inter, inpkt);
}
658 
659 
660 /*
661  * list_peers - send a list of the peers
662  */
663 static void
664 list_peers(
665 	sockaddr_u *srcadr,
666 	endpt *inter,
667 	struct req_pkt *inpkt
668 	)
669 {
670 	struct info_peer_list *	ip;
671 	const struct peer *	pp;
672 
673 	ip = (struct info_peer_list *)prepare_pkt(srcadr, inter, inpkt,
674 	    v6sizeof(struct info_peer_list));
675 	for (pp = peer_list; pp != NULL && ip != NULL; pp = pp->p_link) {
676 		if (IS_IPV6(&pp->srcadr)) {
677 			if (!client_v6_capable)
678 				continue;
679 			ip->addr6 = SOCK_ADDR6(&pp->srcadr);
680 			ip->v6_flag = 1;
681 		} else {
682 			ip->addr = NSRCADR(&pp->srcadr);
683 			if (client_v6_capable)
684 				ip->v6_flag = 0;
685 		}
686 
687 		ip->port = NSRCPORT(&pp->srcadr);
688 		ip->hmode = pp->hmode;
689 		ip->flags = 0;
690 		if (pp->flags & FLAG_CONFIG)
691 			ip->flags |= INFO_FLAG_CONFIG;
692 		if (pp == sys_peer)
693 			ip->flags |= INFO_FLAG_SYSPEER;
694 		if (pp->status == CTL_PST_SEL_SYNCCAND)
695 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
696 		if (pp->status >= CTL_PST_SEL_SYSPEER)
697 			ip->flags |= INFO_FLAG_SHORTLIST;
698 		ip = (struct info_peer_list *)more_pkt();
699 	}	/* for pp */
700 
701 	flush_pkt();
702 }
703 
704 
705 /*
706  * list_peers_sum - return extended peer list
707  */
static void
list_peers_sum(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_peer_summary *	ips;
	const struct peer *		pp;
	l_fp 				ltmp;

	DPRINTF(3, ("wants peer list summary\n"));

	ips = (struct info_peer_summary *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_peer_summary));
	for (pp = peer_list; pp != NULL && ips != NULL; pp = pp->p_link) {
		DPRINTF(4, ("sum: got one\n"));
		/*
		 * Be careful here not to return v6 peers when we
		 * want only v4.
		 */
		if (IS_IPV6(&pp->srcadr)) {
			if (!client_v6_capable)
				continue;
			ips->srcadr6 = SOCK_ADDR6(&pp->srcadr);
			ips->v6_flag = 1;
			if (pp->dstadr)
				ips->dstadr6 = SOCK_ADDR6(&pp->dstadr->sin);
			else
				ZERO(ips->dstadr6);
		} else {
			ips->srcadr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ips->v6_flag = 0;

			/*
			 * Pick the most meaningful local address: the
			 * unicast address, falling back to the bcast
			 * address for broadcast associations.
			 */
			if (pp->dstadr) {
				if (!pp->processed)
					ips->dstadr = NSRCADR(&pp->dstadr->sin);
				else {
					if (MDF_BCAST == pp->cast_flags)
						ips->dstadr = NSRCADR(&pp->dstadr->bcast);
					else if (pp->cast_flags) {
						ips->dstadr = NSRCADR(&pp->dstadr->sin);
						if (!ips->dstadr)
							ips->dstadr = NSRCADR(&pp->dstadr->bcast);
					}
				}
			} else {
				ips->dstadr = 0;
			}
		}

		ips->srcport = NSRCPORT(&pp->srcadr);
		ips->stratum = pp->stratum;
		ips->hpoll = pp->hpoll;
		ips->ppoll = pp->ppoll;
		ips->reach = pp->reach;
		/* Translate internal peer state into wire flags. */
		ips->flags = 0;
		if (pp == sys_peer)
			ips->flags |= INFO_FLAG_SYSPEER;
		if (pp->flags & FLAG_CONFIG)
			ips->flags |= INFO_FLAG_CONFIG;
		if (pp->flags & FLAG_REFCLOCK)
			ips->flags |= INFO_FLAG_REFCLOCK;
		if (pp->flags & FLAG_PREFER)
			ips->flags |= INFO_FLAG_PREFER;
		if (pp->flags & FLAG_BURST)
			ips->flags |= INFO_FLAG_BURST;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
			ips->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
			ips->flags |= INFO_FLAG_SHORTLIST;
		ips->hmode = pp->hmode;
		/* Convert native doubles/l_fp to network byte order. */
		ips->delay = HTONS_FP(DTOFP(pp->delay));
		DTOLFP(pp->offset, &ltmp);
		HTONL_FP(&ltmp, &ips->offset);
		ips->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));

		ips = (struct info_peer_summary *)more_pkt();
	}	/* for pp */

	flush_pkt();
}
791 
792 
793 /*
794  * peer_info - send information for one or more peers
795  */
796 static void
797 peer_info (
798 	sockaddr_u *srcadr,
799 	endpt *inter,
800 	struct req_pkt *inpkt
801 	)
802 {
803 	u_short			items;
804 	size_t			item_sz;
805 	char *			datap;
806 	struct info_peer_list	ipl;
807 	struct peer *		pp;
808 	struct info_peer *	ip;
809 	int			i;
810 	int			j;
811 	sockaddr_u		addr;
812 	l_fp			ltmp;
813 
814 	items = INFO_NITEMS(inpkt->err_nitems);
815 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
816 	datap = inpkt->u.data;
817 	if (item_sz != sizeof(ipl)) {
818 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
819 		return;
820 	}
821 	ip = prepare_pkt(srcadr, inter, inpkt,
822 			 v6sizeof(struct info_peer));
823 	while (items-- > 0 && ip != NULL) {
824 		ZERO(ipl);
825 		memcpy(&ipl, datap, item_sz);
826 		ZERO_SOCK(&addr);
827 		NSRCPORT(&addr) = ipl.port;
828 		if (client_v6_capable && ipl.v6_flag) {
829 			AF(&addr) = AF_INET6;
830 			SOCK_ADDR6(&addr) = ipl.addr6;
831 		} else {
832 			AF(&addr) = AF_INET;
833 			NSRCADR(&addr) = ipl.addr;
834 		}
835 #ifdef ISC_PLATFORM_HAVESALEN
836 		addr.sa.sa_len = SOCKLEN(&addr);
837 #endif
838 		datap += item_sz;
839 
840 		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
841 		if (NULL == pp)
842 			continue;
843 		if (IS_IPV6(srcadr)) {
844 			if (pp->dstadr)
845 				ip->dstadr6 =
846 				    (MDF_BCAST == pp->cast_flags)
847 					? SOCK_ADDR6(&pp->dstadr->bcast)
848 					: SOCK_ADDR6(&pp->dstadr->sin);
849 			else
850 				ZERO(ip->dstadr6);
851 
852 			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
853 			ip->v6_flag = 1;
854 		} else {
855 			if (pp->dstadr) {
856 				if (!pp->processed)
857 					ip->dstadr = NSRCADR(&pp->dstadr->sin);
858 				else {
859 					if (MDF_BCAST == pp->cast_flags)
860 						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
861 					else if (pp->cast_flags) {
862 						ip->dstadr = NSRCADR(&pp->dstadr->sin);
863 						if (!ip->dstadr)
864 							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
865 					}
866 				}
867 			} else
868 				ip->dstadr = 0;
869 
870 			ip->srcadr = NSRCADR(&pp->srcadr);
871 			if (client_v6_capable)
872 				ip->v6_flag = 0;
873 		}
874 		ip->srcport = NSRCPORT(&pp->srcadr);
875 		ip->flags = 0;
876 		if (pp == sys_peer)
877 			ip->flags |= INFO_FLAG_SYSPEER;
878 		if (pp->flags & FLAG_CONFIG)
879 			ip->flags |= INFO_FLAG_CONFIG;
880 		if (pp->flags & FLAG_REFCLOCK)
881 			ip->flags |= INFO_FLAG_REFCLOCK;
882 		if (pp->flags & FLAG_PREFER)
883 			ip->flags |= INFO_FLAG_PREFER;
884 		if (pp->flags & FLAG_BURST)
885 			ip->flags |= INFO_FLAG_BURST;
886 		if (pp->status == CTL_PST_SEL_SYNCCAND)
887 			ip->flags |= INFO_FLAG_SEL_CANDIDATE;
888 		if (pp->status >= CTL_PST_SEL_SYSPEER)
889 			ip->flags |= INFO_FLAG_SHORTLIST;
890 		ip->leap = pp->leap;
891 		ip->hmode = pp->hmode;
892 		ip->keyid = pp->keyid;
893 		ip->stratum = pp->stratum;
894 		ip->ppoll = pp->ppoll;
895 		ip->hpoll = pp->hpoll;
896 		ip->precision = pp->precision;
897 		ip->version = pp->version;
898 		ip->reach = pp->reach;
899 		ip->unreach = (u_char)pp->unreach;
900 		ip->flash = (u_char)pp->flash;
901 		ip->flash2 = (u_short)pp->flash;
902 		ip->estbdelay = HTONS_FP(DTOFP(pp->delay));
903 		ip->ttl = (u_char)pp->ttl;
904 		ip->associd = htons(pp->associd);
905 		ip->rootdelay = HTONS_FP(DTOUFP(pp->rootdelay));
906 		ip->rootdispersion = HTONS_FP(DTOUFP(pp->rootdisp));
907 		ip->refid = pp->refid;
908 		HTONL_FP(&pp->reftime, &ip->reftime);
909 		HTONL_FP(&pp->aorg, &ip->org);
910 		HTONL_FP(&pp->rec, &ip->rec);
911 		HTONL_FP(&pp->xmt, &ip->xmt);
912 		j = pp->filter_nextpt - 1;
913 		for (i = 0; i < NTP_SHIFT; i++, j--) {
914 			if (j < 0)
915 				j = NTP_SHIFT-1;
916 			ip->filtdelay[i] = HTONS_FP(DTOFP(pp->filter_delay[j]));
917 			DTOLFP(pp->filter_offset[j], &ltmp);
918 			HTONL_FP(&ltmp, &ip->filtoffset[i]);
919 			ip->order[i] = (u_char)((pp->filter_nextpt +
920 						 NTP_SHIFT - 1) -
921 						pp->filter_order[i]);
922 			if (ip->order[i] >= NTP_SHIFT)
923 				ip->order[i] -= NTP_SHIFT;
924 		}
925 		DTOLFP(pp->offset, &ltmp);
926 		HTONL_FP(&ltmp, &ip->offset);
927 		ip->delay = HTONS_FP(DTOFP(pp->delay));
928 		ip->dispersion = HTONS_FP(DTOUFP(SQRT(pp->disp)));
929 		ip->selectdisp = HTONS_FP(DTOUFP(SQRT(pp->jitter)));
930 		ip = more_pkt();
931 	}
932 	flush_pkt();
933 }
934 
935 
936 /*
937  * peer_stats - send statistics for one or more peers
938  */
static void
peer_stats (
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short			items;	/* number of request items left */
	size_t			item_sz;	/* client's per-item size */
	char *			datap;	/* walks the request payload */
	struct info_peer_list	ipl;	/* local copy of one request item */
	struct peer *		pp;	/* matched association */
	struct info_peer_stats *ip;	/* current reply item */
	sockaddr_u addr;

	DPRINTF(1, ("peer_stats: called\n"));
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	/* reject oversized items up front; guards the memcpy() below */
	if (item_sz > sizeof(ipl)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	ip = prepare_pkt(srcadr, inter, inpkt,
			 v6sizeof(struct info_peer_stats));
	while (items-- > 0 && ip != NULL) {
		/* rebuild the peer's address from the request item */
		ZERO(ipl);
		memcpy(&ipl, datap, item_sz);
		ZERO(addr);
		NSRCPORT(&addr) = ipl.port;
		if (client_v6_capable && ipl.v6_flag) {
			AF(&addr) = AF_INET6;
			SOCK_ADDR6(&addr) = ipl.addr6;
		} else {
			AF(&addr) = AF_INET;
			NSRCADR(&addr) = ipl.addr;
		}
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		DPRINTF(1, ("peer_stats: looking for %s, %d, %d\n",
			    stoa(&addr), ipl.port, NSRCPORT(&addr)));

		datap += item_sz;

		/* unknown peers are silently skipped, not an error */
		pp = findexistingpeer(&addr, NULL, NULL, -1, 0);
		if (NULL == pp)
			continue;

		DPRINTF(1, ("peer_stats: found %s\n", stoa(&addr)));

		/* report the local (destination) address used for this
		 * association; the broadcast/unicast selection below
		 * mirrors the association's cast_flags state */
		if (IS_IPV4(&pp->srcadr)) {
			if (pp->dstadr) {
				if (!pp->processed)
					ip->dstadr = NSRCADR(&pp->dstadr->sin);
				else {
					if (MDF_BCAST == pp->cast_flags)
						ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					else if (pp->cast_flags) {
						ip->dstadr = NSRCADR(&pp->dstadr->sin);
						if (!ip->dstadr)
							ip->dstadr = NSRCADR(&pp->dstadr->bcast);
					}
				}
			} else
				ip->dstadr = 0;

			ip->srcadr = NSRCADR(&pp->srcadr);
			if (client_v6_capable)
				ip->v6_flag = 0;
		} else {
			if (pp->dstadr)
				ip->dstadr6 =
				    (MDF_BCAST == pp->cast_flags)
					? SOCK_ADDR6(&pp->dstadr->bcast)
					: SOCK_ADDR6(&pp->dstadr->sin);
			else
				ZERO(ip->dstadr6);

			ip->srcadr6 = SOCK_ADDR6(&pp->srcadr);
			ip->v6_flag = 1;
		}
		ip->srcport = NSRCPORT(&pp->srcadr);
		/* translate internal peer state to wire INFO_FLAG_* bits */
		ip->flags = 0;
		if (pp == sys_peer)
		    ip->flags |= INFO_FLAG_SYSPEER;
		if (pp->flags & FLAG_CONFIG)
		    ip->flags |= INFO_FLAG_CONFIG;
		if (pp->flags & FLAG_REFCLOCK)
		    ip->flags |= INFO_FLAG_REFCLOCK;
		if (pp->flags & FLAG_PREFER)
		    ip->flags |= INFO_FLAG_PREFER;
		if (pp->flags & FLAG_BURST)
		    ip->flags |= INFO_FLAG_BURST;
		if (pp->flags & FLAG_IBURST)
		    ip->flags |= INFO_FLAG_IBURST;
		if (pp->status == CTL_PST_SEL_SYNCCAND)
		    ip->flags |= INFO_FLAG_SEL_CANDIDATE;
		if (pp->status >= CTL_PST_SEL_SYSPEER)
		    ip->flags |= INFO_FLAG_SHORTLIST;
		ip->flags = htons(ip->flags);
		/* event counters, converted to network byte order;
		 * time fields are seconds relative to current_time */
		ip->timereceived = htonl((u_int32)(current_time - pp->timereceived));
		ip->timetosend = htonl(pp->nextdate - current_time);
		ip->timereachable = htonl((u_int32)(current_time - pp->timereachable));
		ip->sent = htonl((u_int32)(pp->sent));
		ip->processed = htonl((u_int32)(pp->processed));
		ip->badauth = htonl((u_int32)(pp->badauth));
		ip->bogusorg = htonl((u_int32)(pp->bogusorg));
		ip->oldpkt = htonl((u_int32)(pp->oldpkt));
		ip->seldisp = htonl((u_int32)(pp->seldisptoolarge));
		ip->selbroken = htonl((u_int32)(pp->selbroken));
		ip->candidate = pp->status;
		ip = (struct info_peer_stats *)more_pkt();
	}
	flush_pkt();
}
1055 
1056 
1057 /*
1058  * sys_info - return system info
1059  */
static void
sys_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_sys *is;

	is = (struct info_sys *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_sys));

	/* report the current system peer's address and mode, if any */
	if (sys_peer) {
		if (IS_IPV4(&sys_peer->srcadr)) {
			is->peer = NSRCADR(&sys_peer->srcadr);
			if (client_v6_capable)
				is->v6_flag = 0;
		} else if (client_v6_capable) {
			is->peer6 = SOCK_ADDR6(&sys_peer->srcadr);
			is->v6_flag = 1;
		}
		is->peer_mode = sys_peer->hmode;
	} else {
		is->peer = 0;
		if (client_v6_capable) {
			is->v6_flag = 0;
		}
		is->peer_mode = 0;
	}

	/* system clock variables, converted to wire fixed-point formats */
	is->leap = sys_leap;
	is->stratum = sys_stratum;
	is->precision = sys_precision;
	is->rootdelay = htonl(DTOFP(sys_rootdelay));
	is->rootdispersion = htonl(DTOUFP(sys_rootdisp));
	/* NOTE(review): the wire field is named "frequency" but is filled
	 * from sys_jitter here -- quirk preserved, confirm against the
	 * client's interpretation before changing. */
	is->frequency = htonl(DTOFP(sys_jitter));
	is->stability = htonl(DTOUFP(clock_stability * 1e6));	/* PPM */
	is->refid = sys_refid;
	HTONL_FP(&sys_reftime, &is->reftime);

	is->poll = sys_poll;

	/* translate enabled subsystems to wire INFO_FLAG_* bits */
	is->flags = 0;
	if (sys_authenticate)
		is->flags |= INFO_FLAG_AUTHENTICATE;
	if (sys_bclient)
		is->flags |= INFO_FLAG_BCLIENT;
#ifdef REFCLOCK
	if (cal_enable)
		is->flags |= INFO_FLAG_CAL;
#endif /* REFCLOCK */
	if (kern_enable)
		is->flags |= INFO_FLAG_KERNEL;
	if (mon_enabled != MON_OFF)
		is->flags |= INFO_FLAG_MONITOR;
	if (ntp_enable)
		is->flags |= INFO_FLAG_NTP;
	if (hardpps_enable)
		is->flags |= INFO_FLAG_PPS_SYNC;
	if (stats_control)
		is->flags |= INFO_FLAG_FILEGEN;
	is->bdelay = HTONS_FP(DTOFP(sys_bdelay));
	HTONL_UF(sys_authdelay.l_uf, &is->authdelay);
	(void) more_pkt();
	flush_pkt();
}
1126 
1127 
1128 /*
1129  * sys_stats - return system statistics
1130  */
1131 static void
1132 sys_stats(
1133 	sockaddr_u *srcadr,
1134 	endpt *inter,
1135 	struct req_pkt *inpkt
1136 	)
1137 {
1138 	register struct info_sys_stats *ss;
1139 
1140 	ss = (struct info_sys_stats *)prepare_pkt(srcadr, inter, inpkt,
1141 		sizeof(struct info_sys_stats));
1142 	ss->timeup = htonl((u_int32)current_time);
1143 	ss->timereset = htonl((u_int32)(current_time - sys_stattime));
1144 	ss->denied = htonl((u_int32)sys_restricted);
1145 	ss->oldversionpkt = htonl((u_int32)sys_oldversion);
1146 	ss->newversionpkt = htonl((u_int32)sys_newversion);
1147 	ss->unknownversion = htonl((u_int32)sys_declined);
1148 	ss->badlength = htonl((u_int32)sys_badlength);
1149 	ss->processed = htonl((u_int32)sys_processed);
1150 	ss->badauth = htonl((u_int32)sys_badauth);
1151 	ss->limitrejected = htonl((u_int32)sys_limitrejected);
1152 	ss->received = htonl((u_int32)sys_received);
1153 	(void) more_pkt();
1154 	flush_pkt();
1155 }
1156 
1157 
1158 /*
1159  * mem_stats - return memory statistics
1160  */
1161 static void
1162 mem_stats(
1163 	sockaddr_u *srcadr,
1164 	endpt *inter,
1165 	struct req_pkt *inpkt
1166 	)
1167 {
1168 	register struct info_mem_stats *ms;
1169 	register int i;
1170 
1171 	ms = (struct info_mem_stats *)prepare_pkt(srcadr, inter, inpkt,
1172 						  sizeof(struct info_mem_stats));
1173 
1174 	ms->timereset = htonl((u_int32)(current_time - peer_timereset));
1175 	ms->totalpeermem = htons((u_short)total_peer_structs);
1176 	ms->freepeermem = htons((u_short)peer_free_count);
1177 	ms->findpeer_calls = htonl((u_int32)findpeer_calls);
1178 	ms->allocations = htonl((u_int32)peer_allocations);
1179 	ms->demobilizations = htonl((u_int32)peer_demobilizations);
1180 
1181 	for (i = 0; i < NTP_HASH_SIZE; i++)
1182 		ms->hashcount[i] = (u_char)
1183 		    max((u_int)peer_hash_count[i], UCHAR_MAX);
1184 
1185 	(void) more_pkt();
1186 	flush_pkt();
1187 }
1188 
1189 
1190 /*
1191  * io_stats - return io statistics
1192  */
1193 static void
1194 io_stats(
1195 	sockaddr_u *srcadr,
1196 	endpt *inter,
1197 	struct req_pkt *inpkt
1198 	)
1199 {
1200 	struct info_io_stats *io;
1201 
1202 	io = (struct info_io_stats *)prepare_pkt(srcadr, inter, inpkt,
1203 						 sizeof(struct info_io_stats));
1204 
1205 	io->timereset = htonl((u_int32)(current_time - io_timereset));
1206 	io->totalrecvbufs = htons((u_short) total_recvbuffs());
1207 	io->freerecvbufs = htons((u_short) free_recvbuffs());
1208 	io->fullrecvbufs = htons((u_short) full_recvbuffs());
1209 	io->lowwater = htons((u_short) lowater_additions());
1210 	io->dropped = htonl((u_int32)packets_dropped);
1211 	io->ignored = htonl((u_int32)packets_ignored);
1212 	io->received = htonl((u_int32)packets_received);
1213 	io->sent = htonl((u_int32)packets_sent);
1214 	io->notsent = htonl((u_int32)packets_notsent);
1215 	io->interrupts = htonl((u_int32)handler_calls);
1216 	io->int_received = htonl((u_int32)handler_pkts);
1217 
1218 	(void) more_pkt();
1219 	flush_pkt();
1220 }
1221 
1222 
1223 /*
1224  * timer_stats - return timer statistics
1225  */
1226 static void
1227 timer_stats(
1228 	sockaddr_u *		srcadr,
1229 	endpt *			inter,
1230 	struct req_pkt *	inpkt
1231 	)
1232 {
1233 	struct info_timer_stats *	ts;
1234 	u_long				sincereset;
1235 
1236 	ts = (struct info_timer_stats *)prepare_pkt(srcadr, inter,
1237 						    inpkt, sizeof(*ts));
1238 
1239 	sincereset = current_time - timer_timereset;
1240 	ts->timereset = htonl((u_int32)sincereset);
1241 	ts->alarms = ts->timereset;
1242 	ts->overflows = htonl((u_int32)alarm_overflow);
1243 	ts->xmtcalls = htonl((u_int32)timer_xmtcalls);
1244 
1245 	(void) more_pkt();
1246 	flush_pkt();
1247 }
1248 
1249 
1250 /*
1251  * loop_info - return the current state of the loop filter
1252  */
1253 static void
1254 loop_info(
1255 	sockaddr_u *srcadr,
1256 	endpt *inter,
1257 	struct req_pkt *inpkt
1258 	)
1259 {
1260 	struct info_loop *li;
1261 	l_fp ltmp;
1262 
1263 	li = (struct info_loop *)prepare_pkt(srcadr, inter, inpkt,
1264 	    sizeof(struct info_loop));
1265 
1266 	DTOLFP(last_offset, &ltmp);
1267 	HTONL_FP(&ltmp, &li->last_offset);
1268 	DTOLFP(drift_comp * 1e6, &ltmp);
1269 	HTONL_FP(&ltmp, &li->drift_comp);
1270 	li->compliance = htonl((u_int32)(tc_counter));
1271 	li->watchdog_timer = htonl((u_int32)(current_time - sys_epoch));
1272 
1273 	(void) more_pkt();
1274 	flush_pkt();
1275 }
1276 
1277 
1278 /*
1279  * do_conf - add a peer to the configuration list
1280  */
static void
do_conf(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	u_short			items;	/* request items remaining */
	size_t			item_sz;	/* client's per-item size */
	u_int			fl;	/* translated FLAG_* bits */
	char *			datap;	/* walks the request payload */
	struct conf_peer	temp_cp;	/* local copy of one item */
	sockaddr_u		peeraddr;

	/*
	 * Do a check of everything to see that it looks
	 * okay.  If not, complain about it.  Note we are
	 * very picky here.
	 */
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	/* oversized items would overrun temp_cp in the memcpy() below */
	if (item_sz > sizeof(temp_cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* validate and mobilize each item; first failure aborts the run */
	while (items-- > 0) {
		ZERO(temp_cp);
		memcpy(&temp_cp, datap, item_sz);
		ZERO_SOCK(&peeraddr);

		/* translate wire CONF_FLAG_* bits to internal FLAG_* */
		fl = 0;
		if (temp_cp.flags & CONF_FLAG_PREFER)
			fl |= FLAG_PREFER;
		if (temp_cp.flags & CONF_FLAG_BURST)
			fl |= FLAG_BURST;
		if (temp_cp.flags & CONF_FLAG_IBURST)
			fl |= FLAG_IBURST;
#ifdef AUTOKEY
		if (temp_cp.flags & CONF_FLAG_SKEY)
			fl |= FLAG_SKEY;
#endif	/* AUTOKEY */
		if (client_v6_capable && temp_cp.v6_flag) {
			AF(&peeraddr) = AF_INET6;
			SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
		} else {
			AF(&peeraddr) = AF_INET;
			NSRCADR(&peeraddr) = temp_cp.peeraddr;
			/*
			 * Make sure the address is valid
			 */
			if (!ISREFCLOCKADR(&peeraddr) &&
			    ISBADADR(&peeraddr)) {
				req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
				return;
			}

		}
		NSRCPORT(&peeraddr) = htons(NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif

		/* check mode value: 0 <= hmode <= 6
		 *
		 * There's no good global define for that limit, and
		 * using a magic define is as good (or bad, actually) as
		 * a magic number. So we use the highest possible peer
		 * mode, and that is MODE_BCLIENT.
		 *
		 * [Bug 3009] claims that a problem occurs for hmode > 7,
		 * but the code in ntp_peer.c indicates trouble for any
		 * hmode > 6 ( --> MODE_BCLIENT).
		 */
		if (temp_cp.hmode > MODE_BCLIENT) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		/* Any more checks on the values? Unchecked at this
		 * point:
		 *   - version
		 *   - ttl
		 *   - keyid
		 *
		 *   - minpoll/maxpoll, but they are treated properly
		 *     for all cases internally. Checking not necessary.
		 */

		/* finally create the peer */
		if (peer_config(&peeraddr, NULL, NULL,
		    temp_cp.hmode, temp_cp.version, temp_cp.minpoll,
		    temp_cp.maxpoll, fl, temp_cp.ttl, temp_cp.keyid,
		    NULL) == 0)
		{
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		datap += item_sz;
	}
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
1385 
1386 
1387 /*
1388  * do_unconf - remove a peer from the configuration list
1389  */
static void
do_unconf(
	sockaddr_u *	srcadr,
	endpt *		inter,
	struct req_pkt *inpkt
	)
{
	u_short			items;	/* request items remaining */
	size_t			item_sz;	/* client's per-item size */
	char *			datap;	/* walks the request payload */
	struct conf_unpeer	temp_cp;	/* local copy of one item */
	struct peer *		p;
	sockaddr_u		peeraddr;
	int			loops;	/* 0 = dry run, 1 = busy run */

	/*
	 * This is a bit unstructured, but I like to be careful.
	 * We check to see that every peer exists and is actually
	 * configured.  If so, we remove them.  If not, we return
	 * an error.
	 *
	 * [Bug 3011] Even if we checked all peers given in the request
	 * in a dry run, there's still a chance that the caller played
	 * unfair and gave the same peer multiple times. So we still
	 * have to be prepared for nasty surprises in the second run ;)
	 */

	/* basic consistency checks */
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	if (item_sz > sizeof(temp_cp)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* now do two runs: first a dry run, then a busy one */
	for (loops = 0; loops != 2; ++loops) {
		items = INFO_NITEMS(inpkt->err_nitems);
		datap = inpkt->u.data;
		while (items-- > 0) {
			/* copy from request to local */
			ZERO(temp_cp);
			memcpy(&temp_cp, datap, item_sz);
			/* get address structure */
			ZERO_SOCK(&peeraddr);
			if (client_v6_capable && temp_cp.v6_flag) {
				AF(&peeraddr) = AF_INET6;
				SOCK_ADDR6(&peeraddr) = temp_cp.peeraddr6;
			} else {
				AF(&peeraddr) = AF_INET;
				NSRCADR(&peeraddr) = temp_cp.peeraddr;
			}
			SET_PORT(&peeraddr, NTP_PORT);
#ifdef ISC_PLATFORM_HAVESALEN
			peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
#endif
			DPRINTF(1, ("searching for %s\n",
				    stoa(&peeraddr)));

			/* search for a matching configured(!) peer;
			 * skip associations not mobilized by config */
			p = NULL;
			do {
				p = findexistingpeer(
					&peeraddr, NULL, p, -1, 0);
			} while (p && !(FLAG_CONFIG & p->flags));

			if (!loops && !p) {
				/* Item not found in dry run -- bail! */
				req_ack(srcadr, inter, inpkt,
					INFO_ERR_NODATA);
				return;
			} else if (loops && p) {
				/* Item found in busy run -- remove! */
				peer_clear(p, "GONE");
				unpeer(p);
			}
			datap += item_sz;
		}
	}

	/* report success */
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
1472 
1473 
1474 /*
1475  * set_sys_flag - set system flags
1476  */
static void
set_sys_flag(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* thin wrapper: 1 = set the requested system flags */
	setclr_flags(srcadr, inter, inpkt, 1);
}
1486 
1487 
1488 /*
1489  * clr_sys_flag - clear system flags
1490  */
static void
clr_sys_flag(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* thin wrapper: 0 = clear the requested system flags */
	setclr_flags(srcadr, inter, inpkt, 0);
}
1500 
1501 
1502 /*
1503  * setclr_flags - do the grunge work of flag setting/clearing
1504  */
1505 static void
1506 setclr_flags(
1507 	sockaddr_u *srcadr,
1508 	endpt *inter,
1509 	struct req_pkt *inpkt,
1510 	u_long set
1511 	)
1512 {
1513 	struct conf_sys_flags *sf;
1514 	u_int32 flags;
1515 
1516 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1517 		msyslog(LOG_ERR, "setclr_flags: err_nitems > 1");
1518 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1519 		return;
1520 	}
1521 
1522 	sf = (struct conf_sys_flags *)&inpkt->u;
1523 	flags = ntohl(sf->flags);
1524 
1525 	if (flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1526 		      SYS_FLAG_NTP | SYS_FLAG_KERNEL | SYS_FLAG_MONITOR |
1527 		      SYS_FLAG_FILEGEN | SYS_FLAG_AUTH | SYS_FLAG_CAL)) {
1528 		msyslog(LOG_ERR, "setclr_flags: extra flags: %#x",
1529 			flags & ~(SYS_FLAG_BCLIENT | SYS_FLAG_PPS |
1530 				  SYS_FLAG_NTP | SYS_FLAG_KERNEL |
1531 				  SYS_FLAG_MONITOR | SYS_FLAG_FILEGEN |
1532 				  SYS_FLAG_AUTH | SYS_FLAG_CAL));
1533 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1534 		return;
1535 	}
1536 
1537 	if (flags & SYS_FLAG_BCLIENT)
1538 		proto_config(PROTO_BROADCLIENT, set, 0., NULL);
1539 	if (flags & SYS_FLAG_PPS)
1540 		proto_config(PROTO_PPS, set, 0., NULL);
1541 	if (flags & SYS_FLAG_NTP)
1542 		proto_config(PROTO_NTP, set, 0., NULL);
1543 	if (flags & SYS_FLAG_KERNEL)
1544 		proto_config(PROTO_KERNEL, set, 0., NULL);
1545 	if (flags & SYS_FLAG_MONITOR)
1546 		proto_config(PROTO_MONITOR, set, 0., NULL);
1547 	if (flags & SYS_FLAG_FILEGEN)
1548 		proto_config(PROTO_FILEGEN, set, 0., NULL);
1549 	if (flags & SYS_FLAG_AUTH)
1550 		proto_config(PROTO_AUTHENTICATE, set, 0., NULL);
1551 	if (flags & SYS_FLAG_CAL)
1552 		proto_config(PROTO_CAL, set, 0., NULL);
1553 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1554 }
1555 
1556 /* There have been some issues with the restrict list processing,
1557  * ranging from problems with deep recursion (resulting in stack
1558  * overflows) and overfull reply buffers.
1559  *
1560  * To avoid this trouble the list reversal is done iteratively using a
1561  * scratch pad.
1562  */
typedef struct RestrictStack RestrictStackT;
/*
 * One sheet of the scratch pad used to reverse a restriction list
 * iteratively.  Sheets are chained via 'link'; entries are filled
 * from the top of 'pres' downward, with 'fcnt' counting the FREE
 * slots remaining (it doubles as the index of the next pop).
 */
struct RestrictStack {
	RestrictStackT   *link;	/* next (older) sheet, or NULL */
	size_t            fcnt;	/* unused slots left in pres[] */
	const restrict_u *pres[63];	/* stored restriction pointers */
};
1569 
1570 static size_t
1571 getStackSheetSize(
1572 	RestrictStackT *sp
1573 	)
1574 {
1575 	if (sp)
1576 		return sizeof(sp->pres)/sizeof(sp->pres[0]);
1577 	return 0u;
1578 }
1579 
1580 static int/*BOOL*/
1581 pushRestriction(
1582 	RestrictStackT  **spp,
1583 	const restrict_u *ptr
1584 	)
1585 {
1586 	RestrictStackT *sp;
1587 
1588 	if (NULL == (sp = *spp) || 0 == sp->fcnt) {
1589 		/* need another sheet in the scratch pad */
1590 		sp = emalloc(sizeof(*sp));
1591 		sp->link = *spp;
1592 		sp->fcnt = getStackSheetSize(sp);
1593 		*spp = sp;
1594 	}
1595 	sp->pres[--sp->fcnt] = ptr;
1596 	return TRUE;
1597 }
1598 
1599 static int/*BOOL*/
1600 popRestriction(
1601 	RestrictStackT   **spp,
1602 	const restrict_u **opp
1603 	)
1604 {
1605 	RestrictStackT *sp;
1606 
1607 	if (NULL == (sp = *spp) || sp->fcnt >= getStackSheetSize(sp))
1608 		return FALSE;
1609 
1610 	*opp = sp->pres[sp->fcnt++];
1611 	if (sp->fcnt >= getStackSheetSize(sp)) {
1612 		/* discard sheet from scratch pad */
1613 		*spp = sp->link;
1614 		free(sp);
1615 	}
1616 	return TRUE;
1617 }
1618 
1619 static void
1620 flushRestrictionStack(
1621 	RestrictStackT **spp
1622 	)
1623 {
1624 	RestrictStackT *sp;
1625 
1626 	while (NULL != (sp = *spp)) {
1627 		*spp = sp->link;
1628 		free(sp);
1629 	}
1630 }
1631 
1632 /*
1633  * list_restrict4 - iterative helper for list_restrict dumps IPv4
1634  *		    restriction list in reverse order.
1635  */
1636 static void
1637 list_restrict4(
1638 	const restrict_u *	res,
1639 	struct info_restrict **	ppir
1640 	)
1641 {
1642 	RestrictStackT *	rpad;
1643 	struct info_restrict *	pir;
1644 
1645 	pir = *ppir;
1646 	for (rpad = NULL; res; res = res->link)
1647 		if (!pushRestriction(&rpad, res))
1648 			break;
1649 
1650 	while (pir && popRestriction(&rpad, &res)) {
1651 		pir->addr = htonl(res->u.v4.addr);
1652 		if (client_v6_capable)
1653 			pir->v6_flag = 0;
1654 		pir->mask = htonl(res->u.v4.mask);
1655 		pir->count = htonl(res->count);
1656 		pir->flags = htons(res->flags);
1657 		pir->mflags = htons(res->mflags);
1658 		pir = (struct info_restrict *)more_pkt();
1659 	}
1660 	flushRestrictionStack(&rpad);
1661 	*ppir = pir;
1662 }
1663 
1664 /*
1665  * list_restrict6 - iterative helper for list_restrict dumps IPv6
1666  *		    restriction list in reverse order.
1667  */
1668 static void
1669 list_restrict6(
1670 	const restrict_u *	res,
1671 	struct info_restrict **	ppir
1672 	)
1673 {
1674 	RestrictStackT *	rpad;
1675 	struct info_restrict *	pir;
1676 
1677 	pir = *ppir;
1678 	for (rpad = NULL; res; res = res->link)
1679 		if (!pushRestriction(&rpad, res))
1680 			break;
1681 
1682 	while (pir && popRestriction(&rpad, &res)) {
1683 		pir->addr6 = res->u.v6.addr;
1684 		pir->mask6 = res->u.v6.mask;
1685 		pir->v6_flag = 1;
1686 		pir->count = htonl(res->count);
1687 		pir->flags = htons(res->flags);
1688 		pir->mflags = htons(res->mflags);
1689 		pir = (struct info_restrict *)more_pkt();
1690 	}
1691 	flushRestrictionStack(&rpad);
1692 	*ppir = pir;
1693 }
1694 
1695 
1696 /*
1697  * list_restrict - return the restrict list
1698  */
static void
list_restrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_restrict *ir;

	DPRINTF(3, ("wants restrict list summary\n"));

	/* reply item size depends on whether the client groks IPv6 */
	ir = (struct info_restrict *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_restrict));

	/*
	 * The restriction lists are kept sorted in the reverse order
	 * than they were originally.  To preserve the output semantics,
	 * dump each list in reverse order. The workers take care of that.
	 * IPv6 entries are only sent to clients that can parse them.
	 */
	list_restrict4(restrictlist4, &ir);
	if (client_v6_capable)
		list_restrict6(restrictlist6, &ir);
	flush_pkt();
}
1723 
1724 
1725 /*
1726  * do_resaddflags - add flags to a restrict entry (or create one)
1727  */
static void
do_resaddflags(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* wrapper: add flags to (or create) restriction entries */
	do_restrict(srcadr, inter, inpkt, RESTRICT_FLAGS);
}
1737 
1738 
1739 
1740 /*
1741  * do_ressubflags - remove flags from a restrict entry
1742  */
static void
do_ressubflags(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* wrapper: remove flags from restriction entries */
	do_restrict(srcadr, inter, inpkt, RESTRICT_UNFLAG);
}
1752 
1753 
1754 /*
1755  * do_unrestrict - remove a restrict entry from the list
1756  */
static void
do_unrestrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* wrapper: delete restriction entries outright */
	do_restrict(srcadr, inter, inpkt, RESTRICT_REMOVE);
}
1766 
1767 
1768 /*
1769  * do_restrict - do the dirty stuff of dealing with restrictions
1770  */
static void
do_restrict(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int op			/* RESTRICT_FLAGS/_UNFLAG/_REMOVE */
	)
{
	char *			datap;	/* walks the request payload */
	struct conf_restrict	cr;	/* local copy of one item */
	u_short			items;	/* request items remaining */
	size_t			item_sz;	/* client's per-item size */
	sockaddr_u		matchaddr;
	sockaddr_u		matchmask;
	int			bad;	/* validation-failure bitmask */

	/*
	 * Do a check of the flags to make sure that only
	 * the NTPPORT flag is set, if any.  If not, complain
	 * about it.  Note we are very picky here.
	 */
	items = INFO_NITEMS(inpkt->err_nitems);
	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
	datap = inpkt->u.data;
	/* oversized items would overrun cr in the memcpy() below */
	if (item_sz > sizeof(cr)) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/* first pass: validate every item before touching anything;
	 * 'bad' accumulates which checks failed */
	bad = FALSE;
	while (items-- > 0 && !bad) {
		memcpy(&cr, datap, item_sz);
		cr.flags = ntohs(cr.flags);
		cr.mflags = ntohs(cr.mflags);
		if (~RESM_NTPONLY & cr.mflags)
			bad |= 1;	/* unknown match flag */
		if (~RES_ALLFLAGS & cr.flags)
			bad |= 2;	/* unknown restrict flag */
		if (INADDR_ANY != cr.mask) {
			/* a non-default mask requires a real address */
			if (client_v6_capable && cr.v6_flag) {
				if (IN6_IS_ADDR_UNSPECIFIED(&cr.addr6))
					bad |= 4;
			} else {
				if (INADDR_ANY == cr.addr)
					bad |= 8;
			}
		}
		datap += item_sz;
	}

	if (bad) {
		msyslog(LOG_ERR, "do_restrict: bad = %#x", bad);
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}

	/*
	 * Looks okay, try it out.  Needs to reload data pointer and
	 * item counter. (Talos-CAN-0052)
	 */
	ZERO_SOCK(&matchaddr);
	ZERO_SOCK(&matchmask);
	items = INFO_NITEMS(inpkt->err_nitems);
	datap = inpkt->u.data;

	/* second pass: apply the requested operation to each item */
	while (items-- > 0) {
		memcpy(&cr, datap, item_sz);
		cr.flags = ntohs(cr.flags);
		cr.mflags = ntohs(cr.mflags);
		if (client_v6_capable && cr.v6_flag) {
			AF(&matchaddr) = AF_INET6;
			AF(&matchmask) = AF_INET6;
			SOCK_ADDR6(&matchaddr) = cr.addr6;
			SOCK_ADDR6(&matchmask) = cr.mask6;
		} else {
			AF(&matchaddr) = AF_INET;
			AF(&matchmask) = AF_INET;
			NSRCADR(&matchaddr) = cr.addr;
			NSRCADR(&matchmask) = cr.mask;
		}
		hack_restrict(op, &matchaddr, &matchmask, cr.mflags,
			      cr.flags, 0);
		datap += item_sz;
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
1858 
1859 
1860 /*
1861  * mon_getlist - return monitor data
1862  */
static void
mon_getlist(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/*
	 * Always answers "no data": the mode-7 monitor-list dump is no
	 * longer served here.  (NOTE(review): presumably retired due to
	 * monlist amplification abuse -- confirm against upstream.)
	 */
	req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
}
1872 
1873 
1874 /*
1875  * Module entry points and the flags they correspond with
1876  */
struct reset_entry {
	int flag;		/* flag this corresponds to */
	void (*handler)(void);	/* routine to handle request */
};

/* maps each RESET_FLAG_* bit to the module reset routine it invokes;
 * walked by reset_stats() below */
struct reset_entry reset_entries[] = {
	{ RESET_FLAG_ALLPEERS,	peer_all_reset },
	{ RESET_FLAG_IO,	io_clr_stats },
	{ RESET_FLAG_SYS,	proto_clr_stats },
	{ RESET_FLAG_MEM,	peer_clr_stats },
	{ RESET_FLAG_TIMER,	timer_clr_stats },
	{ RESET_FLAG_AUTH,	reset_auth_stats },
	{ RESET_FLAG_CTL,	ctl_clr_stats },
	{ 0,			0 }	/* sentinel terminates the table */
};
1892 
1893 /*
1894  * reset_stats - reset statistic counters here and there
1895  */
1896 static void
1897 reset_stats(
1898 	sockaddr_u *srcadr,
1899 	endpt *inter,
1900 	struct req_pkt *inpkt
1901 	)
1902 {
1903 	struct reset_flags *rflags;
1904 	u_long flags;
1905 	struct reset_entry *rent;
1906 
1907 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
1908 		msyslog(LOG_ERR, "reset_stats: err_nitems > 1");
1909 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1910 		return;
1911 	}
1912 
1913 	rflags = (struct reset_flags *)&inpkt->u;
1914 	flags = ntohl(rflags->flags);
1915 
1916 	if (flags & ~RESET_ALLFLAGS) {
1917 		msyslog(LOG_ERR, "reset_stats: reset leaves %#lx",
1918 			flags & ~RESET_ALLFLAGS);
1919 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1920 		return;
1921 	}
1922 
1923 	for (rent = reset_entries; rent->flag != 0; rent++) {
1924 		if (flags & rent->flag)
1925 			(*rent->handler)();
1926 	}
1927 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
1928 }
1929 
1930 
1931 /*
1932  * reset_peer - clear a peer's statistics
1933  */
1934 static void
1935 reset_peer(
1936 	sockaddr_u *srcadr,
1937 	endpt *inter,
1938 	struct req_pkt *inpkt
1939 	)
1940 {
1941 	u_short			items;
1942 	size_t			item_sz;
1943 	char *			datap;
1944 	struct conf_unpeer	cp;
1945 	struct peer *		p;
1946 	sockaddr_u		peeraddr;
1947 	int			bad;
1948 
1949 	/*
1950 	 * We check first to see that every peer exists.  If not,
1951 	 * we return an error.
1952 	 */
1953 
1954 	items = INFO_NITEMS(inpkt->err_nitems);
1955 	item_sz = INFO_ITEMSIZE(inpkt->mbz_itemsize);
1956 	datap = inpkt->u.data;
1957 	if (item_sz > sizeof(cp)) {
1958 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
1959 		return;
1960 	}
1961 
1962 	bad = FALSE;
1963 	while (items-- > 0 && !bad) {
1964 		ZERO(cp);
1965 		memcpy(&cp, datap, item_sz);
1966 		ZERO_SOCK(&peeraddr);
1967 		if (client_v6_capable && cp.v6_flag) {
1968 			AF(&peeraddr) = AF_INET6;
1969 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
1970 		} else {
1971 			AF(&peeraddr) = AF_INET;
1972 			NSRCADR(&peeraddr) = cp.peeraddr;
1973 		}
1974 
1975 #ifdef ISC_PLATFORM_HAVESALEN
1976 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
1977 #endif
1978 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
1979 		if (NULL == p)
1980 			bad++;
1981 		datap += item_sz;
1982 	}
1983 
1984 	if (bad) {
1985 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
1986 		return;
1987 	}
1988 
1989 	/*
1990 	 * Now do it in earnest. Needs to reload data pointer and item
1991 	 * counter. (Talos-CAN-0052)
1992 	 */
1993 
1994 	items = INFO_NITEMS(inpkt->err_nitems);
1995 	datap = inpkt->u.data;
1996 	while (items-- > 0) {
1997 		ZERO(cp);
1998 		memcpy(&cp, datap, item_sz);
1999 		ZERO_SOCK(&peeraddr);
2000 		if (client_v6_capable && cp.v6_flag) {
2001 			AF(&peeraddr) = AF_INET6;
2002 			SOCK_ADDR6(&peeraddr) = cp.peeraddr6;
2003 		} else {
2004 			AF(&peeraddr) = AF_INET;
2005 			NSRCADR(&peeraddr) = cp.peeraddr;
2006 		}
2007 		SET_PORT(&peeraddr, 123);
2008 #ifdef ISC_PLATFORM_HAVESALEN
2009 		peeraddr.sa.sa_len = SOCKLEN(&peeraddr);
2010 #endif
2011 		p = findexistingpeer(&peeraddr, NULL, NULL, -1, 0);
2012 		while (p != NULL) {
2013 			peer_reset(p);
2014 			p = findexistingpeer(&peeraddr, NULL, p, -1, 0);
2015 		}
2016 		datap += item_sz;
2017 	}
2018 
2019 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2020 }
2021 
2022 
2023 /*
2024  * do_key_reread - reread the encryption key file
2025  */
static void
do_key_reread(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* reload the key file, then acknowledge unconditionally */
	rereadkeys();
	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
2036 
2037 
2038 /*
2039  * trust_key - make one or more keys trusted
2040  */
static void
trust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* wrapper: 1 = mark the listed key ids trusted */
	do_trustkey(srcadr, inter, inpkt, 1);
}
2050 
2051 
2052 /*
2053  * untrust_key - make one or more keys untrusted
2054  */
static void
untrust_key(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* wrapper: 0 = mark the listed key ids untrusted */
	do_trustkey(srcadr, inter, inpkt, 0);
}
2064 
2065 
2066 /*
2067  * do_trustkey - make keys either trustable or untrustable
2068  */
2069 static void
2070 do_trustkey(
2071 	sockaddr_u *srcadr,
2072 	endpt *inter,
2073 	struct req_pkt *inpkt,
2074 	u_long trust
2075 	)
2076 {
2077 	register uint32_t *kp;
2078 	register int items;
2079 
2080 	items = INFO_NITEMS(inpkt->err_nitems);
2081 	kp = (uint32_t *)&inpkt->u;
2082 	while (items-- > 0) {
2083 		authtrust(*kp, trust);
2084 		kp++;
2085 	}
2086 
2087 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2088 }
2089 
2090 
2091 /*
2092  * get_auth_info - return some stats concerning the authentication module
2093  */
2094 static void
2095 get_auth_info(
2096 	sockaddr_u *srcadr,
2097 	endpt *inter,
2098 	struct req_pkt *inpkt
2099 	)
2100 {
2101 	register struct info_auth *ia;
2102 
2103 	ia = (struct info_auth *)prepare_pkt(srcadr, inter, inpkt,
2104 					     sizeof(struct info_auth));
2105 
2106 	ia->numkeys = htonl((u_int32)authnumkeys);
2107 	ia->numfreekeys = htonl((u_int32)authnumfreekeys);
2108 	ia->keylookups = htonl((u_int32)authkeylookups);
2109 	ia->keynotfound = htonl((u_int32)authkeynotfound);
2110 	ia->encryptions = htonl((u_int32)authencryptions);
2111 	ia->decryptions = htonl((u_int32)authdecryptions);
2112 	ia->keyuncached = htonl((u_int32)authkeyuncached);
2113 	ia->expired = htonl((u_int32)authkeyexpired);
2114 	ia->timereset = htonl((u_int32)(current_time - auth_timereset));
2115 
2116 	(void) more_pkt();
2117 	flush_pkt();
2118 }
2119 
2120 
2121 
2122 /*
2123  * reset_auth_stats - reset the authentication stat counters.  Done here
2124  *		      to keep ntp-isms out of the authentication module
2125  */
2126 void
2127 reset_auth_stats(void)
2128 {
2129 	authkeylookups = 0;
2130 	authkeynotfound = 0;
2131 	authencryptions = 0;
2132 	authdecryptions = 0;
2133 	authkeyuncached = 0;
2134 	auth_timereset = current_time;
2135 }
2136 
2137 
2138 /*
2139  * req_get_traps - return information about current trap holders
2140  */
static void
req_get_traps(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	struct info_trap *it;	/* current reply item */
	struct ctl_trap *tr;	/* walks the trap table */
	size_t i;

	/* nothing to report when no traps are configured */
	if (num_ctl_traps == 0) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	it = (struct info_trap *)prepare_pkt(srcadr, inter, inpkt,
	    v6sizeof(struct info_trap));

	/* emit one item per in-use trap slot, until the reply fills */
	for (i = 0, tr = ctl_traps; it && i < COUNTOF(ctl_traps); i++, tr++) {
		if (tr->tr_flags & TRAP_INUSE) {
			if (IS_IPV4(&tr->tr_addr)) {
				/* wildcard local endpoint reported as 0 */
				if (tr->tr_localaddr == any_interface)
					it->local_address = 0;
				else
					it->local_address
					    = NSRCADR(&tr->tr_localaddr->sin);
				it->trap_address = NSRCADR(&tr->tr_addr);
				if (client_v6_capable)
					it->v6_flag = 0;
			} else {
				/* IPv6 traps are invisible to v4-only
				 * clients */
				if (!client_v6_capable)
					continue;
				it->local_address6
				    = SOCK_ADDR6(&tr->tr_localaddr->sin);
				it->trap_address6 = SOCK_ADDR6(&tr->tr_addr);
				it->v6_flag = 1;
			}
			it->trap_port = NSRCPORT(&tr->tr_addr);
			it->sequence = htons(tr->tr_sequence);
			/* ages in seconds relative to current_time */
			it->settime = htonl((u_int32)(current_time - tr->tr_settime));
			it->origtime = htonl((u_int32)(current_time - tr->tr_origtime));
			it->resets = htonl((u_int32)tr->tr_resets);
			it->flags = htonl((u_int32)tr->tr_flags);
			it = (struct info_trap *)more_pkt();
		}
	}
	flush_pkt();
}
2190 
2191 
2192 /*
2193  * req_set_trap - configure a trap
2194  */
static void
req_set_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* Thin wrapper: install (set != 0) a trap via the common worker. */
	do_setclr_trap(srcadr, inter, inpkt, 1);
}
2204 
2205 
2206 
2207 /*
2208  * req_clr_trap - unconfigure a trap
2209  */
static void
req_clr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* Thin wrapper: remove (set == 0) a trap via the common worker. */
	do_setclr_trap(srcadr, inter, inpkt, 0);
}
2219 
2220 
2221 
2222 /*
2223  * do_setclr_trap - do the grunge work of (un)configuring a trap
2224  */
static void
do_setclr_trap(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt,
	int set			/* nonzero: install trap; zero: remove it */
	)
{
	register struct conf_trap *ct;
	register endpt *linter;
	int res;
	sockaddr_u laddr;	/* scratch address, reused for local then trap */

	/*
	 * Prepare sockaddr
	 */
	ZERO_SOCK(&laddr);
	AF(&laddr) = AF(srcadr);
	SET_PORT(&laddr, NTP_PORT);

	/*
	 * Restrict ourselves to one item only.  This eliminates
	 * the error reporting problem.
	 */
	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
		msyslog(LOG_ERR, "do_setclr_trap: err_nitems > 1");
		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
		return;
	}
	/* The single request item is a conf_trap overlaid on the payload. */
	ct = (struct conf_trap *)&inpkt->u;

	/*
	 * Look for the local interface.  If none, use the default.
	 */
	if (ct->local_address == 0) {
		linter = any_interface;
	} else {
		/* laddr's family was copied from the requester above, so
		 * it selects which address field of the item is read. */
		if (IS_IPV4(&laddr))
			NSRCADR(&laddr) = ct->local_address;
		else
			SOCK_ADDR6(&laddr) = ct->local_address6;
		linter = findinterface(&laddr);
		if (NULL == linter) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}
	}

	/* Reuse laddr for the trap destination address and port. */
	if (IS_IPV4(&laddr))
		NSRCADR(&laddr) = ct->trap_address;
	else
		SOCK_ADDR6(&laddr) = ct->trap_address6;
	if (ct->trap_port)
		NSRCPORT(&laddr) = ct->trap_port;
	else
		SET_PORT(&laddr, TRAPPORT);

	if (set) {
		res = ctlsettrap(&laddr, linter, 0,
				 INFO_VERSION(inpkt->rm_vn_mode));
	} else {
		res = ctlclrtrap(&laddr, linter, 0);
	}

	/* ctlsettrap()/ctlclrtrap() return nonzero on success. */
	if (!res) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
	} else {
		req_ack(srcadr, inter, inpkt, INFO_OKAY);
	}
	return;
}
2296 
2297 /*
2298  * Validate a request packet for a new request or control key:
2299  *  - only one item allowed
2300  *  - key must be valid (that is, known, and not in the autokey range)
2301  */
2302 static void
2303 set_keyid_checked(
2304 	keyid_t        *into,
2305 	const char     *what,
2306 	sockaddr_u     *srcadr,
2307 	endpt          *inter,
2308 	struct req_pkt *inpkt
2309 	)
2310 {
2311 	keyid_t *pkeyid;
2312 	keyid_t  tmpkey;
2313 
2314 	/* restrict ourselves to one item only */
2315 	if (INFO_NITEMS(inpkt->err_nitems) > 1) {
2316 		msyslog(LOG_ERR, "set_keyid_checked[%s]: err_nitems > 1",
2317 			what);
2318 		req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
2319 		return;
2320 	}
2321 
2322 	/* plug the new key from the packet */
2323 	pkeyid = (keyid_t *)&inpkt->u;
2324 	tmpkey = ntohl(*pkeyid);
2325 
2326 	/* validate the new key id, claim data error on failure */
2327 	if (tmpkey < 1 || tmpkey > NTP_MAXKEY || !auth_havekey(tmpkey)) {
2328 		msyslog(LOG_ERR, "set_keyid_checked[%s]: invalid key id: %ld",
2329 			what, (long)tmpkey);
2330 		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
2331 		return;
2332 	}
2333 
2334 	/* if we arrive here, the key is good -- use it */
2335 	*into = tmpkey;
2336 	req_ack(srcadr, inter, inpkt, INFO_OKAY);
2337 }
2338 
2339 /*
2340  * set_request_keyid - set the keyid used to authenticate requests
2341  */
static void
set_request_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* Validate the key id in the packet and store it as the
	 * mode-7 request authentication key. */
	set_keyid_checked(&info_auth_keyid, "request",
			  srcadr, inter, inpkt);
}
2352 
2353 
2354 
2355 /*
 * set_control_keyid - set the keyid used to authenticate control messages
2357  */
static void
set_control_keyid(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	/* Validate the key id in the packet and store it as the
	 * mode-6 control-message authentication key. */
	set_keyid_checked(&ctl_auth_keyid, "control",
			  srcadr, inter, inpkt);
}
2368 
2369 
2370 
2371 /*
2372  * get_ctl_stats - return some stats concerning the control message module
2373  */
2374 static void
2375 get_ctl_stats(
2376 	sockaddr_u *srcadr,
2377 	endpt *inter,
2378 	struct req_pkt *inpkt
2379 	)
2380 {
2381 	register struct info_control *ic;
2382 
2383 	ic = (struct info_control *)prepare_pkt(srcadr, inter, inpkt,
2384 						sizeof(struct info_control));
2385 
2386 	ic->ctltimereset = htonl((u_int32)(current_time - ctltimereset));
2387 	ic->numctlreq = htonl((u_int32)numctlreq);
2388 	ic->numctlbadpkts = htonl((u_int32)numctlbadpkts);
2389 	ic->numctlresponses = htonl((u_int32)numctlresponses);
2390 	ic->numctlfrags = htonl((u_int32)numctlfrags);
2391 	ic->numctlerrors = htonl((u_int32)numctlerrors);
2392 	ic->numctltooshort = htonl((u_int32)numctltooshort);
2393 	ic->numctlinputresp = htonl((u_int32)numctlinputresp);
2394 	ic->numctlinputfrag = htonl((u_int32)numctlinputfrag);
2395 	ic->numctlinputerr = htonl((u_int32)numctlinputerr);
2396 	ic->numctlbadoffset = htonl((u_int32)numctlbadoffset);
2397 	ic->numctlbadversion = htonl((u_int32)numctlbadversion);
2398 	ic->numctldatatooshort = htonl((u_int32)numctldatatooshort);
2399 	ic->numctlbadop = htonl((u_int32)numctlbadop);
2400 	ic->numasyncmsgs = htonl((u_int32)numasyncmsgs);
2401 
2402 	(void) more_pkt();
2403 	flush_pkt();
2404 }
2405 
2406 
2407 #ifdef KERNEL_PLL
2408 /*
2409  * get_kernel_info - get kernel pll/pps information
2410  */
static void
get_kernel_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_kernel *ik;
	struct timex ntx;

	/* Kernel discipline not in use: nothing to report. */
	if (!pll_control) {
		req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
		return;
	}

	/*
	 * Read the current kernel state.  On failure the error is
	 * logged and the zeroed structure is reported anyway.
	 */
	ZERO(ntx);
	if (ntp_adjtime(&ntx) < 0)
		msyslog(LOG_ERR, "get_kernel_info: ntp_adjtime() failed: %m");
	ik = (struct info_kernel *)prepare_pkt(srcadr, inter, inpkt,
	    sizeof(struct info_kernel));

	/*
	 * pll variables
	 */
	ik->offset = htonl((u_int32)ntx.offset);
	ik->freq = htonl((u_int32)ntx.freq);
	ik->maxerror = htonl((u_int32)ntx.maxerror);
	ik->esterror = htonl((u_int32)ntx.esterror);
	ik->status = htons(ntx.status);
	ik->constant = htonl((u_int32)ntx.constant);
	ik->precision = htonl((u_int32)ntx.precision);
	ik->tolerance = htonl((u_int32)ntx.tolerance);

	/*
	 * pps variables
	 */
	ik->ppsfreq = htonl((u_int32)ntx.ppsfreq);
	ik->jitter = htonl((u_int32)ntx.jitter);
	ik->shift = htons(ntx.shift);
	ik->stabil = htonl((u_int32)ntx.stabil);
	ik->jitcnt = htonl((u_int32)ntx.jitcnt);
	ik->calcnt = htonl((u_int32)ntx.calcnt);
	ik->errcnt = htonl((u_int32)ntx.errcnt);
	ik->stbcnt = htonl((u_int32)ntx.stbcnt);

	/* Commit the single entry and send the response. */
	(void) more_pkt();
	flush_pkt();
}
2459 #endif /* KERNEL_PLL */
2460 
2461 
2462 #ifdef REFCLOCK
2463 /*
2464  * get_clock_info - get info about a clock
2465  */
static void
get_clock_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct info_clock *ic;
	register u_int32 *clkaddr;	/* requested clock addresses, in payload */
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;	/* refclock addresses are always IPv4 */
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = &inpkt->u.u32[0];

	ic = (struct info_clock *)prepare_pkt(srcadr, inter, inpkt,
					      sizeof(struct info_clock));

	/* One entry per requested clock; 'ic' goes NULL when the
	 * response fills up.  Any bad address aborts the whole reply
	 * with INFO_ERR_NODATA. */
	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		/* refclock_control() may attach a variable list here;
		 * it is released below after the entry is filled. */
		clock_stat.kv_list = (struct ctl_var *)0;

		refclock_control(&addr, NULL, &clock_stat);

		ic->clockadr = NSRCADR(&addr);
		ic->type = clock_stat.type;
		ic->flags = clock_stat.flags;
		ic->lastevent = clock_stat.lastevent;
		ic->currentstatus = clock_stat.currentstatus;
		ic->polls = htonl((u_int32)clock_stat.polls);
		ic->noresponse = htonl((u_int32)clock_stat.noresponse);
		ic->badformat = htonl((u_int32)clock_stat.badformat);
		ic->baddata = htonl((u_int32)clock_stat.baddata);
		ic->timestarted = htonl((u_int32)clock_stat.timereset);
		/* fudge times travel as network-order l_fp values */
		DTOLFP(clock_stat.fudgetime1, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime1);
		DTOLFP(clock_stat.fudgetime2, &ltmp);
		HTONL_FP(&ltmp, &ic->fudgetime2);
		ic->fudgeval1 = htonl((u_int32)clock_stat.fudgeval1);
		ic->fudgeval2 = htonl(clock_stat.fudgeval2);

		free_varlist(clock_stat.kv_list);

		ic = (struct info_clock *)more_pkt();
	}
	flush_pkt();
}
2527 
2528 
2529 
2530 /*
 * set_clock_fudge - set a clock's fudge factors
2532  */
static void
set_clock_fudge(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register struct conf_fudge *cf;
	register int items;
	struct refclockstat clock_stat;
	sockaddr_u addr;
	l_fp ltmp;

	ZERO(addr);
	ZERO(clock_stat);
	items = INFO_NITEMS(inpkt->err_nitems);
	cf = (struct conf_fudge *)&inpkt->u;

	/* One fudge item per iteration; each names a clock address,
	 * the variable to set ('which') and the value. */
	while (items-- > 0) {
		AF(&addr) = AF_INET;	/* refclock addresses are IPv4 */
		NSRCADR(&addr) = cf->clockadr;
#ifdef ISC_PLATFORM_HAVESALEN
		addr.sa.sa_len = SOCKLEN(&addr);
#endif
		SET_PORT(&addr, NTP_PORT);
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		/* Stage the requested change in clock_stat; haveflags
		 * tells refclock_control() which field to apply. */
		switch(ntohl(cf->which)) {
		    case FUDGE_TIME1:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime1);
			clock_stat.haveflags = CLK_HAVETIME1;
			break;
		    case FUDGE_TIME2:
			NTOHL_FP(&cf->fudgetime, &ltmp);
			LFPTOD(&ltmp, clock_stat.fudgetime2);
			clock_stat.haveflags = CLK_HAVETIME2;
			break;
		    case FUDGE_VAL1:
			clock_stat.fudgeval1 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL1;
			break;
		    case FUDGE_VAL2:
			clock_stat.fudgeval2 = ntohl(cf->fudgeval_flags);
			clock_stat.haveflags = CLK_HAVEVAL2;
			break;
		    case FUDGE_FLAGS:
			/* only the low four flag bits are settable */
			clock_stat.flags = (u_char) (ntohl(cf->fudgeval_flags) & 0xf);
			clock_stat.haveflags =
				(CLK_HAVEFLAG1|CLK_HAVEFLAG2|CLK_HAVEFLAG3|CLK_HAVEFLAG4);
			break;
		    default:
			msyslog(LOG_ERR, "set_clock_fudge: default!");
			req_ack(srcadr, inter, inpkt, INFO_ERR_FMT);
			return;
		}

		/* Second argument set, third NULL: this is a write. */
		refclock_control(&addr, &clock_stat, (struct refclockstat *)0);
	}

	req_ack(srcadr, inter, inpkt, INFO_OKAY);
}
2599 #endif
2600 
2601 #ifdef REFCLOCK
2602 /*
2603  * get_clkbug_info - get debugging info about a clock
2604  */
static void
get_clkbug_info(
	sockaddr_u *srcadr,
	endpt *inter,
	struct req_pkt *inpkt
	)
{
	register int i;
	register struct info_clkbug *ic;
	register u_int32 *clkaddr;	/* requested clock addresses */
	register int items;
	struct refclockbug bug;
	sockaddr_u addr;

	ZERO_SOCK(&addr);
	AF(&addr) = AF_INET;	/* refclock addresses are always IPv4 */
#ifdef ISC_PLATFORM_HAVESALEN
	addr.sa.sa_len = SOCKLEN(&addr);
#endif
	SET_PORT(&addr, NTP_PORT);
	items = INFO_NITEMS(inpkt->err_nitems);
	clkaddr = (u_int32 *)&inpkt->u;

	ic = (struct info_clkbug *)prepare_pkt(srcadr, inter, inpkt,
					       sizeof(struct info_clkbug));

	/* One entry per requested clock; 'ic' goes NULL when the
	 * response fills up.  Bad addresses or empty bug info abort
	 * the whole reply with INFO_ERR_NODATA. */
	while (items-- > 0 && ic) {
		NSRCADR(&addr) = *clkaddr++;
		if (!ISREFCLOCKADR(&addr) || NULL ==
		    findexistingpeer(&addr, NULL, NULL, -1, 0)) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ZERO(bug);
		refclock_buginfo(&addr, &bug);
		if (bug.nvalues == 0 && bug.ntimes == 0) {
			req_ack(srcadr, inter, inpkt, INFO_ERR_NODATA);
			return;
		}

		ic->clockadr = NSRCADR(&addr);
		/* clamp value count and mask the status bits to match */
		i = bug.nvalues;
		if (i > NUMCBUGVALUES)
		    i = NUMCBUGVALUES;
		ic->nvalues = (u_char)i;
		ic->svalues = htons((u_short) (bug.svalues & ((1<<i)-1)));
		while (--i >= 0)
		    ic->values[i] = htonl(bug.values[i]);

		/* clamp the timestamp count the same way */
		i = bug.ntimes;
		if (i > NUMCBUGTIMES)
		    i = NUMCBUGTIMES;
		ic->ntimes = (u_char)i;
		ic->stimes = htonl(bug.stimes);
		while (--i >= 0) {
			HTONL_FP(&bug.times[i], &ic->times[i]);
		}

		ic = (struct info_clkbug *)more_pkt();
	}
	flush_pkt();
}
2668 #endif
2669 
2670 /*
2671  * receiver of interface structures
2672  */
static void
fill_info_if_stats(void *data, interface_info_t *interface_info)
{
	/* 'data' is really a cursor into the response packet; it is
	 * advanced via more_pkt() after each entry is written. */
	struct info_if_stats **ifsp = (struct info_if_stats **)data;
	struct info_if_stats *ifs = *ifsp;
	endpt *ep = interface_info->ep;

	/* NULL cursor means the response packet is already full. */
	if (NULL == ifs)
		return;

	ZERO(*ifs);

	if (IS_IPV6(&ep->sin)) {
		/* v4-only clients cannot represent v6 endpoints; skip. */
		if (!client_v6_capable)
			return;
		ifs->v6_flag = 1;
		ifs->unaddr.addr6 = SOCK_ADDR6(&ep->sin);
		ifs->unbcast.addr6 = SOCK_ADDR6(&ep->bcast);
		ifs->unmask.addr6 = SOCK_ADDR6(&ep->mask);
	} else {
		ifs->v6_flag = 0;
		ifs->unaddr.addr = SOCK_ADDR4(&ep->sin);
		ifs->unbcast.addr = SOCK_ADDR4(&ep->bcast);
		ifs->unmask.addr = SOCK_ADDR4(&ep->mask);
	}
	/* v6_flag was used as a host-order scratch value above; convert
	 * it to network order for the wire. */
	ifs->v6_flag = htonl(ifs->v6_flag);
	strlcpy(ifs->name, ep->name, sizeof(ifs->name));
	ifs->family = htons(ep->family);
	ifs->flags = htonl(ep->flags);
	ifs->last_ttl = htonl(ep->last_ttl);
	ifs->num_mcast = htonl(ep->num_mcast);
	ifs->received = htonl(ep->received);
	ifs->sent = htonl(ep->sent);
	ifs->notsent = htonl(ep->notsent);
	ifs->ifindex = htonl(ep->ifindex);
	/* scope no longer in endpt, in in6_addr typically */
	ifs->scopeid = ifs->ifindex;
	ifs->ifnum = htonl(ep->ifnum);
	ifs->uptime = htonl(current_time - ep->starttime);
	ifs->ignore_packets = ep->ignore_packets;
	ifs->peercnt = htonl(ep->peercnt);
	ifs->action = interface_info->action;

	/* Advance the caller's cursor to the next response slot. */
	*ifsp = (struct info_if_stats *)more_pkt();
}
2718 
2719 /*
2720  * get_if_stats - get interface statistics
2721  */
2722 static void
2723 get_if_stats(
2724 	sockaddr_u *srcadr,
2725 	endpt *inter,
2726 	struct req_pkt *inpkt
2727 	)
2728 {
2729 	struct info_if_stats *ifs;
2730 
2731 	DPRINTF(3, ("wants interface statistics\n"));
2732 
2733 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2734 	    v6sizeof(struct info_if_stats));
2735 
2736 	interface_enumerate(fill_info_if_stats, &ifs);
2737 
2738 	flush_pkt();
2739 }
2740 
2741 static void
2742 do_if_reload(
2743 	sockaddr_u *srcadr,
2744 	endpt *inter,
2745 	struct req_pkt *inpkt
2746 	)
2747 {
2748 	struct info_if_stats *ifs;
2749 
2750 	DPRINTF(3, ("wants interface reload\n"));
2751 
2752 	ifs = (struct info_if_stats *)prepare_pkt(srcadr, inter, inpkt,
2753 	    v6sizeof(struct info_if_stats));
2754 
2755 	interface_update(fill_info_if_stats, &ifs);
2756 
2757 	flush_pkt();
2758 }
2759 
2760